Diffstat
-rw-r--r-- drivers/spi/Kconfig | 1197
-rw-r--r-- drivers/spi/Makefile | 151
-rw-r--r-- drivers/spi/atmel-quadspi.c | 813
-rw-r--r-- drivers/spi/internals.h | 43
-rw-r--r-- drivers/spi/spi-altera-core.c | 222
-rw-r--r-- drivers/spi/spi-altera-dfl.c | 201
-rw-r--r-- drivers/spi/spi-altera-platform.c | 172
-rw-r--r-- drivers/spi/spi-amd.c | 444
-rw-r--r-- drivers/spi/spi-ar934x.c | 251
-rw-r--r-- drivers/spi/spi-armada-3700.c | 935
-rw-r--r-- drivers/spi/spi-aspeed-smc.c | 1218
-rw-r--r-- drivers/spi/spi-at91-usart.c | 681
-rw-r--r-- drivers/spi/spi-ath79.c | 277
-rw-r--r-- drivers/spi/spi-atmel.c | 1802
-rw-r--r-- drivers/spi/spi-au1550.c | 993
-rw-r--r-- drivers/spi/spi-axi-spi-engine.c | 597
-rw-r--r-- drivers/spi/spi-bcm-qspi.c | 1738
-rw-r--r-- drivers/spi/spi-bcm-qspi.h | 104
-rw-r--r-- drivers/spi/spi-bcm2835.c | 1449
-rw-r--r-- drivers/spi/spi-bcm2835aux.c | 605
-rw-r--r-- drivers/spi/spi-bcm63xx-hsspi.c | 544
-rw-r--r-- drivers/spi/spi-bcm63xx.c | 683
-rw-r--r-- drivers/spi/spi-bitbang-txrx.h | 176
-rw-r--r-- drivers/spi/spi-bitbang.c | 453
-rw-r--r-- drivers/spi/spi-brcmstb-qspi.c | 42
-rw-r--r-- drivers/spi/spi-butterfly.c | 323
-rw-r--r-- drivers/spi/spi-cadence-quadspi.c | 1879
-rw-r--r-- drivers/spi/spi-cadence-xspi.c | 641
-rw-r--r-- drivers/spi/spi-cadence.c | 754
-rw-r--r-- drivers/spi/spi-cavium-octeon.c | 102
-rw-r--r-- drivers/spi/spi-cavium-thunderx.c | 124
-rw-r--r-- drivers/spi/spi-cavium.c | 150
-rw-r--r-- drivers/spi/spi-cavium.h | 333
-rw-r--r-- drivers/spi/spi-clps711x.c | 173
-rw-r--r-- drivers/spi/spi-coldfire-qspi.c | 519
-rw-r--r-- drivers/spi/spi-davinci.c | 1053
-rw-r--r-- drivers/spi/spi-dln2.c | 883
-rw-r--r-- drivers/spi/spi-dw-bt1.c | 344
-rw-r--r-- drivers/spi/spi-dw-core.c | 1017
-rw-r--r-- drivers/spi/spi-dw-dma.c | 671
-rw-r--r-- drivers/spi/spi-dw-mmio.c | 403
-rw-r--r-- drivers/spi/spi-dw-pci.c | 215
-rw-r--r-- drivers/spi/spi-dw.h | 307
-rw-r--r-- drivers/spi/spi-ep93xx.c | 770
-rw-r--r-- drivers/spi/spi-falcon.c | 432
-rw-r--r-- drivers/spi/spi-fsi.c | 593
-rw-r--r-- drivers/spi/spi-fsl-cpm.c | 424
-rw-r--r-- drivers/spi/spi-fsl-cpm.h | 39
-rw-r--r-- drivers/spi/spi-fsl-dspi.c | 1452
-rw-r--r-- drivers/spi/spi-fsl-espi.c | 846
-rw-r--r-- drivers/spi/spi-fsl-lib.c | 160
-rw-r--r-- drivers/spi/spi-fsl-lib.h | 114
-rw-r--r-- drivers/spi/spi-fsl-lpspi.c | 992
-rw-r--r-- drivers/spi/spi-fsl-qspi.c | 1013
-rw-r--r-- drivers/spi/spi-fsl-spi.c | 820
-rw-r--r-- drivers/spi/spi-fsl-spi.h | 68
-rw-r--r-- drivers/spi/spi-geni-qcom.c | 1110
-rw-r--r-- drivers/spi/spi-gpio.c | 474
-rw-r--r-- drivers/spi/spi-gxp.c | 321
-rw-r--r-- drivers/spi/spi-hisi-kunpeng.c | 555
-rw-r--r-- drivers/spi/spi-hisi-sfc-v3xx.c | 542
-rw-r--r-- drivers/spi/spi-img-spfi.c | 768
-rw-r--r-- drivers/spi/spi-imx.c | 1960
-rw-r--r-- drivers/spi/spi-ingenic.c | 519
-rw-r--r-- drivers/spi/spi-intel-pci.c | 98
-rw-r--r-- drivers/spi/spi-intel-platform.c | 39
-rw-r--r-- drivers/spi/spi-intel.c | 1431
-rw-r--r-- drivers/spi/spi-intel.h | 19
-rw-r--r-- drivers/spi/spi-iproc-qspi.c | 155
-rw-r--r-- drivers/spi/spi-jcore.c | 235
-rw-r--r-- drivers/spi/spi-lantiq-ssc.c | 1051
-rw-r--r-- drivers/spi/spi-lm70llp.c | 328
-rw-r--r-- drivers/spi/spi-loopback-test.c | 1111
-rw-r--r-- drivers/spi/spi-lp8841-rtc.c | 245
-rw-r--r-- drivers/spi/spi-mem.c | 919
-rw-r--r-- drivers/spi/spi-meson-spicc.c | 944
-rw-r--r-- drivers/spi/spi-meson-spifc.c | 457
-rw-r--r-- drivers/spi/spi-microchip-core-qspi.c | 600
-rw-r--r-- drivers/spi/spi-microchip-core.c | 617
-rw-r--r-- drivers/spi/spi-mpc512x-psc.c | 616
-rw-r--r-- drivers/spi/spi-mpc52xx-psc.c | 455
-rw-r--r-- drivers/spi/spi-mpc52xx.c | 550
-rw-r--r-- drivers/spi/spi-mt65xx.c | 1423
-rw-r--r-- drivers/spi/spi-mt7621.c | 391
-rw-r--r-- drivers/spi/spi-mtk-nor.c | 997
-rw-r--r-- drivers/spi/spi-mtk-snfi.c | 1472
-rw-r--r-- drivers/spi/spi-mux.c | 202
-rw-r--r-- drivers/spi/spi-mxic.c | 853
-rw-r--r-- drivers/spi/spi-mxs.c | 675
-rw-r--r-- drivers/spi/spi-npcm-fiu.c | 791
-rw-r--r-- drivers/spi/spi-npcm-pspi.c | 464
-rw-r--r-- drivers/spi/spi-nxp-fspi.c | 1276
-rw-r--r-- drivers/spi/spi-oc-tiny.c | 306
-rw-r--r-- drivers/spi/spi-omap-100k.c | 490
-rw-r--r-- drivers/spi/spi-omap-uwire.c | 560
-rw-r--r-- drivers/spi/spi-omap2-mcspi.c | 1617
-rw-r--r-- drivers/spi/spi-orion.c | 867
-rw-r--r-- drivers/spi/spi-pic32-sqi.c | 715
-rw-r--r-- drivers/spi/spi-pic32.c | 878
-rw-r--r-- drivers/spi/spi-pl022.c | 2453
-rw-r--r-- drivers/spi/spi-ppc4xx.c | 499
-rw-r--r-- drivers/spi/spi-pxa2xx-dma.c | 243
-rw-r--r-- drivers/spi/spi-pxa2xx-pci.c | 346
-rw-r--r-- drivers/spi/spi-pxa2xx.c | 1893
-rw-r--r-- drivers/spi/spi-pxa2xx.h | 132
-rw-r--r-- drivers/spi/spi-qcom-qspi.c | 663
-rw-r--r-- drivers/spi/spi-qup.c | 1329
-rw-r--r-- drivers/spi/spi-rb4xx.c | 213
-rw-r--r-- drivers/spi/spi-realtek-rtl.c | 209
-rw-r--r-- drivers/spi/spi-rockchip-sfc.c | 692
-rw-r--r-- drivers/spi/spi-rockchip.c | 1086
-rw-r--r-- drivers/spi/spi-rpc-if.c | 216
-rw-r--r-- drivers/spi/spi-rspi.c | 1455
-rw-r--r-- drivers/spi/spi-s3c24xx-regs.h | 41
-rw-r--r-- drivers/spi/spi-s3c24xx.c | 596
-rw-r--r-- drivers/spi/spi-s3c64xx.c | 1545
-rw-r--r-- drivers/spi/spi-sc18is602.c | 348
-rw-r--r-- drivers/spi/spi-sh-hspi.c | 309
-rw-r--r-- drivers/spi/spi-sh-msiof.c | 1446
-rw-r--r-- drivers/spi/spi-sh-sci.c | 197
-rw-r--r-- drivers/spi/spi-sh.c | 474
-rw-r--r-- drivers/spi/spi-sifive.c | 487
-rw-r--r-- drivers/spi/spi-slave-mt27xx.c | 571
-rw-r--r-- drivers/spi/spi-slave-system-control.c | 154
-rw-r--r-- drivers/spi/spi-slave-time.c | 128
-rw-r--r-- drivers/spi/spi-sprd-adi.c | 669
-rw-r--r-- drivers/spi/spi-sprd.c | 1086
-rw-r--r-- drivers/spi/spi-st-ssc4.c | 460
-rw-r--r-- drivers/spi/spi-stm32-qspi.c | 982
-rw-r--r-- drivers/spi/spi-stm32.c | 2047
-rw-r--r-- drivers/spi/spi-sun4i.c | 551
-rw-r--r-- drivers/spi/spi-sun6i.c | 752
-rw-r--r-- drivers/spi/spi-sunplus-sp7021.c | 578
-rw-r--r-- drivers/spi/spi-synquacer.c | 830
-rw-r--r-- drivers/spi/spi-tegra114.c | 1536
-rw-r--r-- drivers/spi/spi-tegra20-sflash.c | 612
-rw-r--r-- drivers/spi/spi-tegra20-slink.c | 1232
-rw-r--r-- drivers/spi/spi-tegra210-quad.c | 1723
-rw-r--r-- drivers/spi/spi-test.h | 128
-rw-r--r-- drivers/spi/spi-ti-qspi.c | 947
-rw-r--r-- drivers/spi/spi-tle62x0.c | 316
-rw-r--r-- drivers/spi/spi-topcliff-pch.c | 1684
-rw-r--r-- drivers/spi/spi-uniphier.c | 812
-rw-r--r-- drivers/spi/spi-xcomm.c | 251
-rw-r--r-- drivers/spi/spi-xilinx.c | 535
-rw-r--r-- drivers/spi/spi-xlp.c | 449
-rw-r--r-- drivers/spi/spi-xtensa-xtfpga.c | 153
-rw-r--r-- drivers/spi/spi-zynq-qspi.c | 779
-rw-r--r-- drivers/spi/spi-zynqmp-gqspi.c | 1281
-rw-r--r-- drivers/spi/spi.c | 4609
-rw-r--r-- drivers/spi/spidev.c | 891
151 files changed, 107119 insertions, 0 deletions
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
new file mode 100644
index 000000000..15ea11ebc
--- /dev/null
+++ b/drivers/spi/Kconfig
@@ -0,0 +1,1197 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# SPI driver configuration
+#
+menuconfig SPI
+ bool "SPI support"
+ depends on HAS_IOMEM
+ help
+ The "Serial Peripheral Interface" is a low level synchronous
+ protocol. Chips that support SPI can have data transfer rates
+ up to several tens of Mbit/sec. Chips are addressed with a
+ controller and a chipselect. Most SPI slaves don't support
+ dynamic device discovery; some are even write-only or read-only.
+
+ SPI is widely used by microcontrollers to talk with sensors,
+ eeprom and flash memory, codecs and various other controller
+ chips, analog to digital (and d-to-a) converters, and more.
+ MMC and SD cards can be accessed using SPI protocol; and for
+ DataFlash cards used in MMC sockets, SPI must always be used.
+
+ SPI is one of a family of similar protocols using a four wire
+ interface (select, clock, data in, data out) including Microwire
+ (half duplex), SSP, SSI, and PSP. This driver framework should
+ work with most such devices and controllers.
+
+if SPI
+
+config SPI_DEBUG
+ bool "Debug support for SPI drivers"
+ depends on DEBUG_KERNEL
+ help
+ Say "yes" to enable debug messaging (like dev_dbg and pr_debug),
+ sysfs, and debugfs support in SPI controller and protocol drivers.
+
+#
+# MASTER side ... talking to discrete SPI slave chips including microcontrollers
+#
+
+config SPI_MASTER
+# bool "SPI Master Support"
+ bool
+ default SPI
+ help
+ If your system has a master-capable SPI controller (which
+ provides the clock and chipselect), you can enable that
+ controller and the protocol drivers for the SPI slave chips
+ that are connected.
+
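A protocol driver for such a slave chip then issues transfers through the SPI core; a minimal sketch (the register-read convention of the slave device is a made-up example), assuming a bound struct spi_device:

    #include <linux/spi/spi.h>

    /* Sketch: read one register from a hypothetical slave chip by writing
     * its address and clocking one byte back within a single message.
     * spi_write_then_read() copies through DMA-safe bounce buffers. */
    static int example_read_reg(struct spi_device *spi, u8 reg, u8 *val)
    {
            return spi_write_then_read(spi, &reg, 1, val, 1);
    }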
+if SPI_MASTER
+
+config SPI_MEM
+ bool "SPI memory extension"
+ help
+ Enable this option if you want to enable the SPI memory extension.
+ This extension is meant to simplify interaction with SPI memories
+ by providing a high-level interface to send memory-like commands.
+
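As a rough illustration of that interface (a sketch, not code from this tree): an operation is described with the SPI_MEM_OP() helpers and handed to the controller, here a JEDEC ID read of a SPI flash:

    #include <linux/spi/spi-mem.h>

    /* Sketch: read the 3-byte JEDEC ID (opcode 0x9f, single-bit I/O) of a
     * SPI flash through the spi-mem interface. */
    static int example_read_jedec_id(struct spi_mem *mem, u8 *id)
    {
            struct spi_mem_op op =
                    SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1),
                               SPI_MEM_OP_NO_ADDR,
                               SPI_MEM_OP_NO_DUMMY,
                               SPI_MEM_OP_DATA_IN(3, id, 1));

            if (!spi_mem_supports_op(mem, &op))
                    return -EOPNOTSUPP;

            return spi_mem_exec_op(mem, &op);
    }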
+comment "SPI Master Controller Drivers"
+
+config SPI_ALTERA
+ tristate "Altera SPI Controller platform driver"
+ select SPI_ALTERA_CORE
+ select REGMAP_MMIO
+ help
+ This is the driver for the Altera SPI Controller.
+
+config SPI_ALTERA_CORE
+ tristate "Altera SPI Controller core code" if COMPILE_TEST
+ select REGMAP
+ help
+ "The core code for the Altera SPI Controller"
+
+config SPI_ALTERA_DFL
+ tristate "DFL bus driver for Altera SPI Controller"
+ depends on FPGA_DFL
+ select SPI_ALTERA_CORE
+ help
+ This is a Device Feature List (DFL) bus driver for the
+ Altera SPI master controller. The SPI master is connected
+ to an SPI-slave-to-Avalon bridge in an Intel MAX BMC.
+
+config SPI_AR934X
+ tristate "Qualcomm Atheros AR934X/QCA95XX SPI controller driver"
+ depends on ATH79 || COMPILE_TEST
+ help
+ This enables support for the SPI controller present on the
+ Qualcomm Atheros AR934X/QCA95XX SoCs.
+
+config SPI_ATH79
+ tristate "Atheros AR71XX/AR724X/AR913X SPI controller driver"
+ depends on ATH79 || COMPILE_TEST
+ select SPI_BITBANG
+ help
+ This enables support for the SPI controller present on the
+ Atheros AR71XX/AR724X/AR913X SoCs.
+
+config SPI_ARMADA_3700
+ tristate "Marvell Armada 3700 SPI Controller"
+ depends on (ARCH_MVEBU && OF) || COMPILE_TEST
+ help
+ This enables support for the SPI controller present on the
+ Marvell Armada 3700 SoCs.
+
+config SPI_ASPEED_SMC
+ tristate "Aspeed flash controllers in SPI mode"
+ depends on ARCH_ASPEED || COMPILE_TEST
+ depends on OF
+ help
+ This enables support for the Firmware Memory controller (FMC)
+ in the Aspeed AST2600, AST2500 and AST2400 SoCs when attached
+ to SPI NOR chips, and support for the SPI flash memory
+ controller (SPI) for the host firmware. The implementation
+ only supports SPI NOR.
+
+config SPI_ATMEL
+ tristate "Atmel SPI Controller"
+ depends on ARCH_AT91 || COMPILE_TEST
+ depends on OF
+ help
+ This selects a driver for the Atmel SPI Controller, present on
+ many AT91 ARM chips.
+
+config SPI_AT91_USART
+ tristate "Atmel USART Controller SPI driver"
+ depends on (ARCH_AT91 || COMPILE_TEST)
+ depends on MFD_AT91_USART
+ help
+ This selects a driver for the AT91 USART Controller as SPI Master,
+ present on AT91 and SAMA5 SoC series.
+
+config SPI_ATMEL_QUADSPI
+ tristate "Atmel Quad SPI Controller"
+ depends on ARCH_AT91 || COMPILE_TEST
+ depends on OF && HAS_IOMEM
+ help
+ This enables support for the Quad SPI controller in master mode.
+ This driver does not support generic SPI. The implementation only
+ supports the spi-mem interface.
+
+config SPI_AU1550
+ tristate "Au1550/Au1200/Au1300 SPI Controller"
+ depends on MIPS_ALCHEMY
+ select SPI_BITBANG
+ help
+ If you say yes to this option, support will be included for the
+ PSC SPI controller found on Au1550, Au1200 and Au1300 series.
+
+config SPI_AXI_SPI_ENGINE
+ tristate "Analog Devices AXI SPI Engine controller"
+ depends on HAS_IOMEM
+ help
+ This enables support for the Analog Devices AXI SPI Engine SPI controller.
+ It is part of the SPI Engine framework that is used in some Analog Devices
+ reference designs for FPGAs.
+
+config SPI_BCM2835
+ tristate "BCM2835 SPI controller"
+ depends on GPIOLIB
+ depends on ARCH_BCM2835 || ARCH_BRCMSTB || COMPILE_TEST
+ help
+ This selects a driver for the Broadcom BCM2835 SPI master.
+
+ The BCM2835 contains two types of SPI master controller; the
+ "universal SPI master", and the regular SPI controller. This driver
+ is for the regular SPI controller. Slave mode operation is not
+ supported.
+
+config SPI_BCM2835AUX
+ tristate "BCM2835 SPI auxiliary controller"
+ depends on ((ARCH_BCM2835 || ARCH_BRCMSTB) && GPIOLIB) || COMPILE_TEST
+ help
+ This selects a driver for the Broadcom BCM2835 SPI aux master.
+
+ The BCM2835 contains two types of SPI master controller; the
+ "universal SPI master", and the regular SPI controller.
+ This driver is for the universal/auxiliary SPI controller.
+
+config SPI_BCM63XX
+ tristate "Broadcom BCM63xx SPI controller"
+ depends on BCM63XX || BMIPS_GENERIC || COMPILE_TEST
+ help
+ Enable support for the SPI controller on the Broadcom BCM63xx SoCs.
+
+config SPI_BCM63XX_HSSPI
+ tristate "Broadcom BCM63XX HS SPI controller driver"
+ depends on BCM63XX || BMIPS_GENERIC || ARCH_BCMBCA || COMPILE_TEST
+ help
+ This enables support for the High Speed SPI controller present on
+ newer Broadcom BCM63XX SoCs.
+
+config SPI_BCM_QSPI
+ tristate "Broadcom BSPI and MSPI controller support"
+ depends on ARCH_BRCMSTB || ARCH_BCM || ARCH_BCM_IPROC || \
+ BMIPS_GENERIC || COMPILE_TEST
+ default ARCH_BCM_IPROC
+ help
+ Enables support for the Broadcom SPI flash and MSPI controller.
+ Select this option for any one of the BRCMSTB, iProc NSP and NS2
+ SoC based platforms. This driver works as an SPI master for SPI
+ NOR flash devices as well as for MSPI devices.
+
+config SPI_BITBANG
+ tristate "Utilities for Bitbanging SPI masters"
+ help
+ With a few GPIO pins, your system can bitbang the SPI protocol.
+ Select this to get SPI support through I/O pins (GPIO, parallel
+ port, etc). Or, some systems' SPI master controller drivers use
+ this code to manage the per-word or per-transfer accesses to the
+ hardware shift registers.
+
+ This is library code, and is automatically selected by drivers that
+ need it. You only need to select this explicitly to support driver
+ modules that aren't part of this kernel tree.
+
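The per-word shifting described above amounts to toggling SCK and sampling MISO around each bit; a standalone mode-0, MSB-first sketch (the setsck()/setmosi()/getmiso() GPIO helpers are hypothetical placeholders, not kernel API):

    /* Hypothetical GPIO accessors -- placeholders, not kernel API. */
    extern void setsck(int level);
    extern void setmosi(int level);
    extern int getmiso(void);

    /* Shift one word out on MOSI and in from MISO, SPI mode 0 (CPOL=0,
     * CPHA=0), MSB first: data is set up while SCK is low and sampled
     * around the rising edge. */
    static unsigned int bitbang_txrx_word(unsigned int word, unsigned int bits)
    {
            unsigned int rx = 0;

            while (bits--) {
                    setmosi((word >> bits) & 1);    /* set up data bit */
                    setsck(1);                      /* slave samples MOSI here */
                    rx = (rx << 1) | getmiso();     /* sample slave's MISO */
                    setsck(0);
            }
            return rx;
    }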
+config SPI_BUTTERFLY
+ tristate "Parallel port adapter for AVR Butterfly (DEVELOPMENT)"
+ depends on PARPORT
+ select SPI_BITBANG
+ help
+ This uses a custom parallel port cable to connect to an AVR
+ Butterfly <http://www.atmel.com/products/avr/butterfly>, an
+ inexpensive battery powered microcontroller evaluation board.
+ This same cable can be used to flash new firmware.
+
+config SPI_CADENCE
+ tristate "Cadence SPI controller"
+ help
+ This selects the Cadence SPI controller master driver
+ used by Xilinx Zynq and ZynqMP.
+
+config SPI_CADENCE_QUADSPI
+ tristate "Cadence Quad SPI controller"
+ depends on OF && (ARM || ARM64 || X86 || COMPILE_TEST)
+ help
+ Enable support for the Cadence Quad SPI Flash controller.
+
+ Cadence QSPI is a specialized controller for connecting an SPI
+ Flash over 1/2/4-bit wide bus. Enable this option if you have a
+ device with a Cadence QSPI controller and want to access the
+ Flash as an MTD device.
+
+config SPI_CADENCE_XSPI
+ tristate "Cadence XSPI controller"
+ depends on (OF || COMPILE_TEST) && HAS_IOMEM
+ depends on SPI_MEM
+ help
+ Enable support for the Cadence XSPI Flash controller.
+
+ Cadence XSPI is a specialized controller for connecting an SPI
+ Flash over a bus up to 8 bits wide. Enable this option if you have a
+ device with a Cadence XSPI controller and want to access the
+ Flash as an MTD device.
+
+config SPI_CLPS711X
+ tristate "CLPS711X host SPI controller"
+ depends on ARCH_CLPS711X || COMPILE_TEST
+ help
+ This enables dedicated general purpose SPI/Microwire1-compatible
+ master mode interface (SSI1) for CLPS711X-based CPUs.
+
+config SPI_COLDFIRE_QSPI
+ tristate "Freescale Coldfire QSPI controller"
+ depends on (M520x || M523x || M5249 || M525x || M527x || M528x || M532x)
+ help
+ This enables support for the Coldfire QSPI controller in master
+ mode.
+
+config SPI_DAVINCI
+ tristate "Texas Instruments DaVinci/DA8x/OMAP-L/AM1x SoC SPI controller"
+ depends on ARCH_DAVINCI || ARCH_KEYSTONE
+ select SPI_BITBANG
+ help
+ SPI master controller for DaVinci/DA8x/OMAP-L/AM1x SPI modules.
+
+config SPI_DESIGNWARE
+ tristate "DesignWare SPI controller core support"
+ imply SPI_MEM
+ help
+ General driver for the DesignWare SPI controller core.
+
+if SPI_DESIGNWARE
+
+config SPI_DW_DMA
+ bool "DMA support for DW SPI controller"
+
+config SPI_DW_PCI
+ tristate "PCI interface driver for DW SPI core"
+ depends on PCI
+
+config SPI_DW_MMIO
+ tristate "Memory-mapped io interface driver for DW SPI core"
+ depends on HAS_IOMEM
+
+config SPI_DW_BT1
+ tristate "Baikal-T1 SPI driver for DW SPI core"
+ depends on MIPS_BAIKAL_T1 || COMPILE_TEST
+ select MULTIPLEXER
+ help
+ The Baikal-T1 SoC is equipped with three DW APB SSI-based MMIO SPI
+ controllers. Two of them are pretty much normal: with IRQ, DMA,
+ 64-word deep FIFOs and four chip-selects, but the third one, being
+ part of the Baikal-T1 System Boot Controller, has very limited
+ resources: no IRQ, no DMA, only a single native chip-select and
+ Tx/Rx FIFOs just 8 words deep. The latter is normally connected
+ to an external SPI NOR flash of 128 Mb (which in general can be
+ bigger).
+
+config SPI_DW_BT1_DIRMAP
+ bool "Directly mapped Baikal-T1 Boot SPI flash support"
+ depends on SPI_DW_BT1
+ help
+ Directly mapped SPI flash memory is an interface specific to the
+ Baikal-T1 System Boot Controller. It is a 16MB MMIO region, which
+ can be used to access a peripheral memory device just by
+ reading/writing data from/to it. Note that the system APB bus
+ will stall during each IO from/to the dirmap region until the
+ operation is finished. So try not to use it concurrently with
+ time-critical tasks (like the SPI memory operations implemented
+ in this driver).
+
+endif
+
+config SPI_DLN2
+ tristate "Diolan DLN-2 USB SPI adapter"
+ depends on MFD_DLN2
+ help
+ If you say yes to this option, support will be included for Diolan
+ DLN2, a USB to SPI interface.
+
+ This driver can also be built as a module. If so, the module
+ will be called spi-dln2.
+
+config SPI_EP93XX
+ tristate "Cirrus Logic EP93xx SPI controller"
+ depends on ARCH_EP93XX || COMPILE_TEST
+ help
+ This enables using the Cirrus EP93xx SPI controller in master
+ mode.
+
+config SPI_FALCON
+ bool "Falcon SPI controller support"
+ depends on SOC_FALCON
+ help
+ The external bus unit (EBU) found on the FALC-ON SoC has SPI
+ emulation that is designed for serial flash access. This driver
+ has only been tested with m25p80 type chips. The hardware has no
+ support for other types of SPI peripherals.
+
+config SPI_FSI
+ tristate "FSI SPI driver"
+ depends on FSI
+ help
+ This enables support for the driver for FSI bus attached SPI
+ controllers.
+
+config SPI_FSL_LPSPI
+ tristate "Freescale i.MX LPSPI controller"
+ depends on ARCH_MXC || COMPILE_TEST
+ help
+ This enables Freescale i.MX LPSPI controllers in master mode.
+
+config SPI_FSL_QUADSPI
+ tristate "Freescale QSPI controller"
+ depends on ARCH_MXC || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ This enables support for the Quad SPI controller in master mode.
+ Up to four flash chips can be connected on two buses with two
+ chipselects each.
+ This controller does not support generic SPI messages. It only
+ supports the high-level SPI memory interface.
+
+config SPI_GXP
+ tristate "GXP SPI driver"
+ depends on ARCH_HPE || COMPILE_TEST
+ help
+ This enables support for the driver for GXP bus attached SPI
+ controllers.
+
+config SPI_HISI_KUNPENG
+ tristate "HiSilicon SPI Controller for Kunpeng SoCs"
+ depends on (ARM64 && ACPI) || COMPILE_TEST
+ help
+ This enables support for HiSilicon SPI controller found on
+ Kunpeng SoCs.
+
+ This driver can also be built as a module. If so, the module
+ will be called hisi-kunpeng-spi.
+
+config SPI_HISI_SFC_V3XX
+ tristate "HiSilicon SPI NOR Flash Controller for Hi16XX chipsets"
+ depends on (ARM64 && ACPI) || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ This enables support for HiSilicon v3xx SPI NOR flash controller
+ found in hi16xx chipsets.
+
+config SPI_NXP_FLEXSPI
+ tristate "NXP Flex SPI controller"
+ depends on ARCH_LAYERSCAPE || HAS_IOMEM
+ help
+ This enables support for the Flex SPI controller in master mode.
+ Up to four slave devices can be connected on two buses with two
+ chipselects each.
+ This controller does not support generic SPI messages and only
+ supports the high-level SPI memory interface.
+
+config SPI_GPIO
+ tristate "GPIO-based bitbanging SPI Master"
+ depends on GPIOLIB || COMPILE_TEST
+ select SPI_BITBANG
+ help
+ This simple GPIO bitbanging SPI master uses the arch-neutral GPIO
+ interface to manage MOSI, MISO, SCK, and chipselect signals. SPI
+ slaves connected to a bus using this driver are configured as usual,
+ except that the spi_board_info.controller_data holds the GPIO number
+ for the chipselect used by this controller driver.
+
+ Note that this driver often won't achieve even 1 Mbit/sec speeds,
+ making it unusually slow for SPI. If your platform can inline
+ GPIO operations, you should be able to leverage that for better
+ speed with a custom version of this driver; see the source code.
+
+config SPI_IMG_SPFI
+ tristate "IMG SPFI controller"
+ depends on MIPS || COMPILE_TEST
+ help
+ This enables support for the SPFI master controller found on
+ IMG SoCs.
+
+config SPI_IMX
+ tristate "Freescale i.MX SPI controllers"
+ depends on ARCH_MXC || COMPILE_TEST
+ help
+ This enables support for the Freescale i.MX SPI controllers.
+
+config SPI_INGENIC
+ tristate "Ingenic SoCs SPI controller"
+ depends on MACH_INGENIC || COMPILE_TEST
+ help
+ This enables support for the Ingenic SoCs SPI controller.
+
+ To compile this driver as a module, choose M here: the module
+ will be called spi-ingenic.
+
+config SPI_INTEL
+ tristate
+
+config SPI_INTEL_PCI
+ tristate "Intel PCH/PCU SPI flash PCI driver (DANGEROUS)"
+ depends on PCI
+ depends on X86 || COMPILE_TEST
+ depends on SPI_MEM
+ select SPI_INTEL
+ help
+ This enables PCI support for the Intel PCH/PCU SPI controller in
+ master mode. This controller is present in modern Intel hardware
+ and is used to hold BIOS and other persistent settings. Using
+ this driver it is possible to upgrade BIOS directly from Linux.
+
+ Say N here unless you know what you are doing. Overwriting the
+ SPI flash may render the system unbootable.
+
+ To compile this driver as a module, choose M here: the module
+ will be called spi-intel-pci.
+
+config SPI_INTEL_PLATFORM
+ tristate "Intel PCH/PCU SPI flash platform driver (DANGEROUS)"
+ depends on X86 || COMPILE_TEST
+ depends on SPI_MEM
+ select SPI_INTEL
+ help
+ This enables platform support for the Intel PCH/PCU SPI
+ controller in master mode. This controller is present in modern
+ Intel hardware and is used to hold BIOS and other persistent
+ settings. Using this driver it is possible to upgrade BIOS
+ directly from Linux.
+
+ Say N here unless you know what you are doing. Overwriting the
+ SPI flash may render the system unbootable.
+
+ To compile this driver as a module, choose M here: the module
+ will be called spi-intel-platform.
+
+config SPI_JCORE
+ tristate "J-Core SPI Master"
+ depends on OF && (SUPERH || COMPILE_TEST)
+ help
+ This enables support for the SPI master controller in the J-Core
+ synthesizable, open source SoC.
+
+config SPI_LM70_LLP
+ tristate "Parallel port adapter for LM70 eval board (DEVELOPMENT)"
+ depends on PARPORT
+ select SPI_BITBANG
+ help
+ This driver supports the NS LM70 LLP Evaluation Board,
+ which interfaces to an LM70 temperature sensor using
+ a parallel port.
+
+config SPI_LP8841_RTC
+ tristate "ICP DAS LP-8841 SPI Controller for RTC"
+ depends on MACH_PXA27X_DT || COMPILE_TEST
+ help
+ This driver provides an SPI master device to drive Maxim
+ DS-1302 real time clock.
+
+ Say N here unless you plan to run the kernel on an ICP DAS
+ LP-8x4x industrial computer.
+
+config SPI_MPC52xx
+ tristate "Freescale MPC52xx SPI (non-PSC) controller support"
+ depends on PPC_MPC52xx
+ help
+ This driver supports the MPC52xx SPI controller in master SPI
+ mode.
+
+config SPI_MPC52xx_PSC
+ tristate "Freescale MPC52xx PSC SPI controller"
+ depends on PPC_MPC52xx
+ help
+ This enables using the Freescale MPC52xx Programmable Serial
+ Controller in master SPI mode.
+
+config SPI_MPC512x_PSC
+ tristate "Freescale MPC512x PSC SPI controller"
+ depends on PPC_MPC512x
+ help
+ This enables using the Freescale MPC5121 Programmable Serial
+ Controller in SPI master mode.
+
+config SPI_FSL_LIB
+ tristate
+ depends on OF
+
+config SPI_FSL_CPM
+ tristate
+ depends on FSL_SOC
+
+config SPI_FSL_SPI
+ tristate "Freescale SPI controller and Aeroflex Gaisler GRLIB SPI controller"
+ depends on OF
+ select SPI_FSL_LIB
+ select SPI_FSL_CPM if FSL_SOC
+ help
+ This enables using the Freescale SPI controllers in master mode.
+ MPC83xx platform uses the controller in cpu mode or CPM/QE mode.
+ MPC8569 uses the controller in QE mode, MPC8610 in cpu mode.
+ This also enables using the Aeroflex Gaisler GRLIB SPI controller in
+ master mode.
+
+config SPI_FSL_DSPI
+ tristate "Freescale DSPI controller"
+ select REGMAP_MMIO
+ depends on SOC_VF610 || SOC_LS1021A || ARCH_LAYERSCAPE || M5441x || COMPILE_TEST
+ help
+ This enables support for the Freescale DSPI controller in master
+ mode. VF610, LS1021A and ColdFire platforms use the controller.
+
+config SPI_FSL_ESPI
+ tristate "Freescale eSPI controller"
+ depends on FSL_SOC
+ help
+ This enables using the Freescale eSPI controllers in master mode.
+ From the MPC8536 on, 85xx platforms use the controller, as do all
+ P10xx, P20xx, P30xx, P40xx and P50xx parts.
+
+config SPI_MESON_SPICC
+ tristate "Amlogic Meson SPICC controller"
+ depends on COMMON_CLK
+ depends on ARCH_MESON || COMPILE_TEST
+ help
+ This enables master mode support for the SPICC (SPI communication
+ controller) available in Amlogic Meson SoCs.
+
+config SPI_MESON_SPIFC
+ tristate "Amlogic Meson SPIFC controller"
+ depends on ARCH_MESON || COMPILE_TEST
+ select REGMAP_MMIO
+ help
+ This enables master mode support for the SPIFC (SPI flash
+ controller) available in Amlogic Meson SoCs.
+
+config SPI_MICROCHIP_CORE
+ tristate "Microchip FPGA SPI controllers"
+ depends on SPI_MASTER
+ help
+ This enables the SPI driver for Microchip FPGA SPI controllers.
+ Say Y or M here if you want to use the "hard" controllers on
+ PolarFire SoC.
+ If built as a module, it will be called spi-microchip-core.
+
+config SPI_MICROCHIP_CORE_QSPI
+ tristate "Microchip FPGA QSPI controllers"
+ depends on SPI_MASTER
+ help
+ This enables the QSPI driver for Microchip FPGA QSPI controllers.
+ Say Y or M here if you want to use the QSPI controllers on
+ PolarFire SoC.
+ If built as a module, it will be called spi-microchip-core-qspi.
+
+config SPI_MT65XX
+ tristate "MediaTek SPI controller"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ help
+ This selects the MediaTek(R) SPI bus driver.
+ If you want to use MediaTek(R) SPI interface,
+ say Y or M here. If you are not sure, say N.
+ SPI drivers for MediaTek MT65XX and MT81XX series ARM SoCs.
+
+config SPI_MT7621
+ tristate "MediaTek MT7621 SPI Controller"
+ depends on RALINK || COMPILE_TEST
+ help
+ This selects a driver for the MediaTek MT7621 SPI Controller.
+
+config SPI_MTK_NOR
+ tristate "MediaTek SPI NOR controller"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ help
+ This enables support for SPI NOR controller found on MediaTek
+ ARM SoCs. This is a controller specifically for SPI NOR flash.
+ It can perform generic SPI transfers up to 6 bytes via generic
+ SPI interface as well as several SPI NOR specific instructions
+ via SPI MEM interface.
+
+config SPI_MTK_SNFI
+ tristate "MediaTek SPI NAND Flash Interface"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on MTD_NAND_ECC_MEDIATEK
+ help
+ This enables support for SPI-NAND mode on the MediaTek NAND
+ Flash Interface found on MediaTek ARM SoCs. This controller
+ is implemented as a SPI-MEM controller with pipelined ECC
+ capability.
+
+config SPI_NPCM_FIU
+ tristate "Nuvoton NPCM FLASH Interface Unit"
+ depends on ARCH_NPCM || COMPILE_TEST
+ depends on OF && HAS_IOMEM
+ help
+ This enables support for the Flash Interface Unit SPI controller
+ in master mode.
+ This driver does not support generic SPI. The implementation only
+ supports the spi-mem interface.
+
+config SPI_NPCM_PSPI
+ tristate "Nuvoton NPCM PSPI Controller"
+ depends on ARCH_NPCM || COMPILE_TEST
+ help
+ This driver provides support for Nuvoton NPCM BMC
+ Peripheral SPI controller in master mode.
+
+config SPI_LANTIQ_SSC
+ tristate "Lantiq SSC SPI controller"
+ depends on LANTIQ || X86 || COMPILE_TEST
+ help
+ This driver supports the Lantiq SSC SPI controller in master
+ mode. This controller is found on Intel (former Lantiq) SoCs like
+ the Danube, Falcon, xRX200, xRX300, Lightning Mountain.
+
+config SPI_OC_TINY
+ tristate "OpenCores tiny SPI"
+ depends on GPIOLIB || COMPILE_TEST
+ select SPI_BITBANG
+ help
+ This is the driver for OpenCores tiny SPI master controller.
+
+config SPI_OCTEON
+ tristate "Cavium OCTEON SPI controller"
+ depends on CAVIUM_OCTEON_SOC
+ help
+ SPI host driver for the hardware found on some Cavium OCTEON
+ SOCs.
+
+config SPI_OMAP_UWIRE
+ tristate "OMAP1 MicroWire"
+ depends on ARCH_OMAP1 || (ARM && COMPILE_TEST)
+ select SPI_BITBANG
+ help
+ This hooks up to the MicroWire controller on OMAP1 chips.
+
+config SPI_OMAP24XX
+ tristate "McSPI driver for OMAP"
+ depends on ARCH_OMAP2PLUS || ARCH_K3 || COMPILE_TEST
+ select SG_SPLIT
+ help
+ SPI master controller for OMAP24XX and later Multichannel SPI
+ (McSPI) modules.
+
+config SPI_TI_QSPI
+ tristate "DRA7xxx QSPI controller support"
+ depends on ARCH_OMAP2PLUS || COMPILE_TEST
+ help
+ QSPI master controller for DRA7xxx used for flash devices.
+ This device supports single, dual and quad read support, while
+ it only supports single write mode.
+
+config SPI_OMAP_100K
+ tristate "OMAP SPI 100K"
+ depends on ARCH_OMAP850 || ARCH_OMAP730 || COMPILE_TEST
+ help
+ OMAP SPI 100K master controller for omap7xx boards.
+
+config SPI_ORION
+ tristate "Orion SPI master"
+ depends on PLAT_ORION || ARCH_MVEBU || COMPILE_TEST
+ help
+ This enables using the SPI master controller on the Orion
+ and MVEBU chips.
+
+config SPI_PIC32
+ tristate "Microchip PIC32 series SPI"
+ depends on MACH_PIC32 || COMPILE_TEST
+ help
+ SPI driver for Microchip PIC32 SPI master controller.
+
+config SPI_PIC32_SQI
+ tristate "Microchip PIC32 Quad SPI driver"
+ depends on MACH_PIC32 || COMPILE_TEST
+ help
+ SPI driver for PIC32 Quad SPI controller.
+
+config SPI_PL022
+ tristate "ARM AMBA PL022 SSP controller"
+ depends on ARM_AMBA
+ default y if ARCH_REALVIEW
+ default y if INTEGRATOR_IMPD1
+ default y if ARCH_VERSATILE
+ help
+ This selects the ARM(R) AMBA(R) PrimeCell PL022 SSP
+ controller. If you have an embedded system with an AMBA(R)
+ bus and a PL022 controller, say Y or M here.
+
+config SPI_PPC4xx
+ tristate "PPC4xx SPI Controller"
+ depends on PPC32 && 4xx
+ select SPI_BITBANG
+ help
+ This selects a driver for the PPC4xx SPI Controller.
+
+config SPI_PXA2XX
+ tristate "PXA2xx SSP SPI master"
+ depends on ARCH_PXA || ARCH_MMP || PCI || ACPI || COMPILE_TEST
+ select PXA_SSP if ARCH_PXA || ARCH_MMP
+ help
+ This enables using a PXA2xx or Sodaville SSP port as a SPI master
+ controller. The driver can be configured to use any SSP port and
+ additional documentation can be found at Documentation/spi/pxa2xx.rst.
+
+config SPI_PXA2XX_PCI
+ def_tristate SPI_PXA2XX && PCI && COMMON_CLK
+
+config SPI_ROCKCHIP
+ tristate "Rockchip SPI controller driver"
+ help
+ This selects a driver for Rockchip SPI controller.
+
+ If you say yes to this option, support will be included for
+ RK3066, RK3188 and RK3288 families of SPI controller.
+ Rockchip SPI controller support DMA transport and PIO mode.
+ The main usecase of this controller is to use spi flash as boot
+ device.
+
+config SPI_ROCKCHIP_SFC
+ tristate "Rockchip Serial Flash Controller (SFC)"
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
+ depends on HAS_IOMEM && HAS_DMA
+ help
+ This enables support for Rockchip serial flash controller. This
+ is a specialized controller used to access SPI flash on some
+ Rockchip SOCs.
+
+ ROCKCHIP SFC supports DMA and PIO modes. When DMA is not available,
+ the driver automatically falls back to PIO mode.
+
+config SPI_RB4XX
+ tristate "Mikrotik RB4XX SPI master"
+ depends on SPI_MASTER && ATH79
+ help
+ SPI controller driver for the Mikrotik RB4xx series boards.
+
+config SPI_RPCIF
+ tristate "Renesas RPC-IF SPI driver"
+ depends on RENESAS_RPCIF
+ help
+ SPI driver for Renesas R-Car Gen3 or RZ/G2 RPC-IF.
+
+config SPI_RSPI
+ tristate "Renesas RSPI/QSPI controller"
+ depends on SUPERH || ARCH_RENESAS || COMPILE_TEST
+ help
+ SPI driver for Renesas RSPI and QSPI blocks.
+
+config SPI_QCOM_QSPI
+ tristate "QTI QSPI controller"
+ depends on ARCH_QCOM
+ help
+ QSPI(Quad SPI) driver for Qualcomm QSPI controller.
+
+config SPI_QUP
+ tristate "Qualcomm SPI controller with QUP interface"
+ depends on ARCH_QCOM || COMPILE_TEST
+ help
+ Qualcomm Universal Peripheral (QUP) core is an AHB slave that
+ provides a common data path (an output FIFO and an input FIFO)
+ for serial peripheral interface (SPI) mini-core. SPI in master
+ mode supports up to 50MHz, up to four chip selects, programmable
+ data path from 4 bits to 32 bits and numerous protocol variants.
+
+ This driver can also be built as a module. If so, the module
+ will be called spi_qup.
+
+config SPI_QCOM_GENI
+ tristate "Qualcomm GENI based SPI controller"
+ depends on QCOM_GENI_SE
+ help
+ This driver supports GENI serial engine based SPI controller in
+ master mode on the Qualcomm Technologies Inc.'s SoCs. If you say
+ yes to this option, support will be included for the built-in SPI
+ interface on the Qualcomm Technologies Inc.'s SoCs.
+
+ This driver can also be built as a module. If so, the module
+ will be called spi-geni-qcom.
+
+config SPI_S3C24XX
+ tristate "Samsung S3C24XX series SPI"
+ depends on ARCH_S3C24XX
+ select SPI_BITBANG
+ help
+ SPI driver for Samsung S3C24XX series ARM SoCs
+
+config SPI_S3C24XX_FIQ
+ bool "S3C24XX driver with FIQ pseudo-DMA"
+ depends on SPI_S3C24XX
+ select FIQ
+ help
+ Enable FIQ support for the S3C24XX SPI driver to provide pseudo
+ DMA by using the fast-interrupt request framework. This allows
+ the driver to get DMA-like performance when there are either
+ no free DMA channels, or when doing transfers that require both
+ TX and RX data paths.
+
+config SPI_S3C64XX
+ tristate "Samsung S3C64XX/Exynos SoC series type SPI"
+ depends on (PLAT_SAMSUNG || ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST)
+ help
+ SPI driver for Samsung S3C64XX, S5Pv210 and Exynos SoCs.
+ Choose Y/M here only if you build for such Samsung SoC.
+
+config SPI_SC18IS602
+ tristate "NXP SC18IS602/602B/603 I2C to SPI bridge"
+ depends on I2C
+ help
+ SPI driver for NXP SC18IS602/602B/603 I2C to SPI bridge.
+
+config SPI_SH_MSIOF
+ tristate "SuperH MSIOF SPI controller"
+ depends on HAVE_CLK
+ depends on ARCH_SHMOBILE || ARCH_RENESAS || COMPILE_TEST
+ help
+ SPI driver for SuperH and SH Mobile MSIOF blocks.
+
+config SPI_SH
+ tristate "SuperH SPI controller"
+ depends on SUPERH || COMPILE_TEST
+ help
+ SPI driver for SuperH SPI blocks.
+
+config SPI_SH_SCI
+ tristate "SuperH SCI SPI controller"
+ depends on SUPERH
+ select SPI_BITBANG
+ help
+ SPI driver for SuperH SCI blocks.
+
+config SPI_SH_HSPI
+ tristate "SuperH HSPI controller"
+ depends on ARCH_RENESAS || COMPILE_TEST
+ help
+ SPI driver for SuperH HSPI blocks.
+
+config SPI_SIFIVE
+ tristate "SiFive SPI controller"
+ depends on HAS_IOMEM
+ help
+ This exposes the SPI controller IP from SiFive.
+
+config SPI_SLAVE_MT27XX
+ tristate "MediaTek SPI slave device"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on SPI_SLAVE
+ help
+ This selects the MediaTek(R) SPI slave device driver.
+ If you want to use MediaTek(R) SPI slave interface,
+ say Y or M here. If you are not sure, say N.
+ SPI slave drivers for MediaTek MT27XX series ARM SoCs.
+
+config SPI_SPRD
+ tristate "Spreadtrum SPI controller"
+ depends on ARCH_SPRD || COMPILE_TEST
+ help
+ SPI driver for Spreadtrum SoCs.
+
+config SPI_SPRD_ADI
+ tristate "Spreadtrum ADI controller"
+ depends on ARCH_SPRD || COMPILE_TEST
+ depends on HWSPINLOCK || (COMPILE_TEST && !HWSPINLOCK)
+ help
+ ADI driver based on SPI for Spreadtrum SoCs.
+
+config SPI_STM32
+ tristate "STMicroelectronics STM32 SPI controller"
+ depends on ARCH_STM32 || COMPILE_TEST
+ help
+ SPI driver for STMicroelectronics STM32 SoCs.
+
+ STM32 SPI controller supports DMA and PIO modes. When DMA
+ is not available, the driver automatically falls back to
+ PIO mode.
+
+config SPI_STM32_QSPI
+ tristate "STMicroelectronics STM32 QUAD SPI controller"
+ depends on ARCH_STM32 || COMPILE_TEST
+ depends on OF
+ depends on SPI_MEM
+ help
+ This enables support for the Quad SPI controller in master mode.
+ This driver does not support generic SPI. The implementation only
+ supports the spi-mem interface.
+
+config SPI_ST_SSC4
+ tristate "STMicroelectronics SPI SSC-based driver"
+ depends on ARCH_STI || COMPILE_TEST
+ help
+ STMicroelectronics SoCs support for SPI. If you say yes to
+ this option, support will be included for the SSC driven SPI.
+
+config SPI_SUN4I
+ tristate "Allwinner A10 SoCs SPI controller"
+ depends on ARCH_SUNXI || COMPILE_TEST
+ help
+ SPI driver for Allwinner sun4i, sun5i and sun7i SoCs
+
+config SPI_SUN6I
+ tristate "Allwinner A31 SPI controller"
+ depends on ARCH_SUNXI || COMPILE_TEST
+ depends on RESET_CONTROLLER
+ help
+ This enables using the SPI controller on the Allwinner A31 SoCs.
+
+config SPI_SUNPLUS_SP7021
+ tristate "Sunplus SP7021 SPI controller"
+ depends on SOC_SP7021 || COMPILE_TEST
+ help
+ This enables Sunplus SP7021 SPI controller driver on the SP7021 SoCs.
+ This driver can also be built as a module. If so, the module will be
+ called spi-sunplus-sp7021.
+
+ If you have a Sunplus SP7021 platform say Y here.
+ If unsure, say N.
+
+config SPI_SYNQUACER
+ tristate "Socionext's SynQuacer HighSpeed SPI controller"
+ depends on ARCH_SYNQUACER || COMPILE_TEST
+ help
+ SPI driver for Socionext's High speed SPI controller which provides
+ various operating modes for interfacing to serial peripheral devices
+ that use the de-facto standard SPI protocol.
+
+ It also supports the new dual-bit and quad-bit SPI protocol.
+
+config SPI_MXIC
+ tristate "Macronix MX25F0A SPI controller"
+ depends on SPI_MASTER
+ imply MTD_NAND_ECC_MXIC
+ help
+ This selects the Macronix MX25F0A SPI controller driver.
+
+config SPI_MXS
+ tristate "Freescale MXS SPI controller"
+ depends on ARCH_MXS
+ select STMP_DEVICE
+ help
+ SPI driver for Freescale MXS devices.
+
+config SPI_TEGRA210_QUAD
+ tristate "NVIDIA Tegra QSPI Controller"
+ depends on ARCH_TEGRA || COMPILE_TEST
+ depends on RESET_CONTROLLER
+ help
+ QSPI driver for NVIDIA Tegra QSPI Controller interface. This
+ controller is different from the SPI controller and is available
+ on Tegra SoCs starting from Tegra210.
+
+config SPI_TEGRA114
+ tristate "NVIDIA Tegra114 SPI Controller"
+ depends on (ARCH_TEGRA && TEGRA20_APB_DMA) || COMPILE_TEST
+ depends on RESET_CONTROLLER
+ help
+ SPI driver for the NVIDIA Tegra114 SPI Controller interface. This
+ controller is different from the SPI controller on older Tegra SoCs,
+ and its register interface has changed as well.
+
+config SPI_TEGRA20_SFLASH
+ tristate "Nvidia Tegra20 Serial flash Controller"
+ depends on ARCH_TEGRA || COMPILE_TEST
+ depends on RESET_CONTROLLER
+ help
+ SPI driver for Nvidia Tegra20 Serial flash Controller interface.
+ The main usecase of this controller is to use spi flash as boot
+ device.
+
+config SPI_TEGRA20_SLINK
+ tristate "Nvidia Tegra20/Tegra30 SLINK Controller"
+ depends on (ARCH_TEGRA && TEGRA20_APB_DMA) || COMPILE_TEST
+ depends on RESET_CONTROLLER
+ help
+ SPI driver for Nvidia Tegra20/Tegra30 SLINK Controller interface.
+
+config SPI_THUNDERX
+ tristate "Cavium ThunderX SPI controller"
+ depends on PCI && 64BIT && (ARM64 || COMPILE_TEST)
+ help
+ SPI host driver for the hardware found on Cavium ThunderX
+ SOCs.
+
+config SPI_TOPCLIFF_PCH
+ tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) SPI"
+ depends on PCI && (X86_32 || MIPS || COMPILE_TEST)
+ help
+ SPI driver for the Topcliff PCH (Platform Controller Hub) SPI bus
+ used in some x86 embedded processors.
+
+ This driver also supports the ML7213/ML7223/ML7831, a companion chip
+ for the Atom E6xx series and compatible with the Intel EG20T PCH.
+
+config SPI_UNIPHIER
+ tristate "Socionext UniPhier SPI Controller"
+ depends on (ARCH_UNIPHIER || COMPILE_TEST) && OF
+ depends on HAS_IOMEM
+ help
+ This enables a driver for the Socionext UniPhier SoC SCSSI SPI controller.
+
+ UniPhier SoCs have SCSSI and MCSSI SPI controllers.
+ Every UniPhier SoC has SCSSI which supports single channel.
+ Older UniPhier Pro4/Pro5 also have MCSSI, which supports multiple channels.
+ This driver supports SCSSI only.
+
+ If your SoC supports SCSSI, say Y here.
+
+config SPI_XCOMM
+ tristate "Analog Devices AD-FMCOMMS1-EBZ SPI-I2C-bridge driver"
+ depends on I2C
+ help
+ Support for the SPI-I2C bridge found on the Analog Devices
+ AD-FMCOMMS1-EBZ board.
+
+config SPI_XILINX
+ tristate "Xilinx SPI controller common module"
+ depends on HAS_IOMEM
+ select SPI_BITBANG
+ help
+ This exposes the SPI controller IP from the Xilinx EDK.
+
+ See the "OPB Serial Peripheral Interface (SPI) (v1.00e)"
+ Product Specification document (DS464) for hardware details.
+
+ Or for the DS570, see "XPS Serial Peripheral Interface (SPI) (v2.00b)"
+
+config SPI_XLP
+ tristate "Cavium ThunderX2 SPI controller driver"
+ depends on ARCH_THUNDER2 || COMPILE_TEST
+ help
+ Enable support for the SPI controller on the Cavium ThunderX2.
+ (Originally on Netlogic XLP SoCs.)
+
+ If you have a Cavium ThunderX2 platform say Y here.
+ If unsure, say N.
+
+config SPI_XTENSA_XTFPGA
+ tristate "Xtensa SPI controller for xtfpga"
+ depends on (XTENSA && XTENSA_PLATFORM_XTFPGA) || COMPILE_TEST
+ select SPI_BITBANG
+ help
+ SPI driver for xtfpga SPI master controller.
+
+ This simple SPI master controller is built into xtfpga bitstreams
+ and is used to control daughterboard audio codec. It always transfers
+ 16 bit words in SPI mode 0, automatically asserting CS on transfer
+ start and deasserting on end.
+
+config SPI_ZYNQ_QSPI
+ tristate "Xilinx Zynq QSPI controller"
+ depends on ARCH_ZYNQ || COMPILE_TEST
+ depends on SPI_MEM
+ help
+ This enables support for the Zynq Quad SPI controller
+ in master mode.
+ This controller only supports SPI memory interface.
+
+config SPI_ZYNQMP_GQSPI
+ tristate "Xilinx ZynqMP GQSPI controller"
+ depends on (SPI_MEM && HAS_DMA) || COMPILE_TEST
+ help
+ Enables Xilinx GQSPI controller driver for Zynq UltraScale+ MPSoC.
+ This controller only supports SPI memory interface.
+
+config SPI_AMD
+ tristate "AMD SPI controller"
+ depends on SPI_MASTER || COMPILE_TEST
+ help
+ Enables SPI controller driver for AMD SoC.
+
+#
+# Add new SPI master controllers in alphabetical order above this line
+#
+
+comment "SPI Multiplexer support"
+
+config SPI_MUX
+ tristate "SPI multiplexer support"
+ select MULTIPLEXER
+ help
+ This adds support for SPI multiplexers. Each SPI mux will be
+ accessible as a SPI controller, the devices behind the mux will appear
+ to be chip selects on this controller. It is still necessary to
+ select one or more specific mux-controller drivers.
+
+#
+# There are lots of SPI device types, with sensors and memory
+# being probably the most widely used ones.
+#
+comment "SPI Protocol Masters"
+
+config SPI_SPIDEV
+ tristate "User mode SPI device driver support"
+ help
+ This supports user mode SPI protocol drivers.
+
+ Note that this application programming interface is EXPERIMENTAL
+ and hence SUBJECT TO CHANGE WITHOUT NOTICE while it stabilizes.
+
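From user space such a device shows up as /dev/spidevB.C; a minimal sketch of a full-duplex transfer through the spidev ioctl interface (the device path and clock rate are arbitrary examples):

    #include <fcntl.h>
    #include <stdint.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/spi/spidev.h>

    /* Sketch: one full-duplex 2-byte transfer on bus 0, chip select 0. */
    int example_spidev_xfer(void)
    {
            uint8_t tx[2] = { 0x9f, 0x00 }, rx[2] = { 0, 0 };
            struct spi_ioc_transfer xfer;
            uint8_t mode = SPI_MODE_0;
            int fd, ret;

            fd = open("/dev/spidev0.0", O_RDWR);
            if (fd < 0)
                    return -1;

            ioctl(fd, SPI_IOC_WR_MODE, &mode);

            memset(&xfer, 0, sizeof(xfer));
            xfer.tx_buf = (unsigned long)tx;
            xfer.rx_buf = (unsigned long)rx;
            xfer.len = sizeof(tx);
            xfer.speed_hz = 1000000;        /* example clock rate */
            xfer.bits_per_word = 8;

            ret = ioctl(fd, SPI_IOC_MESSAGE(1), &xfer);
            close(fd);
            return ret;
    }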
+config SPI_LOOPBACK_TEST
+ tristate "spi loopback test framework support"
+ depends on m
+ help
+ This enables the SPI loopback testing framework driver,
+ primarily used for development of spi_master drivers
+ and to detect regressions.
+
+config SPI_TLE62X0
+ tristate "Infineon TLE62X0 (for power switching)"
+ depends on SYSFS
+ help
+ SPI driver for Infineon TLE62X0 series line driver chips,
+ such as the TLE6220, TLE6230 and TLE6240. This provides a
+ sysfs interface, with each line presented as a kind of GPIO
+ exposing both switch control and diagnostic feedback.
+
+#
+# Add new SPI protocol masters in alphabetical order above this line
+#
+
+endif # SPI_MASTER
+
+#
+# SLAVE side ... listening to other SPI masters
+#
+
+config SPI_SLAVE
+ bool "SPI slave protocol handlers"
+ help
+ If your system has a slave-capable SPI controller, you can enable
+ slave protocol handlers.
+
+if SPI_SLAVE
+
+config SPI_SLAVE_TIME
+ tristate "SPI slave handler reporting boot up time"
+ help
+ SPI slave handler responding with the time of reception of the last
+ SPI message.
+
+config SPI_SLAVE_SYSTEM_CONTROL
+ tristate "SPI slave handler controlling system state"
+ help
+ SPI slave handler to allow remote control of system reboot, power
+ off, halt, and suspend.
+
+endif # SPI_SLAVE
+
+config SPI_DYNAMIC
+ def_bool ACPI || OF_DYNAMIC || SPI_SLAVE
+
+endif # SPI
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
new file mode 100644
index 000000000..4b34e855c
--- /dev/null
+++ b/drivers/spi/Makefile
@@ -0,0 +1,151 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for kernel SPI drivers.
+#
+
+ccflags-$(CONFIG_SPI_DEBUG) := -DDEBUG
+
+# small core, mostly translating board-specific
+# config declarations into driver model code
+obj-$(CONFIG_SPI_MASTER) += spi.o
+obj-$(CONFIG_SPI_MEM) += spi-mem.o
+obj-$(CONFIG_SPI_MUX) += spi-mux.o
+obj-$(CONFIG_SPI_SPIDEV) += spidev.o
+obj-$(CONFIG_SPI_LOOPBACK_TEST) += spi-loopback-test.o
+
+# SPI master controller drivers (bus)
+obj-$(CONFIG_SPI_ALTERA) += spi-altera-platform.o
+obj-$(CONFIG_SPI_ALTERA_CORE) += spi-altera-core.o
+obj-$(CONFIG_SPI_ALTERA_DFL) += spi-altera-dfl.o
+obj-$(CONFIG_SPI_AR934X) += spi-ar934x.o
+obj-$(CONFIG_SPI_ARMADA_3700) += spi-armada-3700.o
+obj-$(CONFIG_SPI_ASPEED_SMC) += spi-aspeed-smc.o
+obj-$(CONFIG_SPI_ATMEL) += spi-atmel.o
+obj-$(CONFIG_SPI_ATMEL_QUADSPI) += atmel-quadspi.o
+obj-$(CONFIG_SPI_AT91_USART) += spi-at91-usart.o
+obj-$(CONFIG_SPI_ATH79) += spi-ath79.o
+obj-$(CONFIG_SPI_AU1550) += spi-au1550.o
+obj-$(CONFIG_SPI_AXI_SPI_ENGINE) += spi-axi-spi-engine.o
+obj-$(CONFIG_SPI_BCM2835) += spi-bcm2835.o
+obj-$(CONFIG_SPI_BCM2835AUX) += spi-bcm2835aux.o
+obj-$(CONFIG_SPI_BCM63XX) += spi-bcm63xx.o
+obj-$(CONFIG_SPI_BCM63XX_HSSPI) += spi-bcm63xx-hsspi.o
+obj-$(CONFIG_SPI_BCM_QSPI) += spi-iproc-qspi.o spi-brcmstb-qspi.o spi-bcm-qspi.o
+obj-$(CONFIG_SPI_BITBANG) += spi-bitbang.o
+obj-$(CONFIG_SPI_BUTTERFLY) += spi-butterfly.o
+obj-$(CONFIG_SPI_CADENCE) += spi-cadence.o
+obj-$(CONFIG_SPI_CADENCE_QUADSPI) += spi-cadence-quadspi.o
+obj-$(CONFIG_SPI_CADENCE_XSPI) += spi-cadence-xspi.o
+obj-$(CONFIG_SPI_CLPS711X) += spi-clps711x.o
+obj-$(CONFIG_SPI_COLDFIRE_QSPI) += spi-coldfire-qspi.o
+obj-$(CONFIG_SPI_DAVINCI) += spi-davinci.o
+obj-$(CONFIG_SPI_DLN2) += spi-dln2.o
+obj-$(CONFIG_SPI_DESIGNWARE) += spi-dw.o
+spi-dw-y := spi-dw-core.o
+spi-dw-$(CONFIG_SPI_DW_DMA) += spi-dw-dma.o
+obj-$(CONFIG_SPI_DW_BT1) += spi-dw-bt1.o
+obj-$(CONFIG_SPI_DW_MMIO) += spi-dw-mmio.o
+obj-$(CONFIG_SPI_DW_PCI) += spi-dw-pci.o
+obj-$(CONFIG_SPI_EP93XX) += spi-ep93xx.o
+obj-$(CONFIG_SPI_FALCON) += spi-falcon.o
+obj-$(CONFIG_SPI_FSI) += spi-fsi.o
+obj-$(CONFIG_SPI_FSL_CPM) += spi-fsl-cpm.o
+obj-$(CONFIG_SPI_FSL_DSPI) += spi-fsl-dspi.o
+obj-$(CONFIG_SPI_FSL_LIB) += spi-fsl-lib.o
+obj-$(CONFIG_SPI_FSL_ESPI) += spi-fsl-espi.o
+obj-$(CONFIG_SPI_FSL_LPSPI) += spi-fsl-lpspi.o
+obj-$(CONFIG_SPI_FSL_QUADSPI) += spi-fsl-qspi.o
+obj-$(CONFIG_SPI_FSL_SPI) += spi-fsl-spi.o
+obj-$(CONFIG_SPI_GPIO) += spi-gpio.o
+obj-$(CONFIG_SPI_GXP) += spi-gxp.o
+obj-$(CONFIG_SPI_HISI_KUNPENG) += spi-hisi-kunpeng.o
+obj-$(CONFIG_SPI_HISI_SFC_V3XX) += spi-hisi-sfc-v3xx.o
+obj-$(CONFIG_SPI_IMG_SPFI) += spi-img-spfi.o
+obj-$(CONFIG_SPI_IMX) += spi-imx.o
+obj-$(CONFIG_SPI_INGENIC) += spi-ingenic.o
+obj-$(CONFIG_SPI_INTEL) += spi-intel.o
+obj-$(CONFIG_SPI_INTEL_PCI) += spi-intel-pci.o
+obj-$(CONFIG_SPI_INTEL_PLATFORM) += spi-intel-platform.o
+obj-$(CONFIG_SPI_LANTIQ_SSC) += spi-lantiq-ssc.o
+obj-$(CONFIG_SPI_JCORE) += spi-jcore.o
+obj-$(CONFIG_SPI_LM70_LLP) += spi-lm70llp.o
+obj-$(CONFIG_SPI_LP8841_RTC) += spi-lp8841-rtc.o
+obj-$(CONFIG_SPI_MESON_SPICC) += spi-meson-spicc.o
+obj-$(CONFIG_SPI_MESON_SPIFC) += spi-meson-spifc.o
+obj-$(CONFIG_SPI_MICROCHIP_CORE) += spi-microchip-core.o
+obj-$(CONFIG_SPI_MICROCHIP_CORE_QSPI) += spi-microchip-core-qspi.o
+obj-$(CONFIG_SPI_MPC512x_PSC) += spi-mpc512x-psc.o
+obj-$(CONFIG_SPI_MPC52xx_PSC) += spi-mpc52xx-psc.o
+obj-$(CONFIG_SPI_MPC52xx) += spi-mpc52xx.o
+obj-$(CONFIG_SPI_MT65XX) += spi-mt65xx.o
+obj-$(CONFIG_SPI_MT7621) += spi-mt7621.o
+obj-$(CONFIG_SPI_MTK_NOR) += spi-mtk-nor.o
+obj-$(CONFIG_SPI_MTK_SNFI) += spi-mtk-snfi.o
+obj-$(CONFIG_SPI_MXIC) += spi-mxic.o
+obj-$(CONFIG_SPI_MXS) += spi-mxs.o
+obj-$(CONFIG_SPI_NPCM_FIU) += spi-npcm-fiu.o
+obj-$(CONFIG_SPI_NPCM_PSPI) += spi-npcm-pspi.o
+obj-$(CONFIG_SPI_NXP_FLEXSPI) += spi-nxp-fspi.o
+obj-$(CONFIG_SPI_OC_TINY) += spi-oc-tiny.o
+spi-octeon-objs := spi-cavium.o spi-cavium-octeon.o
+obj-$(CONFIG_SPI_OCTEON) += spi-octeon.o
+obj-$(CONFIG_SPI_OMAP_UWIRE) += spi-omap-uwire.o
+obj-$(CONFIG_SPI_OMAP_100K) += spi-omap-100k.o
+obj-$(CONFIG_SPI_OMAP24XX) += spi-omap2-mcspi.o
+obj-$(CONFIG_SPI_TI_QSPI) += spi-ti-qspi.o
+obj-$(CONFIG_SPI_ORION) += spi-orion.o
+obj-$(CONFIG_SPI_PIC32) += spi-pic32.o
+obj-$(CONFIG_SPI_PIC32_SQI) += spi-pic32-sqi.o
+obj-$(CONFIG_SPI_PL022) += spi-pl022.o
+obj-$(CONFIG_SPI_PPC4xx) += spi-ppc4xx.o
+spi-pxa2xx-platform-objs := spi-pxa2xx.o spi-pxa2xx-dma.o
+obj-$(CONFIG_SPI_PXA2XX) += spi-pxa2xx-platform.o
+obj-$(CONFIG_SPI_PXA2XX_PCI) += spi-pxa2xx-pci.o
+obj-$(CONFIG_SPI_QCOM_GENI) += spi-geni-qcom.o
+obj-$(CONFIG_SPI_QCOM_QSPI) += spi-qcom-qspi.o
+obj-$(CONFIG_SPI_QUP) += spi-qup.o
+obj-$(CONFIG_SPI_ROCKCHIP) += spi-rockchip.o
+obj-$(CONFIG_SPI_ROCKCHIP_SFC) += spi-rockchip-sfc.o
+obj-$(CONFIG_SPI_RB4XX) += spi-rb4xx.o
+obj-$(CONFIG_MACH_REALTEK_RTL) += spi-realtek-rtl.o
+obj-$(CONFIG_SPI_RPCIF) += spi-rpc-if.o
+obj-$(CONFIG_SPI_RSPI) += spi-rspi.o
+obj-$(CONFIG_SPI_S3C24XX) += spi-s3c24xx-hw.o
+spi-s3c24xx-hw-y := spi-s3c24xx.o
+obj-$(CONFIG_SPI_S3C64XX) += spi-s3c64xx.o
+obj-$(CONFIG_SPI_SC18IS602) += spi-sc18is602.o
+obj-$(CONFIG_SPI_SH) += spi-sh.o
+obj-$(CONFIG_SPI_SH_HSPI) += spi-sh-hspi.o
+obj-$(CONFIG_SPI_SH_MSIOF) += spi-sh-msiof.o
+obj-$(CONFIG_SPI_SH_SCI) += spi-sh-sci.o
+obj-$(CONFIG_SPI_SIFIVE) += spi-sifive.o
+obj-$(CONFIG_SPI_SLAVE_MT27XX) += spi-slave-mt27xx.o
+obj-$(CONFIG_SPI_SPRD) += spi-sprd.o
+obj-$(CONFIG_SPI_SPRD_ADI) += spi-sprd-adi.o
+obj-$(CONFIG_SPI_STM32) += spi-stm32.o
+obj-$(CONFIG_SPI_STM32_QSPI) += spi-stm32-qspi.o
+obj-$(CONFIG_SPI_ST_SSC4) += spi-st-ssc4.o
+obj-$(CONFIG_SPI_SUN4I) += spi-sun4i.o
+obj-$(CONFIG_SPI_SUN6I) += spi-sun6i.o
+obj-$(CONFIG_SPI_SUNPLUS_SP7021) += spi-sunplus-sp7021.o
+obj-$(CONFIG_SPI_SYNQUACER) += spi-synquacer.o
+obj-$(CONFIG_SPI_TEGRA210_QUAD) += spi-tegra210-quad.o
+obj-$(CONFIG_SPI_TEGRA114) += spi-tegra114.o
+obj-$(CONFIG_SPI_TEGRA20_SFLASH) += spi-tegra20-sflash.o
+obj-$(CONFIG_SPI_TEGRA20_SLINK) += spi-tegra20-slink.o
+obj-$(CONFIG_SPI_TLE62X0) += spi-tle62x0.o
+spi-thunderx-objs := spi-cavium.o spi-cavium-thunderx.o
+obj-$(CONFIG_SPI_THUNDERX) += spi-thunderx.o
+obj-$(CONFIG_SPI_TOPCLIFF_PCH) += spi-topcliff-pch.o
+obj-$(CONFIG_SPI_UNIPHIER) += spi-uniphier.o
+obj-$(CONFIG_SPI_XCOMM) += spi-xcomm.o
+obj-$(CONFIG_SPI_XILINX) += spi-xilinx.o
+obj-$(CONFIG_SPI_XLP) += spi-xlp.o
+obj-$(CONFIG_SPI_XTENSA_XTFPGA) += spi-xtensa-xtfpga.o
+obj-$(CONFIG_SPI_ZYNQ_QSPI) += spi-zynq-qspi.o
+obj-$(CONFIG_SPI_ZYNQMP_GQSPI) += spi-zynqmp-gqspi.o
+obj-$(CONFIG_SPI_AMD) += spi-amd.o
+
+# SPI slave protocol handlers
+obj-$(CONFIG_SPI_SLAVE_TIME) += spi-slave-time.o
+obj-$(CONFIG_SPI_SLAVE_SYSTEM_CONTROL) += spi-slave-system-control.o
diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
new file mode 100644
index 000000000..7e05b48db
--- /dev/null
+++ b/drivers/spi/atmel-quadspi.c
@@ -0,0 +1,813 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for Atmel QSPI Controller
+ *
+ * Copyright (C) 2015 Atmel Corporation
+ * Copyright (C) 2018 Cryptera A/S
+ *
+ * Author: Cyrille Pitchen <cyrille.pitchen@atmel.com>
+ * Author: Piotr Bugalski <bugalski.piotr@gmail.com>
+ *
+ * This driver is based on drivers/mtd/spi-nor/fsl-quadspi.c from Freescale.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/spi/spi-mem.h>
+
+/* QSPI register offsets */
+#define QSPI_CR 0x0000 /* Control Register */
+#define QSPI_MR 0x0004 /* Mode Register */
+#define QSPI_RD 0x0008 /* Receive Data Register */
+#define QSPI_TD 0x000c /* Transmit Data Register */
+#define QSPI_SR 0x0010 /* Status Register */
+#define QSPI_IER 0x0014 /* Interrupt Enable Register */
+#define QSPI_IDR 0x0018 /* Interrupt Disable Register */
+#define QSPI_IMR 0x001c /* Interrupt Mask Register */
+#define QSPI_SCR 0x0020 /* Serial Clock Register */
+
+#define QSPI_IAR 0x0030 /* Instruction Address Register */
+#define QSPI_ICR 0x0034 /* Instruction Code Register */
+#define QSPI_WICR 0x0034 /* Write Instruction Code Register */
+#define QSPI_IFR 0x0038 /* Instruction Frame Register */
+#define QSPI_RICR 0x003C /* Read Instruction Code Register */
+
+#define QSPI_SMR 0x0040 /* Scrambling Mode Register */
+#define QSPI_SKR 0x0044 /* Scrambling Key Register */
+
+#define QSPI_WPMR 0x00E4 /* Write Protection Mode Register */
+#define QSPI_WPSR 0x00E8 /* Write Protection Status Register */
+
+#define QSPI_VERSION 0x00FC /* Version Register */
+
+
+/* Bitfields in QSPI_CR (Control Register) */
+#define QSPI_CR_QSPIEN BIT(0)
+#define QSPI_CR_QSPIDIS BIT(1)
+#define QSPI_CR_SWRST BIT(7)
+#define QSPI_CR_LASTXFER BIT(24)
+
+/* Bitfields in QSPI_MR (Mode Register) */
+#define QSPI_MR_SMM BIT(0)
+#define QSPI_MR_LLB BIT(1)
+#define QSPI_MR_WDRBT BIT(2)
+#define QSPI_MR_SMRM BIT(3)
+#define QSPI_MR_CSMODE_MASK GENMASK(5, 4)
+#define QSPI_MR_CSMODE_NOT_RELOADED (0 << 4)
+#define QSPI_MR_CSMODE_LASTXFER (1 << 4)
+#define QSPI_MR_CSMODE_SYSTEMATICALLY (2 << 4)
+#define QSPI_MR_NBBITS_MASK GENMASK(11, 8)
+#define QSPI_MR_NBBITS(n) ((((n) - 8) << 8) & QSPI_MR_NBBITS_MASK)
+#define QSPI_MR_DLYBCT_MASK GENMASK(23, 16)
+#define QSPI_MR_DLYBCT(n) (((n) << 16) & QSPI_MR_DLYBCT_MASK)
+#define QSPI_MR_DLYCS_MASK GENMASK(31, 24)
+#define QSPI_MR_DLYCS(n) (((n) << 24) & QSPI_MR_DLYCS_MASK)
+
+/* Bitfields in QSPI_SR/QSPI_IER/QSPI_IDR/QSPI_IMR */
+#define QSPI_SR_RDRF BIT(0)
+#define QSPI_SR_TDRE BIT(1)
+#define QSPI_SR_TXEMPTY BIT(2)
+#define QSPI_SR_OVRES BIT(3)
+#define QSPI_SR_CSR BIT(8)
+#define QSPI_SR_CSS BIT(9)
+#define QSPI_SR_INSTRE BIT(10)
+#define QSPI_SR_QSPIENS BIT(24)
+
+#define QSPI_SR_CMD_COMPLETED (QSPI_SR_INSTRE | QSPI_SR_CSR)
+
+/* Bitfields in QSPI_SCR (Serial Clock Register) */
+#define QSPI_SCR_CPOL BIT(0)
+#define QSPI_SCR_CPHA BIT(1)
+#define QSPI_SCR_SCBR_MASK GENMASK(15, 8)
+#define QSPI_SCR_SCBR(n) (((n) << 8) & QSPI_SCR_SCBR_MASK)
+#define QSPI_SCR_DLYBS_MASK GENMASK(23, 16)
+#define QSPI_SCR_DLYBS(n) (((n) << 16) & QSPI_SCR_DLYBS_MASK)
+
+/* Bitfields in QSPI_ICR (Read/Write Instruction Code Register) */
+#define QSPI_ICR_INST_MASK GENMASK(7, 0)
+#define QSPI_ICR_INST(inst) (((inst) << 0) & QSPI_ICR_INST_MASK)
+#define QSPI_ICR_OPT_MASK GENMASK(23, 16)
+#define QSPI_ICR_OPT(opt) (((opt) << 16) & QSPI_ICR_OPT_MASK)
+
+/* Bitfields in QSPI_IFR (Instruction Frame Register) */
+#define QSPI_IFR_WIDTH_MASK GENMASK(2, 0)
+#define QSPI_IFR_WIDTH_SINGLE_BIT_SPI (0 << 0)
+#define QSPI_IFR_WIDTH_DUAL_OUTPUT (1 << 0)
+#define QSPI_IFR_WIDTH_QUAD_OUTPUT (2 << 0)
+#define QSPI_IFR_WIDTH_DUAL_IO (3 << 0)
+#define QSPI_IFR_WIDTH_QUAD_IO (4 << 0)
+#define QSPI_IFR_WIDTH_DUAL_CMD (5 << 0)
+#define QSPI_IFR_WIDTH_QUAD_CMD (6 << 0)
+#define QSPI_IFR_INSTEN BIT(4)
+#define QSPI_IFR_ADDREN BIT(5)
+#define QSPI_IFR_OPTEN BIT(6)
+#define QSPI_IFR_DATAEN BIT(7)
+#define QSPI_IFR_OPTL_MASK GENMASK(9, 8)
+#define QSPI_IFR_OPTL_1BIT (0 << 8)
+#define QSPI_IFR_OPTL_2BIT (1 << 8)
+#define QSPI_IFR_OPTL_4BIT (2 << 8)
+#define QSPI_IFR_OPTL_8BIT (3 << 8)
+#define QSPI_IFR_ADDRL BIT(10)
+#define QSPI_IFR_TFRTYP_MEM BIT(12)
+#define QSPI_IFR_SAMA5D2_WRITE_TRSFR BIT(13)
+#define QSPI_IFR_CRM BIT(14)
+#define QSPI_IFR_NBDUM_MASK GENMASK(20, 16)
+#define QSPI_IFR_NBDUM(n) (((n) << 16) & QSPI_IFR_NBDUM_MASK)
+#define QSPI_IFR_APBTFRTYP_READ BIT(24) /* Defined in SAM9X60 */
+
+/* Bitfields in QSPI_SMR (Scrambling Mode Register) */
+#define QSPI_SMR_SCREN BIT(0)
+#define QSPI_SMR_RVDIS BIT(1)
+
+/* Bitfields in QSPI_WPMR (Write Protection Mode Register) */
+#define QSPI_WPMR_WPEN BIT(0)
+#define QSPI_WPMR_WPKEY_MASK GENMASK(31, 8)
+#define QSPI_WPMR_WPKEY(wpkey) (((wpkey) << 8) & QSPI_WPMR_WPKEY_MASK)
+
+/* Bitfields in QSPI_WPSR (Write Protection Status Register) */
+#define QSPI_WPSR_WPVS BIT(0)
+#define QSPI_WPSR_WPVSRC_MASK GENMASK(15, 8)
+#define QSPI_WPSR_WPVSRC(src) (((src) << 8) & QSPI_WPSR_WPVSRC_MASK)
+
+struct atmel_qspi_caps {
+ bool has_qspick;
+ bool has_ricr;
+};
+
+struct atmel_qspi {
+ void __iomem *regs;
+ void __iomem *mem;
+ struct clk *pclk;
+ struct clk *qspick;
+ struct platform_device *pdev;
+ const struct atmel_qspi_caps *caps;
+ resource_size_t mmap_size;
+ u32 pending;
+ u32 mr;
+ u32 scr;
+ struct completion cmd_completion;
+};
+
+struct atmel_qspi_mode {
+ u8 cmd_buswidth;
+ u8 addr_buswidth;
+ u8 data_buswidth;
+ u32 config;
+};
+
+static const struct atmel_qspi_mode atmel_qspi_modes[] = {
+ { 1, 1, 1, QSPI_IFR_WIDTH_SINGLE_BIT_SPI },
+ { 1, 1, 2, QSPI_IFR_WIDTH_DUAL_OUTPUT },
+ { 1, 1, 4, QSPI_IFR_WIDTH_QUAD_OUTPUT },
+ { 1, 2, 2, QSPI_IFR_WIDTH_DUAL_IO },
+ { 1, 4, 4, QSPI_IFR_WIDTH_QUAD_IO },
+ { 2, 2, 2, QSPI_IFR_WIDTH_DUAL_CMD },
+ { 4, 4, 4, QSPI_IFR_WIDTH_QUAD_CMD },
+};
+
+#ifdef VERBOSE_DEBUG
+static const char *atmel_qspi_reg_name(u32 offset, char *tmp, size_t sz)
+{
+ switch (offset) {
+ case QSPI_CR:
+ return "CR";
+ case QSPI_MR:
+ return "MR";
+ case QSPI_RD:
+ return "RD";
+ case QSPI_TD:
+ return "TD";
+ case QSPI_SR:
+ return "SR";
+ case QSPI_IER:
+ return "IER";
+ case QSPI_IDR:
+ return "IDR";
+ case QSPI_IMR:
+ return "IMR";
+ case QSPI_SCR:
+ return "SCR";
+ case QSPI_IAR:
+ return "IAR";
+ case QSPI_ICR:
+ return "ICR/WICR";
+ case QSPI_IFR:
+ return "IFR";
+ case QSPI_RICR:
+ return "RICR";
+ case QSPI_SMR:
+ return "SMR";
+ case QSPI_SKR:
+ return "SKR";
+ case QSPI_WPMR:
+ return "WPMR";
+ case QSPI_WPSR:
+ return "WPSR";
+ case QSPI_VERSION:
+ return "VERSION";
+ default:
+ snprintf(tmp, sz, "0x%02x", offset);
+ break;
+ }
+
+ return tmp;
+}
+#endif /* VERBOSE_DEBUG */
+
+static u32 atmel_qspi_read(struct atmel_qspi *aq, u32 offset)
+{
+ u32 value = readl_relaxed(aq->regs + offset);
+
+#ifdef VERBOSE_DEBUG
+ char tmp[8];
+
+ dev_vdbg(&aq->pdev->dev, "read 0x%08x from %s\n", value,
+ atmel_qspi_reg_name(offset, tmp, sizeof(tmp)));
+#endif /* VERBOSE_DEBUG */
+
+ return value;
+}
+
+static void atmel_qspi_write(u32 value, struct atmel_qspi *aq, u32 offset)
+{
+#ifdef VERBOSE_DEBUG
+ char tmp[8];
+
+ dev_vdbg(&aq->pdev->dev, "write 0x%08x into %s\n", value,
+ atmel_qspi_reg_name(offset, tmp, sizeof(tmp)));
+#endif /* VERBOSE_DEBUG */
+
+ writel_relaxed(value, aq->regs + offset);
+}
+
+static inline bool atmel_qspi_is_compatible(const struct spi_mem_op *op,
+ const struct atmel_qspi_mode *mode)
+{
+ if (op->cmd.buswidth != mode->cmd_buswidth)
+ return false;
+
+ if (op->addr.nbytes && op->addr.buswidth != mode->addr_buswidth)
+ return false;
+
+ if (op->data.nbytes && op->data.buswidth != mode->data_buswidth)
+ return false;
+
+ return true;
+}
+
+static int atmel_qspi_find_mode(const struct spi_mem_op *op)
+{
+ u32 i;
+
+ for (i = 0; i < ARRAY_SIZE(atmel_qspi_modes); i++)
+ if (atmel_qspi_is_compatible(op, &atmel_qspi_modes[i]))
+ return i;
+
+ return -ENOTSUPP;
+}
+
+static bool atmel_qspi_supports_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ if (!spi_mem_default_supports_op(mem, op))
+ return false;
+
+ if (atmel_qspi_find_mode(op) < 0)
+ return false;
+
+ /* special case not supported by hardware */
+ if (op->addr.nbytes == 2 && op->cmd.buswidth != op->addr.buswidth &&
+ op->dummy.nbytes == 0)
+ return false;
+
+ return true;
+}
+
+static int atmel_qspi_set_cfg(struct atmel_qspi *aq,
+ const struct spi_mem_op *op, u32 *offset)
+{
+ u32 iar, icr, ifr;
+ u32 dummy_cycles = 0;
+ int mode;
+
+ iar = 0;
+ icr = QSPI_ICR_INST(op->cmd.opcode);
+ ifr = QSPI_IFR_INSTEN;
+
+ mode = atmel_qspi_find_mode(op);
+ if (mode < 0)
+ return mode;
+ ifr |= atmel_qspi_modes[mode].config;
+
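+ /*
+ * Convert dummy bytes to clock cycles: e.g. 3 dummy bytes on a quad
+ * data bus are 3 * 8 / 4 = 6 cycles.
+ */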
+ if (op->dummy.nbytes)
+ dummy_cycles = op->dummy.nbytes * 8 / op->dummy.buswidth;
+
+ /*
+ * The controller allows 24 and 32-bit addressing, while NAND flashes
+ * require 16-bit long addresses. Handling 8-bit long addresses is done
+ * using the option field. For 16-bit addresses, the workaround depends on
+ * the number of requested dummy cycles. If there are 8 or more dummy
+ * cycles, the address is shifted and sent with the first dummy byte.
+ * Otherwise opcode is disabled and the first byte of the address
+ * contains the command opcode (works only if the opcode and address
+ * use the same buswidth). The limitation is when the 16-bit address is
+ * used without enough dummy cycles and the opcode is using a different
+ * buswidth than the address.
+ */
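+ /*
+ * For example, a 2-byte address with 8 or more dummy cycles is sent as
+ * IAR = (addr << 8) with the dummy count reduced accordingly; with fewer
+ * dummy cycles, INSTEN is cleared and the opcode is packed into bits
+ * 23:16 of IAR instead.
+ */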
+ if (op->addr.buswidth) {
+ switch (op->addr.nbytes) {
+ case 0:
+ break;
+ case 1:
+ ifr |= QSPI_IFR_OPTEN | QSPI_IFR_OPTL_8BIT;
+ icr |= QSPI_ICR_OPT(op->addr.val & 0xff);
+ break;
+ case 2:
+ if (dummy_cycles < 8 / op->addr.buswidth) {
+ ifr &= ~QSPI_IFR_INSTEN;
+ ifr |= QSPI_IFR_ADDREN;
+ iar = (op->cmd.opcode << 16) |
+ (op->addr.val & 0xffff);
+ } else {
+ ifr |= QSPI_IFR_ADDREN;
+ iar = (op->addr.val << 8) & 0xffffff;
+ dummy_cycles -= 8 / op->addr.buswidth;
+ }
+ break;
+ case 3:
+ ifr |= QSPI_IFR_ADDREN;
+ iar = op->addr.val & 0xffffff;
+ break;
+ case 4:
+ ifr |= QSPI_IFR_ADDREN | QSPI_IFR_ADDRL;
+ iar = op->addr.val & 0x7ffffff;
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+ }
+
+ /* offset of the data access in the QSPI memory space */
+ *offset = iar;
+
+ /* Set number of dummy cycles */
+ if (dummy_cycles)
+ ifr |= QSPI_IFR_NBDUM(dummy_cycles);
+
+ /* Set data enable and data transfer type. */
+ if (op->data.nbytes) {
+ ifr |= QSPI_IFR_DATAEN;
+
+ if (op->addr.nbytes)
+ ifr |= QSPI_IFR_TFRTYP_MEM;
+ }
+
+ /*
+ * If the QSPI controller is set in regular SPI mode, set it in
+ * Serial Memory Mode (SMM).
+ */
+ if (aq->mr != QSPI_MR_SMM) {
+ atmel_qspi_write(QSPI_MR_SMM, aq, QSPI_MR);
+ aq->mr = QSPI_MR_SMM;
+ }
+
+ /* Clear pending interrupts */
+ (void)atmel_qspi_read(aq, QSPI_SR);
+
+ /* Set QSPI Instruction Frame registers. */
+ if (op->addr.nbytes && !op->data.nbytes)
+ atmel_qspi_write(iar, aq, QSPI_IAR);
+
+ if (aq->caps->has_ricr) {
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ atmel_qspi_write(icr, aq, QSPI_RICR);
+ else
+ atmel_qspi_write(icr, aq, QSPI_WICR);
+ } else {
+ if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
+ ifr |= QSPI_IFR_SAMA5D2_WRITE_TRSFR;
+
+ atmel_qspi_write(icr, aq, QSPI_ICR);
+ }
+
+ atmel_qspi_write(ifr, aq, QSPI_IFR);
+
+ return 0;
+}
+
+static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->master);
+ u32 sr, offset;
+ int err;
+
+ /*
+ * Check if the address exceeds the MMIO window size. An improvement
+ * would be to add support for regular SPI mode and fall back to it
+ * when the flash memories overrun the controller's memory space.
+ */
+ if (op->addr.val + op->data.nbytes > aq->mmap_size)
+ return -ENOTSUPP;
+
+ err = pm_runtime_resume_and_get(&aq->pdev->dev);
+ if (err < 0)
+ return err;
+
+ err = atmel_qspi_set_cfg(aq, op, &offset);
+ if (err)
+ goto pm_runtime_put;
+
+ /* Skip to the final steps if there is no data */
+ if (op->data.nbytes) {
+ /* Dummy read of QSPI_IFR to synchronize APB and AHB accesses */
+ (void)atmel_qspi_read(aq, QSPI_IFR);
+
+ /* Send/Receive data */
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ memcpy_fromio(op->data.buf.in, aq->mem + offset,
+ op->data.nbytes);
+ else
+ memcpy_toio(aq->mem + offset, op->data.buf.out,
+ op->data.nbytes);
+
+ /* Release the chip-select */
+ atmel_qspi_write(QSPI_CR_LASTXFER, aq, QSPI_CR);
+ }
+
+ /* Poll INSTRuction End status */
+ sr = atmel_qspi_read(aq, QSPI_SR);
+ if ((sr & QSPI_SR_CMD_COMPLETED) == QSPI_SR_CMD_COMPLETED)
+ goto pm_runtime_put;
+
+ /* Wait for INSTRuction End interrupt */
+ reinit_completion(&aq->cmd_completion);
+ aq->pending = sr & QSPI_SR_CMD_COMPLETED;
+ atmel_qspi_write(QSPI_SR_CMD_COMPLETED, aq, QSPI_IER);
+ if (!wait_for_completion_timeout(&aq->cmd_completion,
+ msecs_to_jiffies(1000)))
+ err = -ETIMEDOUT;
+ atmel_qspi_write(QSPI_SR_CMD_COMPLETED, aq, QSPI_IDR);
+
+pm_runtime_put:
+ pm_runtime_mark_last_busy(&aq->pdev->dev);
+ pm_runtime_put_autosuspend(&aq->pdev->dev);
+ return err;
+}
+
+static const char *atmel_qspi_get_name(struct spi_mem *spimem)
+{
+ return dev_name(spimem->spi->dev.parent);
+}
+
+static const struct spi_controller_mem_ops atmel_qspi_mem_ops = {
+ .supports_op = atmel_qspi_supports_op,
+ .exec_op = atmel_qspi_exec_op,
+ .get_name = atmel_qspi_get_name
+};
+
+static int atmel_qspi_setup(struct spi_device *spi)
+{
+ struct spi_controller *ctrl = spi->master;
+ struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
+ unsigned long src_rate;
+ u32 scbr;
+ int ret;
+
+ if (ctrl->busy)
+ return -EBUSY;
+
+ if (!spi->max_speed_hz)
+ return -EINVAL;
+
+ src_rate = clk_get_rate(aq->pclk);
+ if (!src_rate)
+ return -EINVAL;
+
+ /* Compute the QSPI baudrate */
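+ /*
+ * SCBR encodes a clock divider: SCK = pclk / (SCBR + 1). For example,
+ * a 50 MHz request on a 100 MHz pclk gives SCBR = 1.
+ */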
+ scbr = DIV_ROUND_UP(src_rate, spi->max_speed_hz);
+ if (scbr > 0)
+ scbr--;
+
+ ret = pm_runtime_resume_and_get(ctrl->dev.parent);
+ if (ret < 0)
+ return ret;
+
+ aq->scr = QSPI_SCR_SCBR(scbr);
+ atmel_qspi_write(aq->scr, aq, QSPI_SCR);
+
+ pm_runtime_mark_last_busy(ctrl->dev.parent);
+ pm_runtime_put_autosuspend(ctrl->dev.parent);
+
+ return 0;
+}
+
+static void atmel_qspi_init(struct atmel_qspi *aq)
+{
+ /* Reset the QSPI controller */
+ atmel_qspi_write(QSPI_CR_SWRST, aq, QSPI_CR);
+
+ /* Set the QSPI controller by default in Serial Memory Mode */
+ atmel_qspi_write(QSPI_MR_SMM, aq, QSPI_MR);
+ aq->mr = QSPI_MR_SMM;
+
+ /* Enable the QSPI controller */
+ atmel_qspi_write(QSPI_CR_QSPIEN, aq, QSPI_CR);
+}
+
+static irqreturn_t atmel_qspi_interrupt(int irq, void *dev_id)
+{
+ struct atmel_qspi *aq = dev_id;
+ u32 status, mask, pending;
+
+ status = atmel_qspi_read(aq, QSPI_SR);
+ mask = atmel_qspi_read(aq, QSPI_IMR);
+ pending = status & mask;
+
+ if (!pending)
+ return IRQ_NONE;
+
+ aq->pending |= pending;
+ if ((aq->pending & QSPI_SR_CMD_COMPLETED) == QSPI_SR_CMD_COMPLETED)
+ complete(&aq->cmd_completion);
+
+ return IRQ_HANDLED;
+}
+
+static int atmel_qspi_probe(struct platform_device *pdev)
+{
+ struct spi_controller *ctrl;
+ struct atmel_qspi *aq;
+ struct resource *res;
+ int irq, err = 0;
+
+ ctrl = devm_spi_alloc_master(&pdev->dev, sizeof(*aq));
+ if (!ctrl)
+ return -ENOMEM;
+
+ ctrl->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD;
+ ctrl->setup = atmel_qspi_setup;
+ ctrl->bus_num = -1;
+ ctrl->mem_ops = &atmel_qspi_mem_ops;
+ ctrl->num_chipselect = 1;
+ ctrl->dev.of_node = pdev->dev.of_node;
+ platform_set_drvdata(pdev, ctrl);
+
+ aq = spi_controller_get_devdata(ctrl);
+
+ init_completion(&aq->cmd_completion);
+ aq->pdev = pdev;
+
+ /* Map the registers */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_base");
+ aq->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(aq->regs)) {
+ dev_err(&pdev->dev, "missing registers\n");
+ return PTR_ERR(aq->regs);
+ }
+
+ /* Map the AHB memory */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_mmap");
+ aq->mem = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(aq->mem)) {
+ dev_err(&pdev->dev, "missing AHB memory\n");
+ return PTR_ERR(aq->mem);
+ }
+
+ aq->mmap_size = resource_size(res);
+
+ /* Get the peripheral clock */
+ aq->pclk = devm_clk_get(&pdev->dev, "pclk");
+ if (IS_ERR(aq->pclk))
+ aq->pclk = devm_clk_get(&pdev->dev, NULL);
+
+ if (IS_ERR(aq->pclk)) {
+ dev_err(&pdev->dev, "missing peripheral clock\n");
+ return PTR_ERR(aq->pclk);
+ }
+
+ /* Enable the peripheral clock */
+ err = clk_prepare_enable(aq->pclk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable the peripheral clock\n");
+ return err;
+ }
+
+ aq->caps = of_device_get_match_data(&pdev->dev);
+ if (!aq->caps) {
+ dev_err(&pdev->dev, "Could not retrieve QSPI caps\n");
+ err = -EINVAL;
+ goto disable_pclk;
+ }
+
+ if (aq->caps->has_qspick) {
+ /* Get the QSPI system clock */
+ aq->qspick = devm_clk_get(&pdev->dev, "qspick");
+ if (IS_ERR(aq->qspick)) {
+ dev_err(&pdev->dev, "missing system clock\n");
+ err = PTR_ERR(aq->qspick);
+ goto disable_pclk;
+ }
+
+ /* Enable the QSPI system clock */
+ err = clk_prepare_enable(aq->qspick);
+ if (err) {
+ dev_err(&pdev->dev,
+ "failed to enable the QSPI system clock\n");
+ goto disable_pclk;
+ }
+ }
+
+ /* Request the IRQ */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ err = irq;
+ goto disable_qspick;
+ }
+ err = devm_request_irq(&pdev->dev, irq, atmel_qspi_interrupt,
+ 0, dev_name(&pdev->dev), aq);
+ if (err)
+ goto disable_qspick;
+
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_noresume(&pdev->dev);
+
+ atmel_qspi_init(aq);
+
+ err = spi_register_controller(ctrl);
+ if (err) {
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ goto disable_qspick;
+ }
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
+
+ return 0;
+
+disable_qspick:
+ clk_disable_unprepare(aq->qspick);
+disable_pclk:
+ clk_disable_unprepare(aq->pclk);
+
+ return err;
+}
+
+static int atmel_qspi_remove(struct platform_device *pdev)
+{
+ struct spi_controller *ctrl = platform_get_drvdata(pdev);
+ struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
+ int ret;
+
+ spi_unregister_controller(ctrl);
+
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret >= 0) {
+ atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR);
+ clk_disable(aq->qspick);
+ clk_disable(aq->pclk);
+ } else {
+ /*
+ * atmel_qspi_runtime_{suspend,resume} just disable and enable
+ * the two clks respectively. So if resume failed, the clks are already
+ * off, and we skip the hardware access and do not disable them again.
+ */
+ dev_warn(&pdev->dev, "Failed to resume device on remove\n");
+ }
+
+ clk_unprepare(aq->qspick);
+ clk_unprepare(aq->pclk);
+
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+
+ return 0;
+}
+
+static int __maybe_unused atmel_qspi_suspend(struct device *dev)
+{
+ struct spi_controller *ctrl = dev_get_drvdata(dev);
+ struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ return ret;
+
+ atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR);
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_force_suspend(dev);
+
+ clk_unprepare(aq->qspick);
+ clk_unprepare(aq->pclk);
+
+ return 0;
+}
+
+static int __maybe_unused atmel_qspi_resume(struct device *dev)
+{
+ struct spi_controller *ctrl = dev_get_drvdata(dev);
+ struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
+ int ret;
+
+ clk_prepare(aq->pclk);
+ clk_prepare(aq->qspick);
+
+ ret = pm_runtime_force_resume(dev);
+ if (ret < 0)
+ return ret;
+
+ atmel_qspi_init(aq);
+
+ atmel_qspi_write(aq->scr, aq, QSPI_SCR);
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return 0;
+}
+
+static int __maybe_unused atmel_qspi_runtime_suspend(struct device *dev)
+{
+ struct spi_controller *ctrl = dev_get_drvdata(dev);
+ struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
+
+ clk_disable(aq->qspick);
+ clk_disable(aq->pclk);
+
+ return 0;
+}
+
+static int __maybe_unused atmel_qspi_runtime_resume(struct device *dev)
+{
+ struct spi_controller *ctrl = dev_get_drvdata(dev);
+ struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
+ int ret;
+
+ ret = clk_enable(aq->pclk);
+ if (ret)
+ return ret;
+
+ ret = clk_enable(aq->qspick);
+ if (ret)
+ clk_disable(aq->pclk);
+
+ return ret;
+}
+
+static const struct dev_pm_ops __maybe_unused atmel_qspi_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(atmel_qspi_suspend, atmel_qspi_resume)
+ SET_RUNTIME_PM_OPS(atmel_qspi_runtime_suspend,
+ atmel_qspi_runtime_resume, NULL)
+};
+
+static const struct atmel_qspi_caps atmel_sama5d2_qspi_caps = {};
+
+static const struct atmel_qspi_caps atmel_sam9x60_qspi_caps = {
+ .has_qspick = true,
+ .has_ricr = true,
+};
+
+static const struct of_device_id atmel_qspi_dt_ids[] = {
+ {
+ .compatible = "atmel,sama5d2-qspi",
+ .data = &atmel_sama5d2_qspi_caps,
+ },
+ {
+ .compatible = "microchip,sam9x60-qspi",
+ .data = &atmel_sam9x60_qspi_caps,
+ },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, atmel_qspi_dt_ids);
+
+static struct platform_driver atmel_qspi_driver = {
+ .driver = {
+ .name = "atmel_qspi",
+ .of_match_table = atmel_qspi_dt_ids,
+ .pm = pm_ptr(&atmel_qspi_pm_ops),
+ },
+ .probe = atmel_qspi_probe,
+ .remove = atmel_qspi_remove,
+};
+module_platform_driver(atmel_qspi_driver);
+
+MODULE_AUTHOR("Cyrille Pitchen <cyrille.pitchen@atmel.com>");
+MODULE_AUTHOR("Piotr Bugalski <bugalski.piotr@gmail.com");
+MODULE_DESCRIPTION("Atmel QSPI Controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/internals.h b/drivers/spi/internals.h
new file mode 100644
index 000000000..4a28a8395
--- /dev/null
+++ b/drivers/spi/internals.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2018 Exceet Electronics GmbH
+ * Copyright (C) 2018 Bootlin
+ *
+ * Author: Boris Brezillon <boris.brezillon@bootlin.com>
+ *
+ * Helpers needed by the spi or spi-mem logic. Should not be used outside of
+ * spi-mem.c and spi.c.
+ */
+
+#ifndef __LINUX_SPI_INTERNALS_H
+#define __LINUX_SPI_INTERNALS_H
+
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/scatterlist.h>
+#include <linux/spi/spi.h>
+
+void spi_flush_queue(struct spi_controller *ctrl);
+
+#ifdef CONFIG_HAS_DMA
+int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
+ struct sg_table *sgt, void *buf, size_t len,
+ enum dma_data_direction dir);
+void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
+ struct sg_table *sgt, enum dma_data_direction dir);
+#else /* !CONFIG_HAS_DMA */
+static inline int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
+ struct sg_table *sgt, void *buf, size_t len,
+ enum dma_data_direction dir)
+{
+ return -EINVAL;
+}
+
+static inline void spi_unmap_buf(struct spi_controller *ctlr,
+ struct device *dev, struct sg_table *sgt,
+ enum dma_data_direction dir)
+{
+}
+#endif /* CONFIG_HAS_DMA */
+
+#endif /* __LINUX_SPI_INTERNALS_H */
diff --git a/drivers/spi/spi-altera-core.c b/drivers/spi/spi-altera-core.c
new file mode 100644
index 000000000..de4d31c53
--- /dev/null
+++ b/drivers/spi/spi-altera-core.c
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Altera SPI driver
+ *
+ * Copyright (C) 2008 Thomas Chou <thomas@wytron.com.tw>
+ *
+ * Based on spi_s3c24xx.c, which is:
+ * Copyright (c) 2006 Ben Dooks
+ * Copyright (c) 2006 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ */
+
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spi/altera.h>
+#include <linux/spi/spi.h>
+#include <linux/io.h>
+#include <linux/of.h>
+
+#define DRV_NAME "spi_altera"
+
+#define ALTERA_SPI_RXDATA 0
+#define ALTERA_SPI_TXDATA 4
+#define ALTERA_SPI_STATUS 8
+#define ALTERA_SPI_CONTROL 12
+#define ALTERA_SPI_SLAVE_SEL 20
+
+#define ALTERA_SPI_STATUS_ROE_MSK 0x8
+#define ALTERA_SPI_STATUS_TOE_MSK 0x10
+#define ALTERA_SPI_STATUS_TMT_MSK 0x20
+#define ALTERA_SPI_STATUS_TRDY_MSK 0x40
+#define ALTERA_SPI_STATUS_RRDY_MSK 0x80
+#define ALTERA_SPI_STATUS_E_MSK 0x100
+
+#define ALTERA_SPI_CONTROL_IROE_MSK 0x8
+#define ALTERA_SPI_CONTROL_ITOE_MSK 0x10
+#define ALTERA_SPI_CONTROL_ITRDY_MSK 0x40
+#define ALTERA_SPI_CONTROL_IRRDY_MSK 0x80
+#define ALTERA_SPI_CONTROL_IE_MSK 0x100
+#define ALTERA_SPI_CONTROL_SSO_MSK 0x400
+
+static int altr_spi_writel(struct altera_spi *hw, unsigned int reg,
+ unsigned int val)
+{
+ int ret;
+
+ ret = regmap_write(hw->regmap, hw->regoff + reg, val);
+ if (ret)
+ dev_err(hw->dev, "failed to write reg 0x%x val 0x%x: %d\n",
+ reg, val, ret);
+
+ return ret;
+}
+
+static int altr_spi_readl(struct altera_spi *hw, unsigned int reg,
+ unsigned int *val)
+{
+ int ret;
+
+ ret = regmap_read(hw->regmap, hw->regoff + reg, val);
+ if (ret)
+ dev_err(hw->dev, "failed to read reg 0x%x: %d\n", reg, ret);
+
+ return ret;
+}
+
+static inline struct altera_spi *altera_spi_to_hw(struct spi_device *sdev)
+{
+ return spi_master_get_devdata(sdev->master);
+}
+
+static void altera_spi_set_cs(struct spi_device *spi, bool is_high)
+{
+ struct altera_spi *hw = altera_spi_to_hw(spi);
+
+ if (is_high) {
+ hw->imr &= ~ALTERA_SPI_CONTROL_SSO_MSK;
+ altr_spi_writel(hw, ALTERA_SPI_CONTROL, hw->imr);
+ altr_spi_writel(hw, ALTERA_SPI_SLAVE_SEL, 0);
+ } else {
+ altr_spi_writel(hw, ALTERA_SPI_SLAVE_SEL,
+ BIT(spi->chip_select));
+ hw->imr |= ALTERA_SPI_CONTROL_SSO_MSK;
+ altr_spi_writel(hw, ALTERA_SPI_CONTROL, hw->imr);
+ }
+}
+
+static void altera_spi_tx_word(struct altera_spi *hw)
+{
+ unsigned int txd = 0;
+
+ if (hw->tx) {
+ switch (hw->bytes_per_word) {
+ case 1:
+ txd = hw->tx[hw->count];
+ break;
+ case 2:
+ txd = (hw->tx[hw->count * 2]
+ | (hw->tx[hw->count * 2 + 1] << 8));
+ break;
+ case 4:
+ txd = (hw->tx[hw->count * 4]
+ | (hw->tx[hw->count * 4 + 1] << 8)
+ | (hw->tx[hw->count * 4 + 2] << 16)
+ | (hw->tx[hw->count * 4 + 3] << 24));
+ break;
+
+ }
+ }
+
+ altr_spi_writel(hw, ALTERA_SPI_TXDATA, txd);
+}
+
+static void altera_spi_rx_word(struct altera_spi *hw)
+{
+ unsigned int rxd;
+
+ altr_spi_readl(hw, ALTERA_SPI_RXDATA, &rxd);
+ if (hw->rx) {
+ switch (hw->bytes_per_word) {
+ case 1:
+ hw->rx[hw->count] = rxd;
+ break;
+ case 2:
+ hw->rx[hw->count * 2] = rxd;
+ hw->rx[hw->count * 2 + 1] = rxd >> 8;
+ break;
+ case 4:
+ hw->rx[hw->count * 4] = rxd;
+ hw->rx[hw->count * 4 + 1] = rxd >> 8;
+ hw->rx[hw->count * 4 + 2] = rxd >> 16;
+ hw->rx[hw->count * 4 + 3] = rxd >> 24;
+ break;
+
+ }
+ }
+
+ hw->count++;
+}
+
+static int altera_spi_txrx(struct spi_master *master,
+ struct spi_device *spi, struct spi_transfer *t)
+{
+ struct altera_spi *hw = spi_master_get_devdata(master);
+ u32 val;
+
+ hw->tx = t->tx_buf;
+ hw->rx = t->rx_buf;
+ hw->count = 0;
+ hw->bytes_per_word = DIV_ROUND_UP(t->bits_per_word, 8);
+ hw->len = t->len / hw->bytes_per_word;
+
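+ /*
+ * In interrupt mode, send the first word and return 1 so the core
+ * waits for spi_finalize_current_transfer() from the IRQ handler;
+ * otherwise poll RRDY and shift each word synchronously below.
+ */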
+ if (hw->irq >= 0) {
+ /* enable receive interrupt */
+ hw->imr |= ALTERA_SPI_CONTROL_IRRDY_MSK;
+ altr_spi_writel(hw, ALTERA_SPI_CONTROL, hw->imr);
+
+ /* send the first byte */
+ altera_spi_tx_word(hw);
+
+ return 1;
+ }
+
+ while (hw->count < hw->len) {
+ altera_spi_tx_word(hw);
+
+ for (;;) {
+ altr_spi_readl(hw, ALTERA_SPI_STATUS, &val);
+ if (val & ALTERA_SPI_STATUS_RRDY_MSK)
+ break;
+
+ cpu_relax();
+ }
+
+ altera_spi_rx_word(hw);
+ }
+ spi_finalize_current_transfer(master);
+
+ return 0;
+}
+
+irqreturn_t altera_spi_irq(int irq, void *dev)
+{
+ struct spi_master *master = dev;
+ struct altera_spi *hw = spi_master_get_devdata(master);
+
+ altera_spi_rx_word(hw);
+
+ if (hw->count < hw->len) {
+ altera_spi_tx_word(hw);
+ } else {
+ /* disable receive interrupt */
+ hw->imr &= ~ALTERA_SPI_CONTROL_IRRDY_MSK;
+ altr_spi_writel(hw, ALTERA_SPI_CONTROL, hw->imr);
+
+ spi_finalize_current_transfer(master);
+ }
+
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL_GPL(altera_spi_irq);
+
+void altera_spi_init_master(struct spi_master *master)
+{
+ struct altera_spi *hw = spi_master_get_devdata(master);
+ u32 val;
+
+ master->transfer_one = altera_spi_txrx;
+ master->set_cs = altera_spi_set_cs;
+
+ /* program defaults into the registers */
+ hw->imr = 0; /* disable spi interrupts */
+ altr_spi_writel(hw, ALTERA_SPI_CONTROL, hw->imr);
+ altr_spi_writel(hw, ALTERA_SPI_STATUS, 0); /* clear status reg */
+ altr_spi_readl(hw, ALTERA_SPI_STATUS, &val);
+ if (val & ALTERA_SPI_STATUS_RRDY_MSK)
+ altr_spi_readl(hw, ALTERA_SPI_RXDATA, &val); /* flush rxdata */
+}
+EXPORT_SYMBOL_GPL(altera_spi_init_master);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-altera-dfl.c b/drivers/spi/spi-altera-dfl.c
new file mode 100644
index 000000000..596e181ae
--- /dev/null
+++ b/drivers/spi/spi-altera-dfl.c
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// DFL bus driver for Altera SPI Master
+//
+// Copyright (C) 2020 Intel Corporation, Inc.
+//
+// Authors:
+// Matthew Gerlach <matthew.gerlach@linux.intel.com>
+//
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/stddef.h>
+#include <linux/errno.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/bitfield.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/altera.h>
+#include <linux/dfl.h>
+
+#define FME_FEATURE_ID_MAX10_SPI 0xe
+#define FME_FEATURE_REV_MAX10_SPI_N5010 0x1
+
+#define SPI_CORE_PARAMETER 0x8
+#define SHIFT_MODE BIT_ULL(1)
+#define SHIFT_MODE_MSB 0
+#define SHIFT_MODE_LSB 1
+#define DATA_WIDTH GENMASK_ULL(7, 2)
+#define NUM_CHIPSELECT GENMASK_ULL(13, 8)
+#define CLK_POLARITY BIT_ULL(14)
+#define CLK_PHASE BIT_ULL(15)
+#define PERIPHERAL_ID GENMASK_ULL(47, 32)
+#define SPI_CLK GENMASK_ULL(31, 22)
+#define SPI_INDIRECT_ACC_OFST 0x10
+
+#define INDIRECT_ADDR (SPI_INDIRECT_ACC_OFST+0x0)
+#define INDIRECT_WR BIT_ULL(8)
+#define INDIRECT_RD BIT_ULL(9)
+#define INDIRECT_RD_DATA (SPI_INDIRECT_ACC_OFST+0x8)
+#define INDIRECT_DATA_MASK GENMASK_ULL(31, 0)
+#define INDIRECT_DEBUG BIT_ULL(32)
+#define INDIRECT_WR_DATA (SPI_INDIRECT_ACC_OFST+0x10)
+#define INDIRECT_TIMEOUT 10000
+
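+/*
+ * Register accesses go through an indirect window: the word address and a
+ * read or write flag are written to INDIRECT_ADDR, the flag is polled until
+ * the hardware clears it, and data moves through the RD/WR data registers.
+ */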
+static int indirect_bus_reg_read(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ void __iomem *base = context;
+ int loops;
+ u64 v;
+
+ writeq((reg >> 2) | INDIRECT_RD, base + INDIRECT_ADDR);
+
+ loops = 0;
+ while ((readq(base + INDIRECT_ADDR) & INDIRECT_RD) &&
+ (loops++ < INDIRECT_TIMEOUT))
+ cpu_relax();
+
+ if (loops >= INDIRECT_TIMEOUT) {
+ pr_err("%s timed out %d\n", __func__, loops);
+ return -ETIME;
+ }
+
+ v = readq(base + INDIRECT_RD_DATA);
+
+ *val = v & INDIRECT_DATA_MASK;
+
+ return 0;
+}
+
+static int indirect_bus_reg_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ void __iomem *base = context;
+ int loops;
+
+ writeq(val, base + INDIRECT_WR_DATA);
+ writeq((reg >> 2) | INDIRECT_WR, base + INDIRECT_ADDR);
+
+ loops = 0;
+ while ((readq(base + INDIRECT_ADDR) & INDIRECT_WR) &&
+ (loops++ < INDIRECT_TIMEOUT))
+ cpu_relax();
+
+ if (loops >= INDIRECT_TIMEOUT) {
+ pr_err("%s timed out %d\n", __func__, loops);
+ return -ETIME;
+ }
+ return 0;
+}
+
+static const struct regmap_config indirect_regbus_cfg = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .max_register = 24,
+
+ .reg_write = indirect_bus_reg_write,
+ .reg_read = indirect_bus_reg_read,
+};
+
+static void config_spi_master(void __iomem *base, struct spi_master *master)
+{
+ u64 v;
+
+ v = readq(base + SPI_CORE_PARAMETER);
+
+ master->mode_bits = SPI_CS_HIGH;
+ if (FIELD_GET(CLK_POLARITY, v))
+ master->mode_bits |= SPI_CPOL;
+ if (FIELD_GET(CLK_PHASE, v))
+ master->mode_bits |= SPI_CPHA;
+
+ master->num_chipselect = FIELD_GET(NUM_CHIPSELECT, v);
+ master->bits_per_word_mask =
+ SPI_BPW_RANGE_MASK(1, FIELD_GET(DATA_WIDTH, v));
+}
+
+static int dfl_spi_altera_probe(struct dfl_device *dfl_dev)
+{
+ struct spi_board_info board_info = { 0 };
+ struct device *dev = &dfl_dev->dev;
+ struct spi_master *master;
+ struct altera_spi *hw;
+ void __iomem *base;
+ int err;
+
+ master = devm_spi_alloc_master(dev, sizeof(struct altera_spi));
+ if (!master)
+ return -ENOMEM;
+
+ master->bus_num = -1;
+
+ hw = spi_master_get_devdata(master);
+
+ hw->dev = dev;
+
+ base = devm_ioremap_resource(dev, &dfl_dev->mmio_res);
+
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ config_spi_master(base, master);
+ dev_dbg(dev, "%s cs %u bpm 0x%x mode 0x%x\n", __func__,
+ master->num_chipselect, master->bits_per_word_mask,
+ master->mode_bits);
+
+ hw->regmap = devm_regmap_init(dev, NULL, base, &indirect_regbus_cfg);
+ if (IS_ERR(hw->regmap))
+ return PTR_ERR(hw->regmap);
+
+ hw->irq = -EINVAL;
+
+ altera_spi_init_master(master);
+
+ err = devm_spi_register_master(dev, master);
+ if (err)
+ return dev_err_probe(dev, err, "%s failed to register spi master\n",
+ __func__);
+
+ if (dfl_dev->revision == FME_FEATURE_REV_MAX10_SPI_N5010)
+ strscpy(board_info.modalias, "m10-n5010", SPI_NAME_SIZE);
+ else
+ strscpy(board_info.modalias, "m10-d5005", SPI_NAME_SIZE);
+
+ board_info.max_speed_hz = 12500000;
+ board_info.bus_num = 0;
+ board_info.chip_select = 0;
+
+ if (!spi_new_device(master, &board_info)) {
+ dev_err(dev, "%s failed to create SPI device: %s\n",
+ __func__, board_info.modalias);
+ }
+
+ return 0;
+}
+
+static const struct dfl_device_id dfl_spi_altera_ids[] = {
+ { FME_ID, FME_FEATURE_ID_MAX10_SPI },
+ { }
+};
+
+static struct dfl_driver dfl_spi_altera_driver = {
+ .drv = {
+ .name = "dfl-spi-altera",
+ },
+ .id_table = dfl_spi_altera_ids,
+ .probe = dfl_spi_altera_probe,
+};
+
+module_dfl_driver(dfl_spi_altera_driver);
+
+MODULE_DEVICE_TABLE(dfl, dfl_spi_altera_ids);
+MODULE_DESCRIPTION("DFL spi altera driver");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-altera-platform.c b/drivers/spi/spi-altera-platform.c
new file mode 100644
index 000000000..65147aae8
--- /dev/null
+++ b/drivers/spi/spi-altera-platform.c
@@ -0,0 +1,172 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Altera SPI driver
+ *
+ * Copyright (C) 2008 Thomas Chou <thomas@wytron.com.tw>
+ *
+ * Based on spi_s3c24xx.c, which is:
+ * Copyright (c) 2006 Ben Dooks
+ * Copyright (c) 2006 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ */
+
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spi/altera.h>
+#include <linux/spi/spi.h>
+#include <linux/io.h>
+#include <linux/of.h>
+
+#define DRV_NAME "spi_altera"
+
+enum altera_spi_type {
+ ALTERA_SPI_TYPE_UNKNOWN,
+ ALTERA_SPI_TYPE_SUBDEV,
+};
+
+static const struct regmap_config spi_altera_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+};
+
+static int altera_spi_probe(struct platform_device *pdev)
+{
+ const struct platform_device_id *platid = platform_get_device_id(pdev);
+ struct altera_spi_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ enum altera_spi_type type = ALTERA_SPI_TYPE_UNKNOWN;
+ struct altera_spi *hw;
+ struct spi_master *master;
+ int err = -ENODEV;
+ u16 i;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct altera_spi));
+ if (!master)
+ return err;
+
+ /* setup the master state. */
+ master->bus_num = -1;
+
+ if (pdata) {
+ if (pdata->num_chipselect > ALTERA_SPI_MAX_CS) {
+ dev_err(&pdev->dev,
+ "Invalid number of chipselect: %u\n",
+ pdata->num_chipselect);
+ err = -EINVAL;
+ goto exit;
+ }
+
+ master->num_chipselect = pdata->num_chipselect;
+ master->mode_bits = pdata->mode_bits;
+ master->bits_per_word_mask = pdata->bits_per_word_mask;
+ } else {
+ master->num_chipselect = 16;
+ master->mode_bits = SPI_CS_HIGH;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 16);
+ }
+
+ master->dev.of_node = pdev->dev.of_node;
+
+ hw = spi_master_get_devdata(master);
+ hw->dev = &pdev->dev;
+
+ if (platid)
+ type = platid->driver_data;
+
+ /* find and map our resources */
+ if (type == ALTERA_SPI_TYPE_SUBDEV) {
+ struct resource *regoff;
+
+ hw->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!hw->regmap) {
+ dev_err(&pdev->dev, "get regmap failed\n");
+ goto exit;
+ }
+
+ regoff = platform_get_resource(pdev, IORESOURCE_REG, 0);
+ if (regoff)
+ hw->regoff = regoff->start;
+ } else {
+ void __iomem *res;
+
+ res = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(res)) {
+ err = PTR_ERR(res);
+ goto exit;
+ }
+
+ hw->regmap = devm_regmap_init_mmio(&pdev->dev, res,
+ &spi_altera_config);
+ if (IS_ERR(hw->regmap)) {
+ dev_err(&pdev->dev, "regmap mmio init failed\n");
+ err = PTR_ERR(hw->regmap);
+ goto exit;
+ }
+ }
+
+ altera_spi_init_master(master);
+
+ /* irq is optional */
+ hw->irq = platform_get_irq(pdev, 0);
+ if (hw->irq >= 0) {
+ err = devm_request_irq(&pdev->dev, hw->irq, altera_spi_irq, 0,
+ pdev->name, master);
+ if (err)
+ goto exit;
+ }
+
+ err = devm_spi_register_master(&pdev->dev, master);
+ if (err)
+ goto exit;
+
+ if (pdata) {
+ for (i = 0; i < pdata->num_devices; i++) {
+ if (!spi_new_device(master, pdata->devices + i))
+ dev_warn(&pdev->dev,
+ "unable to create SPI device: %s\n",
+ pdata->devices[i].modalias);
+ }
+ }
+
+ dev_info(&pdev->dev, "regoff %u, irq %d\n", hw->regoff, hw->irq);
+
+ return 0;
+exit:
+ spi_master_put(master);
+ return err;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id altera_spi_match[] = {
+ { .compatible = "ALTR,spi-1.0", },
+ { .compatible = "altr,spi-1.0", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, altera_spi_match);
+#endif /* CONFIG_OF */
+
+static const struct platform_device_id altera_spi_ids[] = {
+ { DRV_NAME, ALTERA_SPI_TYPE_UNKNOWN },
+ { "subdev_spi_altera", ALTERA_SPI_TYPE_SUBDEV },
+ { }
+};
+MODULE_DEVICE_TABLE(platform, altera_spi_ids);
+
+static struct platform_driver altera_spi_driver = {
+ .probe = altera_spi_probe,
+ .driver = {
+ .name = DRV_NAME,
+ .pm = NULL,
+ .of_match_table = of_match_ptr(altera_spi_match),
+ },
+ .id_table = altera_spi_ids,
+};
+module_platform_driver(altera_spi_driver);
+
+MODULE_DESCRIPTION("Altera SPI driver");
+MODULE_AUTHOR("Thomas Chou <thomas@wytron.com.tw>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/spi/spi-amd.c b/drivers/spi/spi-amd.c
new file mode 100644
index 000000000..bfc3ab5f3
--- /dev/null
+++ b/drivers/spi/spi-amd.c
@@ -0,0 +1,444 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+//
+// AMD SPI controller driver
+//
+// Copyright (c) 2020, Advanced Micro Devices, Inc.
+//
+// Author: Sanjay R Mehta <sanju.mehta@amd.com>
+
+#include <linux/acpi.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/spi/spi.h>
+#include <linux/iopoll.h>
+
+#define AMD_SPI_CTRL0_REG 0x00
+#define AMD_SPI_EXEC_CMD BIT(16)
+#define AMD_SPI_FIFO_CLEAR BIT(20)
+#define AMD_SPI_BUSY BIT(31)
+
+#define AMD_SPI_OPCODE_REG 0x45
+#define AMD_SPI_CMD_TRIGGER_REG 0x47
+#define AMD_SPI_TRIGGER_CMD BIT(7)
+
+#define AMD_SPI_OPCODE_MASK 0xFF
+
+#define AMD_SPI_ALT_CS_REG 0x1D
+#define AMD_SPI_ALT_CS_MASK 0x3
+
+#define AMD_SPI_FIFO_BASE 0x80
+#define AMD_SPI_TX_COUNT_REG 0x48
+#define AMD_SPI_RX_COUNT_REG 0x4B
+#define AMD_SPI_STATUS_REG 0x4C
+
+#define AMD_SPI_FIFO_SIZE 70
+#define AMD_SPI_MEM_SIZE 200
+
+#define AMD_SPI_ENA_REG 0x20
+#define AMD_SPI_ALT_SPD_SHIFT 20
+#define AMD_SPI_ALT_SPD_MASK GENMASK(23, AMD_SPI_ALT_SPD_SHIFT)
+#define AMD_SPI_SPI100_SHIFT 0
+#define AMD_SPI_SPI100_MASK GENMASK(AMD_SPI_SPI100_SHIFT, AMD_SPI_SPI100_SHIFT)
+#define AMD_SPI_SPEED_REG 0x6C
+#define AMD_SPI_SPD7_SHIFT 8
+#define AMD_SPI_SPD7_MASK GENMASK(13, AMD_SPI_SPD7_SHIFT)
+
+#define AMD_SPI_MAX_HZ 100000000
+#define AMD_SPI_MIN_HZ 800000
+
+/**
+ * enum amd_spi_versions - SPI controller versions
+ * @AMD_SPI_V1: AMDI0061 hardware version
+ * @AMD_SPI_V2: AMDI0062 hardware version
+ */
+enum amd_spi_versions {
+ AMD_SPI_V1 = 1,
+ AMD_SPI_V2,
+};
+
+enum amd_spi_speed {
+ F_66_66MHz,
+ F_33_33MHz,
+ F_22_22MHz,
+ F_16_66MHz,
+ F_100MHz,
+ F_800KHz,
+ SPI_SPD7 = 0x7,
+ F_50MHz = 0x4,
+ F_4MHz = 0x32,
+ F_3_17MHz = 0x3F
+};
+
+/**
+ * struct amd_spi_freq - Matches device speed with values to write in regs
+ * @speed_hz: Device frequency
+ * @enable_val: Value to be written to "enable register"
+ * @spd7_val: Some frequencies require a value to be written to the SPISPEED register
+ */
+struct amd_spi_freq {
+ u32 speed_hz;
+ u32 enable_val;
+ u32 spd7_val;
+};
+
+/**
+ * struct amd_spi - SPI driver instance
+ * @io_remap_addr: Start address of the SPI controller registers
+ * @version: SPI controller hardware version
+ * @speed_hz: Device frequency
+ */
+struct amd_spi {
+ void __iomem *io_remap_addr;
+ enum amd_spi_versions version;
+ unsigned int speed_hz;
+};
+
+static inline u8 amd_spi_readreg8(struct amd_spi *amd_spi, int idx)
+{
+ return ioread8((u8 __iomem *)amd_spi->io_remap_addr + idx);
+}
+
+static inline void amd_spi_writereg8(struct amd_spi *amd_spi, int idx, u8 val)
+{
+ iowrite8(val, ((u8 __iomem *)amd_spi->io_remap_addr + idx));
+}
+
+static void amd_spi_setclear_reg8(struct amd_spi *amd_spi, int idx, u8 set, u8 clear)
+{
+ u8 tmp = amd_spi_readreg8(amd_spi, idx);
+
+ tmp = (tmp & ~clear) | set;
+ amd_spi_writereg8(amd_spi, idx, tmp);
+}
+
+static inline u32 amd_spi_readreg32(struct amd_spi *amd_spi, int idx)
+{
+ return ioread32((u8 __iomem *)amd_spi->io_remap_addr + idx);
+}
+
+static inline void amd_spi_writereg32(struct amd_spi *amd_spi, int idx, u32 val)
+{
+ iowrite32(val, ((u8 __iomem *)amd_spi->io_remap_addr + idx));
+}
+
+static inline void amd_spi_setclear_reg32(struct amd_spi *amd_spi, int idx, u32 set, u32 clear)
+{
+ u32 tmp = amd_spi_readreg32(amd_spi, idx);
+
+ tmp = (tmp & ~clear) | set;
+ amd_spi_writereg32(amd_spi, idx, tmp);
+}
+
+static void amd_spi_select_chip(struct amd_spi *amd_spi, u8 cs)
+{
+ amd_spi_setclear_reg8(amd_spi, AMD_SPI_ALT_CS_REG, cs, AMD_SPI_ALT_CS_MASK);
+}
+
+static inline void amd_spi_clear_chip(struct amd_spi *amd_spi, u8 chip_select)
+{
+ amd_spi_writereg8(amd_spi, AMD_SPI_ALT_CS_REG, chip_select & ~AMD_SPI_ALT_CS_MASK);
+}
+
+static void amd_spi_clear_fifo_ptr(struct amd_spi *amd_spi)
+{
+ amd_spi_setclear_reg32(amd_spi, AMD_SPI_CTRL0_REG, AMD_SPI_FIFO_CLEAR, AMD_SPI_FIFO_CLEAR);
+}
+
+static int amd_spi_set_opcode(struct amd_spi *amd_spi, u8 cmd_opcode)
+{
+ switch (amd_spi->version) {
+ case AMD_SPI_V1:
+ amd_spi_setclear_reg32(amd_spi, AMD_SPI_CTRL0_REG, cmd_opcode,
+ AMD_SPI_OPCODE_MASK);
+ return 0;
+ case AMD_SPI_V2:
+ amd_spi_writereg8(amd_spi, AMD_SPI_OPCODE_REG, cmd_opcode);
+ return 0;
+ default:
+ return -ENODEV;
+ }
+}
+
+static inline void amd_spi_set_rx_count(struct amd_spi *amd_spi, u8 rx_count)
+{
+ amd_spi_setclear_reg8(amd_spi, AMD_SPI_RX_COUNT_REG, rx_count, 0xff);
+}
+
+static inline void amd_spi_set_tx_count(struct amd_spi *amd_spi, u8 tx_count)
+{
+ amd_spi_setclear_reg8(amd_spi, AMD_SPI_TX_COUNT_REG, tx_count, 0xff);
+}
+
+static int amd_spi_busy_wait(struct amd_spi *amd_spi)
+{
+ u32 val;
+ int reg;
+
+ switch (amd_spi->version) {
+ case AMD_SPI_V1:
+ reg = AMD_SPI_CTRL0_REG;
+ break;
+ case AMD_SPI_V2:
+ reg = AMD_SPI_STATUS_REG;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ return readl_poll_timeout(amd_spi->io_remap_addr + reg, val,
+ !(val & AMD_SPI_BUSY), 20, 2000000);
+}
+
+static int amd_spi_execute_opcode(struct amd_spi *amd_spi)
+{
+ int ret;
+
+ ret = amd_spi_busy_wait(amd_spi);
+ if (ret)
+ return ret;
+
+ switch (amd_spi->version) {
+ case AMD_SPI_V1:
+ /* Set ExecuteOpCode bit in the CTRL0 register */
+ amd_spi_setclear_reg32(amd_spi, AMD_SPI_CTRL0_REG, AMD_SPI_EXEC_CMD,
+ AMD_SPI_EXEC_CMD);
+ return 0;
+ case AMD_SPI_V2:
+ /* Trigger the command execution */
+ amd_spi_setclear_reg8(amd_spi, AMD_SPI_CMD_TRIGGER_REG,
+ AMD_SPI_TRIGGER_CMD, AMD_SPI_TRIGGER_CMD);
+ return 0;
+ default:
+ return -ENODEV;
+ }
+}
+
+static int amd_spi_master_setup(struct spi_device *spi)
+{
+ struct amd_spi *amd_spi = spi_master_get_devdata(spi->master);
+
+ amd_spi_clear_fifo_ptr(amd_spi);
+
+ return 0;
+}
+
+static const struct amd_spi_freq amd_spi_freq[] = {
+ { AMD_SPI_MAX_HZ, F_100MHz, 0},
+ { 66660000, F_66_66MHz, 0},
+ { 50000000, SPI_SPD7, F_50MHz},
+ { 33330000, F_33_33MHz, 0},
+ { 22220000, F_22_22MHz, 0},
+ { 16660000, F_16_66MHz, 0},
+ { 4000000, SPI_SPD7, F_4MHz},
+ { 3170000, SPI_SPD7, F_3_17MHz},
+ { AMD_SPI_MIN_HZ, F_800KHz, 0},
+};
+
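+/*
+ * The table above is sorted by descending frequency; amd_set_spi_freq()
+ * picks the first (highest) entry that does not exceed the requested speed.
+ */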
+static int amd_set_spi_freq(struct amd_spi *amd_spi, u32 speed_hz)
+{
+ unsigned int i, spd7_val, alt_spd;
+
+ if (speed_hz < AMD_SPI_MIN_HZ)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(amd_spi_freq); i++)
+ if (speed_hz >= amd_spi_freq[i].speed_hz)
+ break;
+
+ if (amd_spi->speed_hz == amd_spi_freq[i].speed_hz)
+ return 0;
+
+ amd_spi->speed_hz = amd_spi_freq[i].speed_hz;
+
+ alt_spd = (amd_spi_freq[i].enable_val << AMD_SPI_ALT_SPD_SHIFT)
+ & AMD_SPI_ALT_SPD_MASK;
+ amd_spi_setclear_reg32(amd_spi, AMD_SPI_ENA_REG, alt_spd,
+ AMD_SPI_ALT_SPD_MASK);
+
+ if (amd_spi->speed_hz == AMD_SPI_MAX_HZ)
+ amd_spi_setclear_reg32(amd_spi, AMD_SPI_ENA_REG, 1,
+ AMD_SPI_SPI100_MASK);
+
+ if (amd_spi_freq[i].spd7_val) {
+ spd7_val = (amd_spi_freq[i].spd7_val << AMD_SPI_SPD7_SHIFT)
+ & AMD_SPI_SPD7_MASK;
+ amd_spi_setclear_reg32(amd_spi, AMD_SPI_SPEED_REG, spd7_val,
+ AMD_SPI_SPD7_MASK);
+ }
+
+ return 0;
+}
+
+static inline int amd_spi_fifo_xfer(struct amd_spi *amd_spi,
+ struct spi_master *master,
+ struct spi_message *message)
+{
+ struct spi_transfer *xfer = NULL;
+ struct spi_device *spi = message->spi;
+ u8 cmd_opcode = 0, fifo_pos = AMD_SPI_FIFO_BASE;
+ u8 *buf = NULL;
+ u32 i = 0;
+ u32 tx_len = 0, rx_len = 0;
+
+ list_for_each_entry(xfer, &message->transfers,
+ transfer_list) {
+ if (xfer->speed_hz)
+ amd_set_spi_freq(amd_spi, xfer->speed_hz);
+ else
+ amd_set_spi_freq(amd_spi, spi->max_speed_hz);
+
+ if (xfer->tx_buf) {
+ buf = (u8 *)xfer->tx_buf;
+ if (!tx_len) {
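+ /* The first TX byte is the opcode; it goes to the opcode register, not the FIFO. */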
+ cmd_opcode = *(u8 *)xfer->tx_buf;
+ buf++;
+ xfer->len--;
+ }
+ tx_len += xfer->len;
+
+ /* Write data into the FIFO. */
+ for (i = 0; i < xfer->len; i++)
+ amd_spi_writereg8(amd_spi, fifo_pos + i, buf[i]);
+
+ fifo_pos += xfer->len;
+ }
+
+ /* Store no. of bytes to be received from FIFO */
+ if (xfer->rx_buf)
+ rx_len += xfer->len;
+ }
+
+ if (!buf) {
+ message->status = -EINVAL;
+ goto fin_msg;
+ }
+
+ amd_spi_set_opcode(amd_spi, cmd_opcode);
+ amd_spi_set_tx_count(amd_spi, tx_len);
+ amd_spi_set_rx_count(amd_spi, rx_len);
+
+ /* Execute command */
+ message->status = amd_spi_execute_opcode(amd_spi);
+ if (message->status)
+ goto fin_msg;
+
+ if (rx_len) {
+ message->status = amd_spi_busy_wait(amd_spi);
+ if (message->status)
+ goto fin_msg;
+
+ list_for_each_entry(xfer, &message->transfers, transfer_list)
+ if (xfer->rx_buf) {
+ buf = (u8 *)xfer->rx_buf;
+ /* Read data from FIFO to receive buffer */
+ for (i = 0; i < xfer->len; i++)
+ buf[i] = amd_spi_readreg8(amd_spi, fifo_pos + i);
+ fifo_pos += xfer->len;
+ }
+ }
+
+ /* Update statistics */
+ message->actual_length = tx_len + rx_len + 1;
+
+fin_msg:
+ switch (amd_spi->version) {
+ case AMD_SPI_V1:
+ break;
+ case AMD_SPI_V2:
+ amd_spi_clear_chip(amd_spi, message->spi->chip_select);
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ spi_finalize_current_message(master);
+
+ return message->status;
+}
+
+static int amd_spi_master_transfer(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct amd_spi *amd_spi = spi_master_get_devdata(master);
+ struct spi_device *spi = msg->spi;
+
+ amd_spi_select_chip(amd_spi, spi->chip_select);
+
+ /*
+ * Extract spi_transfers from the spi message and
+ * program the controller.
+ */
+ return amd_spi_fifo_xfer(amd_spi, master, msg);
+}
+
+static size_t amd_spi_max_transfer_size(struct spi_device *spi)
+{
+ return AMD_SPI_FIFO_SIZE;
+}
+
+static int amd_spi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct spi_master *master;
+ struct amd_spi *amd_spi;
+ int err;
+
+ /* Allocate storage for spi_master and driver private data */
+ master = devm_spi_alloc_master(dev, sizeof(struct amd_spi));
+ if (!master)
+ return dev_err_probe(dev, -ENOMEM, "Error allocating SPI master\n");
+
+ amd_spi = spi_master_get_devdata(master);
+ amd_spi->io_remap_addr = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(amd_spi->io_remap_addr))
+ return dev_err_probe(dev, PTR_ERR(amd_spi->io_remap_addr),
+ "ioremap of SPI registers failed\n");
+
+ dev_dbg(dev, "io_remap_address: %p\n", amd_spi->io_remap_addr);
+
+ amd_spi->version = (enum amd_spi_versions) device_get_match_data(dev);
+
+ /* Initialize the spi_master fields */
+ master->bus_num = 0;
+ master->num_chipselect = 4;
+ master->mode_bits = 0;
+ master->flags = SPI_MASTER_HALF_DUPLEX;
+ master->max_speed_hz = AMD_SPI_MAX_HZ;
+ master->min_speed_hz = AMD_SPI_MIN_HZ;
+ master->setup = amd_spi_master_setup;
+ master->transfer_one_message = amd_spi_master_transfer;
+ master->max_transfer_size = amd_spi_max_transfer_size;
+ master->max_message_size = amd_spi_max_transfer_size;
+
+ /* Register the controller with SPI framework */
+ err = devm_spi_register_master(dev, master);
+ if (err)
+ return dev_err_probe(dev, err, "error registering SPI controller\n");
+
+ return 0;
+}
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id spi_acpi_match[] = {
+ { "AMDI0061", AMD_SPI_V1 },
+ { "AMDI0062", AMD_SPI_V2 },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, spi_acpi_match);
+#endif
+
+static struct platform_driver amd_spi_driver = {
+ .driver = {
+ .name = "amd_spi",
+ .acpi_match_table = ACPI_PTR(spi_acpi_match),
+ },
+ .probe = amd_spi_probe,
+};
+
+module_platform_driver(amd_spi_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Sanjay Mehta <sanju.mehta@amd.com>");
+MODULE_DESCRIPTION("AMD SPI Master Controller Driver");
diff --git a/drivers/spi/spi-ar934x.c b/drivers/spi/spi-ar934x.c
new file mode 100644
index 000000000..ec7250c4c
--- /dev/null
+++ b/drivers/spi/spi-ar934x.c
@@ -0,0 +1,251 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// SPI controller driver for Qualcomm Atheros AR934x/QCA95xx SoCs
+//
+// Copyright (C) 2020 Chuanhong Guo <gch981213@gmail.com>
+//
+// Based on spi-mt7621.c:
+// Copyright (C) 2011 Sergiy <piratfm@gmail.com>
+// Copyright (C) 2011-2013 Gabor Juhos <juhosg@openwrt.org>
+// Copyright (C) 2014-2015 Felix Fietkau <nbd@nbd.name>
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/spi/spi.h>
+
+#define DRIVER_NAME "spi-ar934x"
+
+#define AR934X_SPI_REG_FS 0x00
+#define AR934X_SPI_ENABLE BIT(0)
+
+#define AR934X_SPI_REG_IOC 0x08
+#define AR934X_SPI_IOC_INITVAL 0x70000
+
+#define AR934X_SPI_REG_CTRL 0x04
+#define AR934X_SPI_CLK_MASK GENMASK(5, 0)
+
+#define AR934X_SPI_DATAOUT 0x10
+
+#define AR934X_SPI_REG_SHIFT_CTRL 0x14
+#define AR934X_SPI_SHIFT_EN BIT(31)
+#define AR934X_SPI_SHIFT_CS(n) BIT(28 + (n))
+#define AR934X_SPI_SHIFT_TERM 26
+#define AR934X_SPI_SHIFT_VAL(cs, term, count) \
+ (AR934X_SPI_SHIFT_EN | AR934X_SPI_SHIFT_CS(cs) | \
+ (term) << AR934X_SPI_SHIFT_TERM | (count))
+
+#define AR934X_SPI_DATAIN 0x18
+
+struct ar934x_spi {
+ struct spi_controller *ctlr;
+ void __iomem *base;
+ struct clk *clk;
+ unsigned int clk_freq;
+};
+
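+/*
+ * The bus clock comes out as clk_freq / (2 * (div + 1)); e.g. requesting
+ * 25 MHz from a 100 MHz reference clock yields div = 1.
+ */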
+static inline int ar934x_spi_clk_div(struct ar934x_spi *sp, unsigned int freq)
+{
+ int div = DIV_ROUND_UP(sp->clk_freq, freq * 2) - 1;
+
+ if (div < 0)
+ return 0;
+ else if (div > AR934X_SPI_CLK_MASK)
+ return -EINVAL;
+ else
+ return div;
+}
+
+static int ar934x_spi_setup(struct spi_device *spi)
+{
+ struct ar934x_spi *sp = spi_controller_get_devdata(spi->master);
+
+ if ((spi->max_speed_hz == 0) ||
+ (spi->max_speed_hz > (sp->clk_freq / 2))) {
+ spi->max_speed_hz = sp->clk_freq / 2;
+ } else if (spi->max_speed_hz < (sp->clk_freq / 128)) {
+ dev_err(&spi->dev, "spi clock is too low\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ar934x_spi_transfer_one_message(struct spi_controller *master,
+ struct spi_message *m)
+{
+ struct ar934x_spi *sp = spi_controller_get_devdata(master);
+ struct spi_transfer *t = NULL;
+ struct spi_device *spi = m->spi;
+ unsigned long trx_done, trx_cur;
+ int stat = 0;
+ u8 bpw, term = 0;
+ int div, i;
+ u32 reg;
+ const u8 *tx_buf;
+ u8 *buf;
+
+ m->actual_length = 0;
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ if (t->bits_per_word >= 8 && t->bits_per_word < 32)
+ bpw = t->bits_per_word >> 3;
+ else
+ bpw = 4;
+
+ if (t->speed_hz)
+ div = ar934x_spi_clk_div(sp, t->speed_hz);
+ else
+ div = ar934x_spi_clk_div(sp, spi->max_speed_hz);
+ if (div < 0) {
+ stat = -EIO;
+ goto msg_done;
+ }
+
+ reg = ioread32(sp->base + AR934X_SPI_REG_CTRL);
+ reg &= ~AR934X_SPI_CLK_MASK;
+ reg |= div;
+ iowrite32(reg, sp->base + AR934X_SPI_REG_CTRL);
+ iowrite32(0, sp->base + AR934X_SPI_DATAOUT);
+
+ for (trx_done = 0; trx_done < t->len; trx_done += bpw) {
+ trx_cur = t->len - trx_done;
+ if (trx_cur > bpw)
+ trx_cur = bpw;
+ else if (list_is_last(&t->transfer_list, &m->transfers))
+ term = 1;
+
+ if (t->tx_buf) {
+ tx_buf = t->tx_buf + trx_done;
+ reg = tx_buf[0];
+ for (i = 1; i < trx_cur; i++)
+ reg = reg << 8 | tx_buf[i];
+ iowrite32(reg, sp->base + AR934X_SPI_DATAOUT);
+ }
+
+ reg = AR934X_SPI_SHIFT_VAL(spi->chip_select, term,
+ trx_cur * 8);
+ iowrite32(reg, sp->base + AR934X_SPI_REG_SHIFT_CTRL);
+ stat = readl_poll_timeout(
+ sp->base + AR934X_SPI_REG_SHIFT_CTRL, reg,
+ !(reg & AR934X_SPI_SHIFT_EN), 0, 5);
+ if (stat < 0)
+ goto msg_done;
+
+ if (t->rx_buf) {
+ reg = ioread32(sp->base + AR934X_SPI_DATAIN);
+ buf = t->rx_buf + trx_done;
+ for (i = 0; i < trx_cur; i++) {
+ buf[trx_cur - i - 1] = reg & 0xff;
+ reg >>= 8;
+ }
+ }
+ spi_delay_exec(&t->word_delay, t);
+ }
+ m->actual_length += t->len;
+ spi_transfer_delay_exec(t);
+ }
+
+msg_done:
+ m->status = stat;
+ spi_finalize_current_message(master);
+
+ return 0;
+}
+
+static const struct of_device_id ar934x_spi_match[] = {
+ { .compatible = "qca,ar934x-spi" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ar934x_spi_match);
+
+static int ar934x_spi_probe(struct platform_device *pdev)
+{
+ struct spi_controller *ctlr;
+ struct ar934x_spi *sp;
+ void __iomem *base;
+ struct clk *clk;
+ int ret;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "failed to get clock\n");
+ return PTR_ERR(clk);
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
+
+ ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*sp));
+ if (!ctlr) {
+ dev_info(&pdev->dev, "failed to allocate spi controller\n");
+ ret = -ENOMEM;
+ goto err_clk_disable;
+ }
+
+ /* disable flash mapping and expose spi controller registers */
+ iowrite32(AR934X_SPI_ENABLE, base + AR934X_SPI_REG_FS);
+ /* restore pins to default state: CSn=1 DO=CLK=0 */
+ iowrite32(AR934X_SPI_IOC_INITVAL, base + AR934X_SPI_REG_IOC);
+
+ ctlr->mode_bits = SPI_LSB_FIRST;
+ ctlr->setup = ar934x_spi_setup;
+ ctlr->transfer_one_message = ar934x_spi_transfer_one_message;
+ ctlr->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(24) |
+ SPI_BPW_MASK(16) | SPI_BPW_MASK(8);
+ ctlr->dev.of_node = pdev->dev.of_node;
+ ctlr->num_chipselect = 3;
+
+ dev_set_drvdata(&pdev->dev, ctlr);
+
+ sp = spi_controller_get_devdata(ctlr);
+ sp->base = base;
+ sp->clk = clk;
+ sp->clk_freq = clk_get_rate(clk);
+ sp->ctlr = ctlr;
+
+ ret = spi_register_controller(ctlr);
+ if (!ret)
+ return 0;
+
+err_clk_disable:
+ clk_disable_unprepare(clk);
+ return ret;
+}
+
+static int ar934x_spi_remove(struct platform_device *pdev)
+{
+ struct spi_controller *ctlr;
+ struct ar934x_spi *sp;
+
+ ctlr = dev_get_drvdata(&pdev->dev);
+ sp = spi_controller_get_devdata(ctlr);
+
+ spi_unregister_controller(ctlr);
+ clk_disable_unprepare(sp->clk);
+
+ return 0;
+}
+
+static struct platform_driver ar934x_spi_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = ar934x_spi_match,
+ },
+ .probe = ar934x_spi_probe,
+ .remove = ar934x_spi_remove,
+};
+
+module_platform_driver(ar934x_spi_driver);
+
+MODULE_DESCRIPTION("SPI controller driver for Qualcomm Atheros AR934x/QCA95xx");
+MODULE_AUTHOR("Chuanhong Guo <gch981213@gmail.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/spi/spi-armada-3700.c b/drivers/spi/spi-armada-3700.c
new file mode 100644
index 000000000..9df9fc40b
--- /dev/null
+++ b/drivers/spi/spi-armada-3700.c
@@ -0,0 +1,935 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Marvell Armada-3700 SPI controller driver
+ *
+ * Copyright (C) 2016 Marvell Ltd.
+ *
+ * Author: Wilson Ding <dingwei@marvell.com>
+ * Author: Romain Perier <romain.perier@free-electrons.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/spi/spi.h>
+
+#define DRIVER_NAME "armada_3700_spi"
+
+#define A3700_SPI_MAX_SPEED_HZ 100000000
+#define A3700_SPI_MAX_PRESCALE 30
+#define A3700_SPI_TIMEOUT 10
+
+/* SPI Register Offset */
+#define A3700_SPI_IF_CTRL_REG 0x00
+#define A3700_SPI_IF_CFG_REG 0x04
+#define A3700_SPI_DATA_OUT_REG 0x08
+#define A3700_SPI_DATA_IN_REG 0x0C
+#define A3700_SPI_IF_INST_REG 0x10
+#define A3700_SPI_IF_ADDR_REG 0x14
+#define A3700_SPI_IF_RMODE_REG 0x18
+#define A3700_SPI_IF_HDR_CNT_REG 0x1C
+#define A3700_SPI_IF_DIN_CNT_REG 0x20
+#define A3700_SPI_IF_TIME_REG 0x24
+#define A3700_SPI_INT_STAT_REG 0x28
+#define A3700_SPI_INT_MASK_REG 0x2C
+
+/* A3700_SPI_IF_CTRL_REG */
+#define A3700_SPI_EN BIT(16)
+#define A3700_SPI_ADDR_NOT_CONFIG BIT(12)
+#define A3700_SPI_WFIFO_OVERFLOW BIT(11)
+#define A3700_SPI_WFIFO_UNDERFLOW BIT(10)
+#define A3700_SPI_RFIFO_OVERFLOW BIT(9)
+#define A3700_SPI_RFIFO_UNDERFLOW BIT(8)
+#define A3700_SPI_WFIFO_FULL BIT(7)
+#define A3700_SPI_WFIFO_EMPTY BIT(6)
+#define A3700_SPI_RFIFO_FULL BIT(5)
+#define A3700_SPI_RFIFO_EMPTY BIT(4)
+#define A3700_SPI_WFIFO_RDY BIT(3)
+#define A3700_SPI_RFIFO_RDY BIT(2)
+#define A3700_SPI_XFER_RDY BIT(1)
+#define A3700_SPI_XFER_DONE BIT(0)
+
+/* A3700_SPI_IF_CFG_REG */
+#define A3700_SPI_WFIFO_THRS BIT(28)
+#define A3700_SPI_RFIFO_THRS BIT(24)
+#define A3700_SPI_AUTO_CS BIT(20)
+#define A3700_SPI_DMA_RD_EN BIT(18)
+#define A3700_SPI_FIFO_MODE BIT(17)
+#define A3700_SPI_SRST BIT(16)
+#define A3700_SPI_XFER_START BIT(15)
+#define A3700_SPI_XFER_STOP BIT(14)
+#define A3700_SPI_INST_PIN BIT(13)
+#define A3700_SPI_ADDR_PIN BIT(12)
+#define A3700_SPI_DATA_PIN1 BIT(11)
+#define A3700_SPI_DATA_PIN0 BIT(10)
+#define A3700_SPI_FIFO_FLUSH BIT(9)
+#define A3700_SPI_RW_EN BIT(8)
+#define A3700_SPI_CLK_POL BIT(7)
+#define A3700_SPI_CLK_PHA BIT(6)
+#define A3700_SPI_BYTE_LEN BIT(5)
+#define A3700_SPI_CLK_PRESCALE BIT(0)
+#define A3700_SPI_CLK_PRESCALE_MASK (0x1f)
+#define A3700_SPI_CLK_EVEN_OFFS (0x10)
+
+#define A3700_SPI_WFIFO_THRS_BIT 28
+#define A3700_SPI_RFIFO_THRS_BIT 24
+#define A3700_SPI_FIFO_THRS_MASK 0x7
+
+#define A3700_SPI_DATA_PIN_MASK 0x3
+
+/* A3700_SPI_IF_HDR_CNT_REG */
+#define A3700_SPI_DUMMY_CNT_BIT 12
+#define A3700_SPI_DUMMY_CNT_MASK 0x7
+#define A3700_SPI_RMODE_CNT_BIT 8
+#define A3700_SPI_RMODE_CNT_MASK 0x3
+#define A3700_SPI_ADDR_CNT_BIT 4
+#define A3700_SPI_ADDR_CNT_MASK 0x7
+#define A3700_SPI_INSTR_CNT_BIT 0
+#define A3700_SPI_INSTR_CNT_MASK 0x3
+
+/* A3700_SPI_IF_TIME_REG */
+#define A3700_SPI_CLK_CAPT_EDGE BIT(7)
+
+struct a3700_spi {
+ struct spi_master *master;
+ void __iomem *base;
+ struct clk *clk;
+ unsigned int irq;
+ unsigned int flags;
+ bool xmit_data;
+ const u8 *tx_buf;
+ u8 *rx_buf;
+ size_t buf_len;
+ u8 byte_len;
+ u32 wait_mask;
+ struct completion done;
+};
+
+static u32 spireg_read(struct a3700_spi *a3700_spi, u32 offset)
+{
+ return readl(a3700_spi->base + offset);
+}
+
+static void spireg_write(struct a3700_spi *a3700_spi, u32 offset, u32 data)
+{
+ writel(data, a3700_spi->base + offset);
+}
+
+static void a3700_spi_auto_cs_unset(struct a3700_spi *a3700_spi)
+{
+ u32 val;
+
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+ val &= ~A3700_SPI_AUTO_CS;
+ spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+}
+
+static void a3700_spi_activate_cs(struct a3700_spi *a3700_spi, unsigned int cs)
+{
+ u32 val;
+
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CTRL_REG);
+ val |= (A3700_SPI_EN << cs);
+ spireg_write(a3700_spi, A3700_SPI_IF_CTRL_REG, val);
+}
+
+static void a3700_spi_deactivate_cs(struct a3700_spi *a3700_spi,
+ unsigned int cs)
+{
+ u32 val;
+
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CTRL_REG);
+ val &= ~(A3700_SPI_EN << cs);
+ spireg_write(a3700_spi, A3700_SPI_IF_CTRL_REG, val);
+}
+
+static int a3700_spi_pin_mode_set(struct a3700_spi *a3700_spi,
+ unsigned int pin_mode, bool receiving)
+{
+ u32 val;
+
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+ val &= ~(A3700_SPI_INST_PIN | A3700_SPI_ADDR_PIN);
+ val &= ~(A3700_SPI_DATA_PIN0 | A3700_SPI_DATA_PIN1);
+
+ switch (pin_mode) {
+ case SPI_NBITS_SINGLE:
+ break;
+ case SPI_NBITS_DUAL:
+ val |= A3700_SPI_DATA_PIN0;
+ break;
+ case SPI_NBITS_QUAD:
+ val |= A3700_SPI_DATA_PIN1;
+ /* RX during address reception uses 4-pin */
+ if (receiving)
+ val |= A3700_SPI_ADDR_PIN;
+ break;
+ default:
+ dev_err(&a3700_spi->master->dev, "wrong pin mode %u\n", pin_mode);
+ return -EINVAL;
+ }
+
+ spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+
+ return 0;
+}
+
+static void a3700_spi_fifo_mode_set(struct a3700_spi *a3700_spi, bool enable)
+{
+ u32 val;
+
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+ if (enable)
+ val |= A3700_SPI_FIFO_MODE;
+ else
+ val &= ~A3700_SPI_FIFO_MODE;
+ spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+}
+
+static void a3700_spi_mode_set(struct a3700_spi *a3700_spi,
+ unsigned int mode_bits)
+{
+ u32 val;
+
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+
+ if (mode_bits & SPI_CPOL)
+ val |= A3700_SPI_CLK_POL;
+ else
+ val &= ~A3700_SPI_CLK_POL;
+
+ if (mode_bits & SPI_CPHA)
+ val |= A3700_SPI_CLK_PHA;
+ else
+ val &= ~A3700_SPI_CLK_PHA;
+
+ spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+}
+
+static void a3700_spi_clock_set(struct a3700_spi *a3700_spi,
+ unsigned int speed_hz)
+{
+ u32 val;
+ u32 prescale;
+
+ prescale = DIV_ROUND_UP(clk_get_rate(a3700_spi->clk), speed_hz);
+
+ /* For prescaler values over 15, the prescaler can only be set in
+ * steps of 2. Starting from A3700_SPI_CLK_EVEN_OFFS, values from 0 up
+ * to 30 can be encoded; we only use the range from 16 to 30.
+ */
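+ /* Example of the arithmetic above (this only illustrates the value
+ * programmed below, not the hardware divider itself): with a 100 MHz
+ * source clock and a 4 MHz transfer, DIV_ROUND_UP() gives 25, which
+ * is above 15, so the value written becomes 16 + DIV_ROUND_UP(25, 2),
+ * i.e. 29.
+ */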
+ if (prescale > 15)
+ prescale = A3700_SPI_CLK_EVEN_OFFS + DIV_ROUND_UP(prescale, 2);
+
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+ val = val & ~A3700_SPI_CLK_PRESCALE_MASK;
+
+ val = val | (prescale & A3700_SPI_CLK_PRESCALE_MASK);
+ spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+
+ if (prescale <= 2) {
+ val = spireg_read(a3700_spi, A3700_SPI_IF_TIME_REG);
+ val |= A3700_SPI_CLK_CAPT_EDGE;
+ spireg_write(a3700_spi, A3700_SPI_IF_TIME_REG, val);
+ }
+}
+
+static void a3700_spi_bytelen_set(struct a3700_spi *a3700_spi, unsigned int len)
+{
+ u32 val;
+
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+ if (len == 4)
+ val |= A3700_SPI_BYTE_LEN;
+ else
+ val &= ~A3700_SPI_BYTE_LEN;
+ spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+
+ a3700_spi->byte_len = len;
+}
+
+static int a3700_spi_fifo_flush(struct a3700_spi *a3700_spi)
+{
+ int timeout = A3700_SPI_TIMEOUT;
+ u32 val;
+
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+ val |= A3700_SPI_FIFO_FLUSH;
+ spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+
+ while (--timeout) {
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+ if (!(val & A3700_SPI_FIFO_FLUSH))
+ return 0;
+ udelay(1);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static void a3700_spi_init(struct a3700_spi *a3700_spi)
+{
+ struct spi_master *master = a3700_spi->master;
+ u32 val;
+ int i;
+
+ /* Reset SPI unit */
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+ val |= A3700_SPI_SRST;
+ spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+
+ udelay(A3700_SPI_TIMEOUT);
+
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+ val &= ~A3700_SPI_SRST;
+ spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+
+ /* Disable AUTO_CS and deactivate all chip-selects */
+ a3700_spi_auto_cs_unset(a3700_spi);
+ for (i = 0; i < master->num_chipselect; i++)
+ a3700_spi_deactivate_cs(a3700_spi, i);
+
+ /* Enable FIFO mode */
+ a3700_spi_fifo_mode_set(a3700_spi, true);
+
+ /* Set SPI mode */
+ a3700_spi_mode_set(a3700_spi, master->mode_bits);
+
+ /* Reset counters */
+ spireg_write(a3700_spi, A3700_SPI_IF_HDR_CNT_REG, 0);
+ spireg_write(a3700_spi, A3700_SPI_IF_DIN_CNT_REG, 0);
+
+ /* Mask the interrupts and clear cause bits */
+ spireg_write(a3700_spi, A3700_SPI_INT_MASK_REG, 0);
+ spireg_write(a3700_spi, A3700_SPI_INT_STAT_REG, ~0U);
+}
+
+static irqreturn_t a3700_spi_interrupt(int irq, void *dev_id)
+{
+ struct spi_master *master = dev_id;
+ struct a3700_spi *a3700_spi;
+ u32 cause;
+
+ a3700_spi = spi_master_get_devdata(master);
+
+ /* Get interrupt causes */
+ cause = spireg_read(a3700_spi, A3700_SPI_INT_STAT_REG);
+
+ if (!cause || !(a3700_spi->wait_mask & cause))
+ return IRQ_NONE;
+
+ /* mask and acknowledge the SPI interrupts */
+ spireg_write(a3700_spi, A3700_SPI_INT_MASK_REG, 0);
+ spireg_write(a3700_spi, A3700_SPI_INT_STAT_REG, cause);
+
+ /* Wake up the transfer */
+ complete(&a3700_spi->done);
+
+ return IRQ_HANDLED;
+}
+
+static bool a3700_spi_wait_completion(struct spi_device *spi)
+{
+ struct a3700_spi *a3700_spi;
+ unsigned int timeout;
+ unsigned int ctrl_reg;
+ unsigned long timeout_jiffies;
+
+ a3700_spi = spi_master_get_devdata(spi->master);
+
+ /* SPI interrupt is edge-triggered, which means an interrupt will
+ * be generated only when detecting a specific status bit changed
+ * from '0' to '1'. So when we start waiting for an interrupt, we
+ * first need to check the status bit in the control register; if it
+ * is already 1, there is no need to wait for the interrupt.
+ */
+ ctrl_reg = spireg_read(a3700_spi, A3700_SPI_IF_CTRL_REG);
+ if (a3700_spi->wait_mask & ctrl_reg)
+ return true;
+
+ reinit_completion(&a3700_spi->done);
+
+ spireg_write(a3700_spi, A3700_SPI_INT_MASK_REG,
+ a3700_spi->wait_mask);
+
+ timeout_jiffies = msecs_to_jiffies(A3700_SPI_TIMEOUT);
+ timeout = wait_for_completion_timeout(&a3700_spi->done,
+ timeout_jiffies);
+
+ a3700_spi->wait_mask = 0;
+
+ if (timeout)
+ return true;
+
+ /* There might be a case where the interrupt fires right after we
+ * checked the status bits in this routine and before we started
+ * waiting for it with wait_for_completion_timeout. To avoid missing
+ * it, double check the status bits in the control register; if the
+ * bit is already 1, consider that the interrupt arrived and return
+ * true.
+ */
+ ctrl_reg = spireg_read(a3700_spi, A3700_SPI_IF_CTRL_REG);
+ if (a3700_spi->wait_mask & ctrl_reg)
+ return true;
+
+ spireg_write(a3700_spi, A3700_SPI_INT_MASK_REG, 0);
+
+ /* Timeout was reached */
+ return false;
+}
+
+static bool a3700_spi_transfer_wait(struct spi_device *spi,
+ unsigned int bit_mask)
+{
+ struct a3700_spi *a3700_spi;
+
+ a3700_spi = spi_master_get_devdata(spi->master);
+ a3700_spi->wait_mask = bit_mask;
+
+ return a3700_spi_wait_completion(spi);
+}
+
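+/*
+ * Program the FIFO thresholds: the read FIFO threshold is set to
+ * (bytes - 1) and the write FIFO threshold to (7 - bytes), where
+ * 'bytes' is the number of bytes moved per FIFO access (presumably
+ * these gate the RFIFO_RDY/WFIFO_RDY bits waited on in the transfer
+ * loop).
+ */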
+static void a3700_spi_fifo_thres_set(struct a3700_spi *a3700_spi,
+ unsigned int bytes)
+{
+ u32 val;
+
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+ val &= ~(A3700_SPI_FIFO_THRS_MASK << A3700_SPI_RFIFO_THRS_BIT);
+ val |= (bytes - 1) << A3700_SPI_RFIFO_THRS_BIT;
+ val &= ~(A3700_SPI_FIFO_THRS_MASK << A3700_SPI_WFIFO_THRS_BIT);
+ val |= (7 - bytes) << A3700_SPI_WFIFO_THRS_BIT;
+ spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+}
+
+static void a3700_spi_transfer_setup(struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct a3700_spi *a3700_spi;
+
+ a3700_spi = spi_master_get_devdata(spi->master);
+
+ a3700_spi_clock_set(a3700_spi, xfer->speed_hz);
+
+ /* Use 4-byte transfers. Each transfer method has its own way of
+ * dealing with the remaining bytes of transfers that are not 4-byte
+ * aligned.
+ */
+ a3700_spi_bytelen_set(a3700_spi, 4);
+
+ /* Initialize the working buffers */
+ a3700_spi->tx_buf = xfer->tx_buf;
+ a3700_spi->rx_buf = xfer->rx_buf;
+ a3700_spi->buf_len = xfer->len;
+}
+
+static void a3700_spi_set_cs(struct spi_device *spi, bool enable)
+{
+ struct a3700_spi *a3700_spi = spi_master_get_devdata(spi->master);
+
+ if (!enable)
+ a3700_spi_activate_cs(a3700_spi, spi->chip_select);
+ else
+ a3700_spi_deactivate_cs(a3700_spi, spi->chip_select);
+}
+
+static void a3700_spi_header_set(struct a3700_spi *a3700_spi)
+{
+ unsigned int addr_cnt;
+ u32 val = 0;
+
+ /* Clear the header registers */
+ spireg_write(a3700_spi, A3700_SPI_IF_INST_REG, 0);
+ spireg_write(a3700_spi, A3700_SPI_IF_ADDR_REG, 0);
+ spireg_write(a3700_spi, A3700_SPI_IF_RMODE_REG, 0);
+ spireg_write(a3700_spi, A3700_SPI_IF_HDR_CNT_REG, 0);
+
+ /* Set header counters */
+ if (a3700_spi->tx_buf) {
+ /*
+ * When the tx data is not 4-byte aligned, unexpected bytes would be
+ * shifted out of the SPI output register, since it always shifts out
+ * whole 4-byte words. This could corrupt the transaction with some
+ * devices. To avoid that, use the SPI header count feature to
+ * transfer up to 3 bytes of data first, so that the rest of the data
+ * is 4-byte aligned.
+ */
+ addr_cnt = a3700_spi->buf_len % 4;
+ if (addr_cnt) {
+ val = (addr_cnt & A3700_SPI_ADDR_CNT_MASK)
+ << A3700_SPI_ADDR_CNT_BIT;
+ spireg_write(a3700_spi, A3700_SPI_IF_HDR_CNT_REG, val);
+
+ /* Update the buffer length to be transferred */
+ a3700_spi->buf_len -= addr_cnt;
+
+ /* transfer 1~3 bytes through address count */
+ val = 0;
+ while (addr_cnt--) {
+ val = (val << 8) | a3700_spi->tx_buf[0];
+ a3700_spi->tx_buf++;
+ }
+ spireg_write(a3700_spi, A3700_SPI_IF_ADDR_REG, val);
+ }
+ }
+}
+
+static int a3700_is_wfifo_full(struct a3700_spi *a3700_spi)
+{
+ u32 val;
+
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CTRL_REG);
+ return (val & A3700_SPI_WFIFO_FULL);
+}
+
+static int a3700_spi_fifo_write(struct a3700_spi *a3700_spi)
+{
+ u32 val;
+
+ while (!a3700_is_wfifo_full(a3700_spi) && a3700_spi->buf_len) {
+ val = *(u32 *)a3700_spi->tx_buf;
+ spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG, cpu_to_le32(val));
+ a3700_spi->buf_len -= 4;
+ a3700_spi->tx_buf += 4;
+ }
+
+ return 0;
+}
+
+static int a3700_is_rfifo_empty(struct a3700_spi *a3700_spi)
+{
+ u32 val = spireg_read(a3700_spi, A3700_SPI_IF_CTRL_REG);
+
+ return (val & A3700_SPI_RFIFO_EMPTY);
+}
+
+static int a3700_spi_fifo_read(struct a3700_spi *a3700_spi)
+{
+ u32 val;
+
+ while (!a3700_is_rfifo_empty(a3700_spi) && a3700_spi->buf_len) {
+ val = spireg_read(a3700_spi, A3700_SPI_DATA_IN_REG);
+ if (a3700_spi->buf_len >= 4) {
+ val = le32_to_cpu(val);
+ memcpy(a3700_spi->rx_buf, &val, 4);
+
+ a3700_spi->buf_len -= 4;
+ a3700_spi->rx_buf += 4;
+ } else {
+ /*
+ * When fewer than 4 bytes remain, avoid writing past the end of
+ * the rx buffer and only store the remaining bytes.
+ */
+ while (a3700_spi->buf_len) {
+ *a3700_spi->rx_buf = val & 0xff;
+ val >>= 8;
+
+ a3700_spi->buf_len--;
+ a3700_spi->rx_buf++;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static void a3700_spi_transfer_abort_fifo(struct a3700_spi *a3700_spi)
+{
+ int timeout = A3700_SPI_TIMEOUT;
+ u32 val;
+
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+ val |= A3700_SPI_XFER_STOP;
+ spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+
+ while (--timeout) {
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+ if (!(val & A3700_SPI_XFER_START))
+ break;
+ udelay(1);
+ }
+
+ a3700_spi_fifo_flush(a3700_spi);
+
+ val &= ~A3700_SPI_XFER_STOP;
+ spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+}
+
+static int a3700_spi_prepare_message(struct spi_master *master,
+ struct spi_message *message)
+{
+ struct a3700_spi *a3700_spi = spi_master_get_devdata(master);
+ struct spi_device *spi = message->spi;
+ int ret;
+
+ ret = clk_enable(a3700_spi->clk);
+ if (ret) {
+ dev_err(&spi->dev, "failed to enable clk with error %d\n", ret);
+ return ret;
+ }
+
+ /* Flush the FIFOs */
+ ret = a3700_spi_fifo_flush(a3700_spi);
+ if (ret)
+ return ret;
+
+ a3700_spi_mode_set(a3700_spi, spi->mode);
+
+ return 0;
+}
+
+static int a3700_spi_transfer_one_fifo(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct a3700_spi *a3700_spi = spi_master_get_devdata(master);
+ int ret = 0, timeout = A3700_SPI_TIMEOUT;
+ unsigned int nbits = 0, byte_len;
+ u32 val;
+
+ /* Make sure we use FIFO mode */
+ a3700_spi_fifo_mode_set(a3700_spi, true);
+
+ /* Configure FIFO thresholds */
+ byte_len = xfer->bits_per_word >> 3;
+ a3700_spi_fifo_thres_set(a3700_spi, byte_len);
+
+ if (xfer->tx_buf)
+ nbits = xfer->tx_nbits;
+ else if (xfer->rx_buf)
+ nbits = xfer->rx_nbits;
+
+ a3700_spi_pin_mode_set(a3700_spi, nbits, xfer->rx_buf ? true : false);
+
+ /* Flush the FIFOs */
+ a3700_spi_fifo_flush(a3700_spi);
+
+ /* Transfer first bytes of data when buffer is not 4-byte aligned */
+ a3700_spi_header_set(a3700_spi);
+
+ if (xfer->rx_buf) {
+ /* Clear the WFIFO, since its last 2 bytes are shifted out during
+ * a read operation.
+ */
+ spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG, 0);
+
+ /* Set read data length */
+ spireg_write(a3700_spi, A3700_SPI_IF_DIN_CNT_REG,
+ a3700_spi->buf_len);
+ /* Start READ transfer */
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+ val &= ~A3700_SPI_RW_EN;
+ val |= A3700_SPI_XFER_START;
+ spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+ } else if (xfer->tx_buf) {
+ /* Start Write transfer */
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+ val |= (A3700_SPI_XFER_START | A3700_SPI_RW_EN);
+ spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+
+ /*
+ * If there is data to be written to the SPI device, set the
+ * xmit_data flag to true; otherwise the instruction in SPI_INSTR
+ * does not require data to be written to the SPI device, so set
+ * xmit_data to false.
+ */
+ a3700_spi->xmit_data = (a3700_spi->buf_len != 0);
+ }
+
+ while (a3700_spi->buf_len) {
+ if (a3700_spi->tx_buf) {
+ /* Wait wfifo ready */
+ if (!a3700_spi_transfer_wait(spi,
+ A3700_SPI_WFIFO_RDY)) {
+ dev_err(&spi->dev,
+ "wait wfifo ready timed out\n");
+ ret = -ETIMEDOUT;
+ goto error;
+ }
+ /* Fill up the wfifo */
+ ret = a3700_spi_fifo_write(a3700_spi);
+ if (ret)
+ goto error;
+ } else if (a3700_spi->rx_buf) {
+ /* Wait rfifo ready */
+ if (!a3700_spi_transfer_wait(spi,
+ A3700_SPI_RFIFO_RDY)) {
+ dev_err(&spi->dev,
+ "wait rfifo ready timed out\n");
+ ret = -ETIMEDOUT;
+ goto error;
+ }
+ /* Drain out the rfifo */
+ ret = a3700_spi_fifo_read(a3700_spi);
+ if (ret)
+ goto error;
+ }
+ }
+
+ /*
+ * Stop a write transfer in fifo mode:
+ * - wait all the bytes in wfifo to be shifted out
+ * - set XFER_STOP bit
+ * - wait XFER_START bit clear
+ * - clear XFER_STOP bit
+ * Stop a read transfer in fifo mode:
+ * - the hardware is to reset the XFER_START bit
+ * after the number of bytes indicated in DIN_CNT
+ * register
+ * - just wait XFER_START bit clear
+ */
+ if (a3700_spi->tx_buf) {
+ if (a3700_spi->xmit_data) {
+ /*
+ * If there are data written to the SPI device, wait
+ * until SPI_WFIFO_EMPTY is 1 to wait for all data to
+ * transfer out of write FIFO.
+ */
+ if (!a3700_spi_transfer_wait(spi,
+ A3700_SPI_WFIFO_EMPTY)) {
+ dev_err(&spi->dev, "wait wfifo empty timed out\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ if (!a3700_spi_transfer_wait(spi, A3700_SPI_XFER_RDY)) {
+ dev_err(&spi->dev, "wait xfer ready timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+ val |= A3700_SPI_XFER_STOP;
+ spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+ }
+
+ while (--timeout) {
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+ if (!(val & A3700_SPI_XFER_START))
+ break;
+ udelay(1);
+ }
+
+ if (timeout == 0) {
+ dev_err(&spi->dev, "wait transfer start clear timed out\n");
+ ret = -ETIMEDOUT;
+ goto error;
+ }
+
+ val &= ~A3700_SPI_XFER_STOP;
+ spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+ goto out;
+
+error:
+ a3700_spi_transfer_abort_fifo(a3700_spi);
+out:
+ spi_finalize_current_transfer(master);
+
+ return ret;
+}
+
+static int a3700_spi_transfer_one_full_duplex(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct a3700_spi *a3700_spi = spi_master_get_devdata(master);
+ u32 val;
+
+ /* Disable FIFO mode */
+ a3700_spi_fifo_mode_set(a3700_spi, false);
+
+ while (a3700_spi->buf_len) {
+
+ /* When fewer than 4 bytes are left to transfer, switch to 1-byte
+ * mode. This is reset after each transfer.
+ */
+ if (a3700_spi->buf_len < 4)
+ a3700_spi_bytelen_set(a3700_spi, 1);
+
+ if (a3700_spi->byte_len == 1)
+ val = *a3700_spi->tx_buf;
+ else
+ val = *(u32 *)a3700_spi->tx_buf;
+
+ spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG, val);
+
+ /* Wait for all the data to be shifted in / out */
+ while (!(spireg_read(a3700_spi, A3700_SPI_IF_CTRL_REG) &
+ A3700_SPI_XFER_DONE))
+ cpu_relax();
+
+ val = spireg_read(a3700_spi, A3700_SPI_DATA_IN_REG);
+
+ memcpy(a3700_spi->rx_buf, &val, a3700_spi->byte_len);
+
+ a3700_spi->buf_len -= a3700_spi->byte_len;
+ a3700_spi->tx_buf += a3700_spi->byte_len;
+ a3700_spi->rx_buf += a3700_spi->byte_len;
+
+ }
+
+ spi_finalize_current_transfer(master);
+
+ return 0;
+}
+
+static int a3700_spi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ a3700_spi_transfer_setup(spi, xfer);
+
+ if (xfer->tx_buf && xfer->rx_buf)
+ return a3700_spi_transfer_one_full_duplex(master, spi, xfer);
+
+ return a3700_spi_transfer_one_fifo(master, spi, xfer);
+}
+
+static int a3700_spi_unprepare_message(struct spi_master *master,
+ struct spi_message *message)
+{
+ struct a3700_spi *a3700_spi = spi_master_get_devdata(master);
+
+ clk_disable(a3700_spi->clk);
+
+ return 0;
+}
+
+static const struct of_device_id a3700_spi_dt_ids[] = {
+ { .compatible = "marvell,armada-3700-spi", .data = NULL },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, a3700_spi_dt_ids);
+
+static int a3700_spi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *of_node = dev->of_node;
+ struct spi_master *master;
+ struct a3700_spi *spi;
+ u32 num_cs = 0;
+ int irq, ret = 0;
+
+ master = spi_alloc_master(dev, sizeof(*spi));
+ if (!master) {
+ dev_err(dev, "master allocation failed\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (of_property_read_u32(of_node, "num-cs", &num_cs)) {
+ dev_err(dev, "could not find num-cs\n");
+ ret = -ENXIO;
+ goto error;
+ }
+
+ master->bus_num = pdev->id;
+ master->dev.of_node = of_node;
+ master->mode_bits = SPI_MODE_3;
+ master->num_chipselect = num_cs;
+ master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(32);
+ master->prepare_message = a3700_spi_prepare_message;
+ master->transfer_one = a3700_spi_transfer_one;
+ master->unprepare_message = a3700_spi_unprepare_message;
+ master->set_cs = a3700_spi_set_cs;
+ master->mode_bits |= (SPI_RX_DUAL | SPI_TX_DUAL |
+ SPI_RX_QUAD | SPI_TX_QUAD);
+
+ platform_set_drvdata(pdev, master);
+
+ spi = spi_master_get_devdata(master);
+
+ spi->master = master;
+
+ spi->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(spi->base)) {
+ ret = PTR_ERR(spi->base);
+ goto error;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = -ENXIO;
+ goto error;
+ }
+ spi->irq = irq;
+
+ init_completion(&spi->done);
+
+ spi->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(spi->clk)) {
+ dev_err(dev, "could not find clk: %ld\n", PTR_ERR(spi->clk));
+ ret = PTR_ERR(spi->clk);
+ goto error;
+ }
+
+ ret = clk_prepare(spi->clk);
+ if (ret) {
+ dev_err(dev, "could not prepare clk: %d\n", ret);
+ goto error;
+ }
+
+ master->max_speed_hz = min_t(unsigned long, A3700_SPI_MAX_SPEED_HZ,
+ clk_get_rate(spi->clk));
+ master->min_speed_hz = DIV_ROUND_UP(clk_get_rate(spi->clk),
+ A3700_SPI_MAX_PRESCALE);
+
+ a3700_spi_init(spi);
+
+ ret = devm_request_irq(dev, spi->irq, a3700_spi_interrupt, 0,
+ dev_name(dev), master);
+ if (ret) {
+ dev_err(dev, "could not request IRQ: %d\n", ret);
+ goto error_clk;
+ }
+
+ ret = devm_spi_register_master(dev, master);
+ if (ret) {
+ dev_err(dev, "Failed to register master\n");
+ goto error_clk;
+ }
+
+ return 0;
+
+error_clk:
+ clk_unprepare(spi->clk);
+error:
+ spi_master_put(master);
+out:
+ return ret;
+}
+
+static int a3700_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct a3700_spi *spi = spi_master_get_devdata(master);
+
+ clk_unprepare(spi->clk);
+
+ return 0;
+}
+
+static struct platform_driver a3700_spi_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = of_match_ptr(a3700_spi_dt_ids),
+ },
+ .probe = a3700_spi_probe,
+ .remove = a3700_spi_remove,
+};
+
+module_platform_driver(a3700_spi_driver);
+
+MODULE_DESCRIPTION("Armada-3700 SPI driver");
+MODULE_AUTHOR("Wilson Ding <dingwei@marvell.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/spi/spi-aspeed-smc.c b/drivers/spi/spi-aspeed-smc.c
new file mode 100644
index 000000000..b90571396
--- /dev/null
+++ b/drivers/spi/spi-aspeed-smc.c
@@ -0,0 +1,1218 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * ASPEED FMC/SPI Memory Controller Driver
+ *
+ * Copyright (c) 2015-2022, IBM Corporation.
+ * Copyright (c) 2020, ASPEED Corporation.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+
+#define DEVICE_NAME "spi-aspeed-smc"
+
+/* Type setting Register */
+#define CONFIG_REG 0x0
+#define CONFIG_TYPE_SPI 0x2
+
+/* CE Control Register */
+#define CE_CTRL_REG 0x4
+
+/* CEx Control Register */
+#define CE0_CTRL_REG 0x10
+#define CTRL_IO_MODE_MASK GENMASK(30, 28)
+#define CTRL_IO_SINGLE_DATA 0x0
+#define CTRL_IO_DUAL_DATA BIT(29)
+#define CTRL_IO_QUAD_DATA BIT(30)
+#define CTRL_COMMAND_SHIFT 16
+#define CTRL_IO_ADDRESS_4B BIT(13) /* AST2400 SPI only */
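+/*
+ * The dummy count passed to this macro is split across the control
+ * word: bit 2 of the count lands in register bit 14 and bits [1:0] in
+ * register bits [7:6].
+ */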
+#define CTRL_IO_DUMMY_SET(dummy) \
+ (((((dummy) >> 2) & 0x1) << 14) | (((dummy) & 0x3) << 6))
+#define CTRL_FREQ_SEL_SHIFT 8
+#define CTRL_FREQ_SEL_MASK GENMASK(11, CTRL_FREQ_SEL_SHIFT)
+#define CTRL_CE_STOP_ACTIVE BIT(2)
+#define CTRL_IO_MODE_CMD_MASK GENMASK(1, 0)
+#define CTRL_IO_MODE_NORMAL 0x0
+#define CTRL_IO_MODE_READ 0x1
+#define CTRL_IO_MODE_WRITE 0x2
+#define CTRL_IO_MODE_USER 0x3
+
+#define CTRL_IO_CMD_MASK 0xf0ff40c3
+
+/* CEx Address Decoding Range Register */
+#define CE0_SEGMENT_ADDR_REG 0x30
+
+/* CEx Read timing compensation register */
+#define CE0_TIMING_COMPENSATION_REG 0x94
+
+enum aspeed_spi_ctl_reg_value {
+ ASPEED_SPI_BASE,
+ ASPEED_SPI_READ,
+ ASPEED_SPI_WRITE,
+ ASPEED_SPI_MAX,
+};
+
+struct aspeed_spi;
+
+struct aspeed_spi_chip {
+ struct aspeed_spi *aspi;
+ u32 cs;
+ void __iomem *ctl;
+ void __iomem *ahb_base;
+ u32 ahb_window_size;
+ u32 ctl_val[ASPEED_SPI_MAX];
+ u32 clk_freq;
+};
+
+struct aspeed_spi_data {
+ u32 ctl0;
+ u32 max_cs;
+ bool hastype;
+ u32 mode_bits;
+ u32 we0;
+ u32 timing;
+ u32 hclk_mask;
+ u32 hdiv_max;
+
+ u32 (*segment_start)(struct aspeed_spi *aspi, u32 reg);
+ u32 (*segment_end)(struct aspeed_spi *aspi, u32 reg);
+ u32 (*segment_reg)(struct aspeed_spi *aspi, u32 start, u32 end);
+ int (*calibrate)(struct aspeed_spi_chip *chip, u32 hdiv,
+ const u8 *golden_buf, u8 *test_buf);
+};
+
+#define ASPEED_SPI_MAX_NUM_CS 5
+
+struct aspeed_spi {
+ const struct aspeed_spi_data *data;
+
+ void __iomem *regs;
+ void __iomem *ahb_base;
+ u32 ahb_base_phy;
+ u32 ahb_window_size;
+ struct device *dev;
+
+ struct clk *clk;
+ u32 clk_freq;
+
+ struct aspeed_spi_chip chips[ASPEED_SPI_MAX_NUM_CS];
+};
+
+static u32 aspeed_spi_get_io_mode(const struct spi_mem_op *op)
+{
+ switch (op->data.buswidth) {
+ case 1:
+ return CTRL_IO_SINGLE_DATA;
+ case 2:
+ return CTRL_IO_DUAL_DATA;
+ case 4:
+ return CTRL_IO_QUAD_DATA;
+ default:
+ return CTRL_IO_SINGLE_DATA;
+ }
+}
+
+static void aspeed_spi_set_io_mode(struct aspeed_spi_chip *chip, u32 io_mode)
+{
+ u32 ctl;
+
+ if (io_mode > 0) {
+ ctl = readl(chip->ctl) & ~CTRL_IO_MODE_MASK;
+ ctl |= io_mode;
+ writel(ctl, chip->ctl);
+ }
+}
+
+static void aspeed_spi_start_user(struct aspeed_spi_chip *chip)
+{
+ u32 ctl = chip->ctl_val[ASPEED_SPI_BASE];
+
+ ctl |= CTRL_IO_MODE_USER | CTRL_CE_STOP_ACTIVE;
+ writel(ctl, chip->ctl);
+
+ ctl &= ~CTRL_CE_STOP_ACTIVE;
+ writel(ctl, chip->ctl);
+}
+
+static void aspeed_spi_stop_user(struct aspeed_spi_chip *chip)
+{
+ u32 ctl = chip->ctl_val[ASPEED_SPI_READ] |
+ CTRL_IO_MODE_USER | CTRL_CE_STOP_ACTIVE;
+
+ writel(ctl, chip->ctl);
+
+ /* Restore defaults */
+ writel(chip->ctl_val[ASPEED_SPI_READ], chip->ctl);
+}
+
+static int aspeed_spi_read_from_ahb(void *buf, void __iomem *src, size_t len)
+{
+ size_t offset = 0;
+
+ if (IS_ALIGNED((uintptr_t)src, sizeof(uintptr_t)) &&
+ IS_ALIGNED((uintptr_t)buf, sizeof(uintptr_t))) {
+ ioread32_rep(src, buf, len >> 2);
+ offset = len & ~0x3;
+ len -= offset;
+ }
+ ioread8_rep(src, (u8 *)buf + offset, len);
+ return 0;
+}
+
+static int aspeed_spi_write_to_ahb(void __iomem *dst, const void *buf, size_t len)
+{
+ size_t offset = 0;
+
+ if (IS_ALIGNED((uintptr_t)dst, sizeof(uintptr_t)) &&
+ IS_ALIGNED((uintptr_t)buf, sizeof(uintptr_t))) {
+ iowrite32_rep(dst, buf, len >> 2);
+ offset = len & ~0x3;
+ len -= offset;
+ }
+ iowrite8_rep(dst, (const u8 *)buf + offset, len);
+ return 0;
+}
+
+static int aspeed_spi_send_cmd_addr(struct aspeed_spi_chip *chip, u8 addr_nbytes,
+ u64 offset, u32 opcode)
+{
+ __be32 temp;
+ u32 cmdaddr;
+
+ switch (addr_nbytes) {
+ case 3:
+ cmdaddr = offset & 0xFFFFFF;
+ cmdaddr |= opcode << 24;
+
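+		/* One big-endian word, packed so that the opcode occupies
+		 * the first byte in memory, followed by the 3-byte address.
+		 */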
+ temp = cpu_to_be32(cmdaddr);
+ aspeed_spi_write_to_ahb(chip->ahb_base, &temp, 4);
+ break;
+ case 4:
+ temp = cpu_to_be32(offset);
+ aspeed_spi_write_to_ahb(chip->ahb_base, &opcode, 1);
+ aspeed_spi_write_to_ahb(chip->ahb_base, &temp, 4);
+ break;
+ default:
+ WARN_ONCE(1, "Unexpected address width %u", addr_nbytes);
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static int aspeed_spi_read_reg(struct aspeed_spi_chip *chip,
+ const struct spi_mem_op *op)
+{
+ aspeed_spi_start_user(chip);
+ aspeed_spi_write_to_ahb(chip->ahb_base, &op->cmd.opcode, 1);
+ aspeed_spi_read_from_ahb(op->data.buf.in,
+ chip->ahb_base, op->data.nbytes);
+ aspeed_spi_stop_user(chip);
+ return 0;
+}
+
+static int aspeed_spi_write_reg(struct aspeed_spi_chip *chip,
+ const struct spi_mem_op *op)
+{
+ aspeed_spi_start_user(chip);
+ aspeed_spi_write_to_ahb(chip->ahb_base, &op->cmd.opcode, 1);
+ aspeed_spi_write_to_ahb(chip->ahb_base, op->data.buf.out,
+ op->data.nbytes);
+ aspeed_spi_stop_user(chip);
+ return 0;
+}
+
+static ssize_t aspeed_spi_read_user(struct aspeed_spi_chip *chip,
+ const struct spi_mem_op *op,
+ u64 offset, size_t len, void *buf)
+{
+ int io_mode = aspeed_spi_get_io_mode(op);
+ u8 dummy = 0xFF;
+ int i;
+ int ret;
+
+ aspeed_spi_start_user(chip);
+
+ ret = aspeed_spi_send_cmd_addr(chip, op->addr.nbytes, offset, op->cmd.opcode);
+ if (ret < 0)
+ return ret;
+
+ if (op->dummy.buswidth && op->dummy.nbytes) {
+ for (i = 0; i < op->dummy.nbytes / op->dummy.buswidth; i++)
+ aspeed_spi_write_to_ahb(chip->ahb_base, &dummy, sizeof(dummy));
+ }
+
+ aspeed_spi_set_io_mode(chip, io_mode);
+
+ aspeed_spi_read_from_ahb(buf, chip->ahb_base, len);
+ aspeed_spi_stop_user(chip);
+ return 0;
+}
+
+static ssize_t aspeed_spi_write_user(struct aspeed_spi_chip *chip,
+ const struct spi_mem_op *op)
+{
+ int ret;
+
+ aspeed_spi_start_user(chip);
+ ret = aspeed_spi_send_cmd_addr(chip, op->addr.nbytes, op->addr.val, op->cmd.opcode);
+ if (ret < 0)
+ return ret;
+ aspeed_spi_write_to_ahb(chip->ahb_base, op->data.buf.out, op->data.nbytes);
+ aspeed_spi_stop_user(chip);
+ return 0;
+}
+
+/* support for 1-1-1, 1-1-2 or 1-1-4 */
+static bool aspeed_spi_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ if (op->cmd.buswidth > 1)
+ return false;
+
+ if (op->addr.nbytes != 0) {
+ if (op->addr.buswidth > 1)
+ return false;
+ if (op->addr.nbytes < 3 || op->addr.nbytes > 4)
+ return false;
+ }
+
+ if (op->dummy.nbytes != 0) {
+ if (op->dummy.buswidth > 1 || op->dummy.nbytes > 7)
+ return false;
+ }
+
+ if (op->data.nbytes != 0 && op->data.buswidth > 4)
+ return false;
+
+ return spi_mem_default_supports_op(mem, op);
+}
+
+static const struct aspeed_spi_data ast2400_spi_data;
+
+static int do_aspeed_spi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ struct aspeed_spi *aspi = spi_controller_get_devdata(mem->spi->master);
+ struct aspeed_spi_chip *chip = &aspi->chips[mem->spi->chip_select];
+ u32 addr_mode, addr_mode_backup;
+ u32 ctl_val;
+ int ret = 0;
+
+ dev_dbg(aspi->dev,
+ "CE%d %s OP %#x mode:%d.%d.%d.%d naddr:%#x ndummies:%#x len:%#x",
+ chip->cs, op->data.dir == SPI_MEM_DATA_IN ? "read" : "write",
+ op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
+ op->dummy.buswidth, op->data.buswidth,
+ op->addr.nbytes, op->dummy.nbytes, op->data.nbytes);
+
+ addr_mode = readl(aspi->regs + CE_CTRL_REG);
+ addr_mode_backup = addr_mode;
+
+ ctl_val = chip->ctl_val[ASPEED_SPI_BASE];
+ ctl_val &= ~CTRL_IO_CMD_MASK;
+
+ ctl_val |= op->cmd.opcode << CTRL_COMMAND_SHIFT;
+
+ /* 4BYTE address mode */
+ if (op->addr.nbytes) {
+ if (op->addr.nbytes == 4)
+ addr_mode |= (0x11 << chip->cs);
+ else
+ addr_mode &= ~(0x11 << chip->cs);
+
+ if (op->addr.nbytes == 4 && chip->aspi->data == &ast2400_spi_data)
+ ctl_val |= CTRL_IO_ADDRESS_4B;
+ }
+
+ if (op->dummy.nbytes)
+ ctl_val |= CTRL_IO_DUMMY_SET(op->dummy.nbytes / op->dummy.buswidth);
+
+ if (op->data.nbytes)
+ ctl_val |= aspeed_spi_get_io_mode(op);
+
+ if (op->data.dir == SPI_MEM_DATA_OUT)
+ ctl_val |= CTRL_IO_MODE_WRITE;
+ else
+ ctl_val |= CTRL_IO_MODE_READ;
+
+ if (addr_mode != addr_mode_backup)
+ writel(addr_mode, aspi->regs + CE_CTRL_REG);
+ writel(ctl_val, chip->ctl);
+
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ if (!op->addr.nbytes)
+ ret = aspeed_spi_read_reg(chip, op);
+ else
+ ret = aspeed_spi_read_user(chip, op, op->addr.val,
+ op->data.nbytes, op->data.buf.in);
+ } else {
+ if (!op->addr.nbytes)
+ ret = aspeed_spi_write_reg(chip, op);
+ else
+ ret = aspeed_spi_write_user(chip, op);
+ }
+
+ /* Restore defaults */
+ if (addr_mode != addr_mode_backup)
+ writel(addr_mode_backup, aspi->regs + CE_CTRL_REG);
+ writel(chip->ctl_val[ASPEED_SPI_READ], chip->ctl);
+ return ret;
+}
+
+static int aspeed_spi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ int ret;
+
+ ret = do_aspeed_spi_exec_op(mem, op);
+ if (ret)
+ dev_err(&mem->spi->dev, "operation failed: %d\n", ret);
+ return ret;
+}
+
+static const char *aspeed_spi_get_name(struct spi_mem *mem)
+{
+ struct aspeed_spi *aspi = spi_controller_get_devdata(mem->spi->master);
+ struct device *dev = aspi->dev;
+
+ return devm_kasprintf(dev, GFP_KERNEL, "%s.%d", dev_name(dev), mem->spi->chip_select);
+}
+
+struct aspeed_spi_window {
+ u32 cs;
+ u32 offset;
+ u32 size;
+};
+
+static void aspeed_spi_get_windows(struct aspeed_spi *aspi,
+ struct aspeed_spi_window windows[ASPEED_SPI_MAX_NUM_CS])
+{
+ const struct aspeed_spi_data *data = aspi->data;
+ u32 reg_val;
+ u32 cs;
+
+ for (cs = 0; cs < aspi->data->max_cs; cs++) {
+ reg_val = readl(aspi->regs + CE0_SEGMENT_ADDR_REG + cs * 4);
+ windows[cs].cs = cs;
+ windows[cs].size = data->segment_end(aspi, reg_val) -
+ data->segment_start(aspi, reg_val);
+ windows[cs].offset = data->segment_start(aspi, reg_val) - aspi->ahb_base_phy;
+ dev_vdbg(aspi->dev, "CE%d offset=0x%.8x size=0x%x\n", cs,
+ windows[cs].offset, windows[cs].size);
+ }
+}
+
+/*
+ * On the AST2600, some CE windows are closed by default at reset,
+ * but U-Boot should open all of them.
+ */
+static int aspeed_spi_chip_set_default_window(struct aspeed_spi_chip *chip)
+{
+ struct aspeed_spi *aspi = chip->aspi;
+ struct aspeed_spi_window windows[ASPEED_SPI_MAX_NUM_CS] = { 0 };
+ struct aspeed_spi_window *win = &windows[chip->cs];
+
+ /* No segment registers for the AST2400 SPI controller */
+ if (aspi->data == &ast2400_spi_data) {
+ win->offset = 0;
+ win->size = aspi->ahb_window_size;
+ } else {
+ aspeed_spi_get_windows(aspi, windows);
+ }
+
+ chip->ahb_base = aspi->ahb_base + win->offset;
+ chip->ahb_window_size = win->size;
+
+ dev_dbg(aspi->dev, "CE%d default window [ 0x%.8x - 0x%.8x ] %dMB",
+ chip->cs, aspi->ahb_base_phy + win->offset,
+ aspi->ahb_base_phy + win->offset + win->size - 1,
+ win->size >> 20);
+
+ return chip->ahb_window_size ? 0 : -1;
+}
+
+static int aspeed_spi_set_window(struct aspeed_spi *aspi,
+ const struct aspeed_spi_window *win)
+{
+ u32 start = aspi->ahb_base_phy + win->offset;
+ u32 end = start + win->size;
+ void __iomem *seg_reg = aspi->regs + CE0_SEGMENT_ADDR_REG + win->cs * 4;
+ u32 seg_val_backup = readl(seg_reg);
+ u32 seg_val = aspi->data->segment_reg(aspi, start, end);
+
+ if (seg_val == seg_val_backup)
+ return 0;
+
+ writel(seg_val, seg_reg);
+
+ /*
+ * Restore the initial value if something goes wrong, else we could
+ * lose access to the chip.
+ */
+ if (seg_val != readl(seg_reg)) {
+ dev_err(aspi->dev, "CE%d invalid window [ 0x%.8x - 0x%.8x ] %dMB",
+ win->cs, start, end - 1, win->size >> 20);
+ writel(seg_val_backup, seg_reg);
+ return -EIO;
+ }
+
+ if (win->size)
+ dev_dbg(aspi->dev, "CE%d new window [ 0x%.8x - 0x%.8x ] %dMB",
+ win->cs, start, end - 1, win->size >> 20);
+ else
+ dev_dbg(aspi->dev, "CE%d window closed", win->cs);
+
+ return 0;
+}
+
+/*
+ * Yet to be done when possible:
+ * - Align mappings on the flash size (we don't have the info)
+ * - ioremap each window; not strictly necessary since the overall window
+ *   is correct.
+ */
+static const struct aspeed_spi_data ast2500_spi_data;
+static const struct aspeed_spi_data ast2600_spi_data;
+static const struct aspeed_spi_data ast2600_fmc_data;
+
+static int aspeed_spi_chip_adjust_window(struct aspeed_spi_chip *chip,
+ u32 local_offset, u32 size)
+{
+ struct aspeed_spi *aspi = chip->aspi;
+ struct aspeed_spi_window windows[ASPEED_SPI_MAX_NUM_CS] = { 0 };
+ struct aspeed_spi_window *win = &windows[chip->cs];
+ int ret;
+
+ /* No segment registers for the AST2400 SPI controller */
+ if (aspi->data == &ast2400_spi_data)
+ return 0;
+
+ /*
+ * Due to an HW issue on the AST2500 SPI controller, the CE0
+ * window size should be smaller than the maximum 128MB.
+ */
+ if (aspi->data == &ast2500_spi_data && chip->cs == 0 && size == SZ_128M) {
+ size = 120 << 20;
+ dev_info(aspi->dev, "CE%d window resized to %dMB (AST2500 HW quirk)",
+ chip->cs, size >> 20);
+ }
+
+ /*
+ * The decoding range of the AST2600 SPI controller should be set
+ * to at least 2MB.
+ */
+ if ((aspi->data == &ast2600_spi_data || aspi->data == &ast2600_fmc_data) &&
+ size < SZ_2M) {
+ size = SZ_2M;
+ dev_info(aspi->dev, "CE%d window resized to %dMB (AST2600 Decoding)",
+ chip->cs, size >> 20);
+ }
+
+ aspeed_spi_get_windows(aspi, windows);
+
+ /* Adjust this chip window */
+ win->offset += local_offset;
+ win->size = size;
+
+ if (win->offset + win->size > aspi->ahb_window_size) {
+ win->size = aspi->ahb_window_size - win->offset;
+ dev_warn(aspi->dev, "CE%d window resized to %dMB", chip->cs, win->size >> 20);
+ }
+
+ ret = aspeed_spi_set_window(aspi, win);
+ if (ret)
+ return ret;
+
+ /* Update chip mapping info */
+ chip->ahb_base = aspi->ahb_base + win->offset;
+ chip->ahb_window_size = win->size;
+
+ /*
+ * Also adjust next chip window to make sure that it does not
+ * overlap with the current window.
+ */
+ if (chip->cs < aspi->data->max_cs - 1) {
+ struct aspeed_spi_window *next = &windows[chip->cs + 1];
+
+ /* Change offset and size to keep the same end address */
+ if ((next->offset + next->size) > (win->offset + win->size))
+ next->size = (next->offset + next->size) - (win->offset + win->size);
+ else
+ next->size = 0;
+ next->offset = win->offset + win->size;
+
+ aspeed_spi_set_window(aspi, next);
+ }
+ return 0;
+}
+
+static int aspeed_spi_do_calibration(struct aspeed_spi_chip *chip);
+
+static int aspeed_spi_dirmap_create(struct spi_mem_dirmap_desc *desc)
+{
+ struct aspeed_spi *aspi = spi_controller_get_devdata(desc->mem->spi->master);
+ struct aspeed_spi_chip *chip = &aspi->chips[desc->mem->spi->chip_select];
+ struct spi_mem_op *op = &desc->info.op_tmpl;
+ u32 ctl_val;
+ int ret = 0;
+
+ dev_dbg(aspi->dev,
+ "CE%d %s dirmap [ 0x%.8llx - 0x%.8llx ] OP %#x mode:%d.%d.%d.%d naddr:%#x ndummies:%#x\n",
+ chip->cs, op->data.dir == SPI_MEM_DATA_IN ? "read" : "write",
+ desc->info.offset, desc->info.offset + desc->info.length,
+ op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
+ op->dummy.buswidth, op->data.buswidth,
+ op->addr.nbytes, op->dummy.nbytes);
+
+ chip->clk_freq = desc->mem->spi->max_speed_hz;
+
+ /* Only for reads */
+ if (op->data.dir != SPI_MEM_DATA_IN)
+ return -EOPNOTSUPP;
+
+ aspeed_spi_chip_adjust_window(chip, desc->info.offset, desc->info.length);
+
+ if (desc->info.length > chip->ahb_window_size)
+ dev_warn(aspi->dev, "CE%d window (%dMB) too small for mapping",
+ chip->cs, chip->ahb_window_size >> 20);
+
+ /* Define the default IO read settings */
+ ctl_val = readl(chip->ctl) & ~CTRL_IO_CMD_MASK;
+ ctl_val |= aspeed_spi_get_io_mode(op) |
+ op->cmd.opcode << CTRL_COMMAND_SHIFT |
+ CTRL_IO_MODE_READ;
+
+ if (op->dummy.nbytes)
+ ctl_val |= CTRL_IO_DUMMY_SET(op->dummy.nbytes / op->dummy.buswidth);
+
+ /* Tune 4BYTE address mode */
+ if (op->addr.nbytes) {
+ u32 addr_mode = readl(aspi->regs + CE_CTRL_REG);
+
+ if (op->addr.nbytes == 4)
+ addr_mode |= (0x11 << chip->cs);
+ else
+ addr_mode &= ~(0x11 << chip->cs);
+ writel(addr_mode, aspi->regs + CE_CTRL_REG);
+
+ /* AST2400 SPI controller sets 4BYTE address mode in
+ * CE0 Control Register
+ */
+ if (op->addr.nbytes == 4 && chip->aspi->data == &ast2400_spi_data)
+ ctl_val |= CTRL_IO_ADDRESS_4B;
+ }
+
+ /* READ mode is the controller default setting */
+ chip->ctl_val[ASPEED_SPI_READ] = ctl_val;
+ writel(chip->ctl_val[ASPEED_SPI_READ], chip->ctl);
+
+ ret = aspeed_spi_do_calibration(chip);
+
+ dev_info(aspi->dev, "CE%d read buswidth:%d [0x%08x]\n",
+ chip->cs, op->data.buswidth, chip->ctl_val[ASPEED_SPI_READ]);
+
+ return ret;
+}
+
+static ssize_t aspeed_spi_dirmap_read(struct spi_mem_dirmap_desc *desc,
+ u64 offset, size_t len, void *buf)
+{
+ struct aspeed_spi *aspi = spi_controller_get_devdata(desc->mem->spi->master);
+ struct aspeed_spi_chip *chip = &aspi->chips[desc->mem->spi->chip_select];
+
+ /* Switch to USER command mode if mapping window is too small */
+ if (chip->ahb_window_size < offset + len) {
+ int ret;
+
+ ret = aspeed_spi_read_user(chip, &desc->info.op_tmpl, offset, len, buf);
+ if (ret < 0)
+ return ret;
+ } else {
+ memcpy_fromio(buf, chip->ahb_base + offset, len);
+ }
+
+ return len;
+}
+
+static const struct spi_controller_mem_ops aspeed_spi_mem_ops = {
+ .supports_op = aspeed_spi_supports_op,
+ .exec_op = aspeed_spi_exec_op,
+ .get_name = aspeed_spi_get_name,
+ .dirmap_create = aspeed_spi_dirmap_create,
+ .dirmap_read = aspeed_spi_dirmap_read,
+};
+
+static void aspeed_spi_chip_set_type(struct aspeed_spi *aspi, unsigned int cs, int type)
+{
+ u32 reg;
+
+ reg = readl(aspi->regs + CONFIG_REG);
+ reg &= ~(0x3 << (cs * 2));
+ reg |= type << (cs * 2);
+ writel(reg, aspi->regs + CONFIG_REG);
+}
+
+static void aspeed_spi_chip_enable(struct aspeed_spi *aspi, unsigned int cs, bool enable)
+{
+ u32 we_bit = BIT(aspi->data->we0 + cs);
+ u32 reg = readl(aspi->regs + CONFIG_REG);
+
+ if (enable)
+ reg |= we_bit;
+ else
+ reg &= ~we_bit;
+ writel(reg, aspi->regs + CONFIG_REG);
+}
+
+static int aspeed_spi_setup(struct spi_device *spi)
+{
+ struct aspeed_spi *aspi = spi_controller_get_devdata(spi->master);
+ const struct aspeed_spi_data *data = aspi->data;
+ unsigned int cs = spi->chip_select;
+ struct aspeed_spi_chip *chip = &aspi->chips[cs];
+
+ chip->aspi = aspi;
+ chip->cs = cs;
+ chip->ctl = aspi->regs + data->ctl0 + cs * 4;
+
+ /* The driver only supports SPI type flash */
+ if (data->hastype)
+ aspeed_spi_chip_set_type(aspi, cs, CONFIG_TYPE_SPI);
+
+ if (aspeed_spi_chip_set_default_window(chip) < 0) {
+ dev_warn(aspi->dev, "CE%d window invalid", cs);
+ return -EINVAL;
+ }
+
+ aspeed_spi_chip_enable(aspi, cs, true);
+
+ chip->ctl_val[ASPEED_SPI_BASE] = CTRL_CE_STOP_ACTIVE | CTRL_IO_MODE_USER;
+
+ dev_dbg(aspi->dev, "CE%d setup done\n", cs);
+ return 0;
+}
+
+static void aspeed_spi_cleanup(struct spi_device *spi)
+{
+ struct aspeed_spi *aspi = spi_controller_get_devdata(spi->master);
+ unsigned int cs = spi->chip_select;
+
+ aspeed_spi_chip_enable(aspi, cs, false);
+
+ dev_dbg(aspi->dev, "CE%d cleanup done\n", cs);
+}
+
+static void aspeed_spi_enable(struct aspeed_spi *aspi, bool enable)
+{
+ int cs;
+
+ for (cs = 0; cs < aspi->data->max_cs; cs++)
+ aspeed_spi_chip_enable(aspi, cs, enable);
+}
+
+static int aspeed_spi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct aspeed_spi_data *data;
+ struct spi_controller *ctlr;
+ struct aspeed_spi *aspi;
+ struct resource *res;
+ int ret;
+
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data)
+ return -ENODEV;
+
+ ctlr = devm_spi_alloc_master(dev, sizeof(*aspi));
+ if (!ctlr)
+ return -ENOMEM;
+
+ aspi = spi_controller_get_devdata(ctlr);
+ platform_set_drvdata(pdev, aspi);
+ aspi->data = data;
+ aspi->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ aspi->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(aspi->regs))
+ return PTR_ERR(aspi->regs);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ aspi->ahb_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(aspi->ahb_base)) {
+ dev_err(dev, "missing AHB mapping window\n");
+ return PTR_ERR(aspi->ahb_base);
+ }
+
+ aspi->ahb_window_size = resource_size(res);
+ aspi->ahb_base_phy = res->start;
+
+ aspi->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(aspi->clk)) {
+ dev_err(dev, "missing clock\n");
+ return PTR_ERR(aspi->clk);
+ }
+
+ aspi->clk_freq = clk_get_rate(aspi->clk);
+ if (!aspi->clk_freq) {
+ dev_err(dev, "invalid clock\n");
+ return -EINVAL;
+ }
+
+ ret = clk_prepare_enable(aspi->clk);
+ if (ret) {
+ dev_err(dev, "can not enable the clock\n");
+ return ret;
+ }
+
+ /* IRQ is for DMA, which the driver doesn't support yet */
+
+ ctlr->mode_bits = SPI_RX_DUAL | SPI_TX_DUAL | data->mode_bits;
+ ctlr->bus_num = pdev->id;
+ ctlr->mem_ops = &aspeed_spi_mem_ops;
+ ctlr->setup = aspeed_spi_setup;
+ ctlr->cleanup = aspeed_spi_cleanup;
+ ctlr->num_chipselect = data->max_cs;
+ ctlr->dev.of_node = dev->of_node;
+
+ ret = devm_spi_register_controller(dev, ctlr);
+ if (ret) {
+ dev_err(&pdev->dev, "spi_register_controller failed\n");
+ goto disable_clk;
+ }
+ return 0;
+
+disable_clk:
+ clk_disable_unprepare(aspi->clk);
+ return ret;
+}
+
+static int aspeed_spi_remove(struct platform_device *pdev)
+{
+ struct aspeed_spi *aspi = platform_get_drvdata(pdev);
+
+ aspeed_spi_enable(aspi, false);
+ clk_disable_unprepare(aspi->clk);
+ return 0;
+}
+
+/*
+ * AHB mappings
+ */
+
+/*
+ * The Segment Registers of the AST2400 and AST2500 use an 8MB unit.
+ * The address range is encoded with absolute addresses in the overall
+ * mapping window.
+ */
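+/*
+ * For example, a window covering [0x20000000, 0x22000000) is encoded
+ * as start = 0x20000000 >> 23 = 0x40 in bits [23:16] and end =
+ * 0x22000000 >> 23 = 0x44 in bits [31:24], i.e. a register value of
+ * 0x44400000 (illustration derived from the helpers below).
+ */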
+static u32 aspeed_spi_segment_start(struct aspeed_spi *aspi, u32 reg)
+{
+ return ((reg >> 16) & 0xFF) << 23;
+}
+
+static u32 aspeed_spi_segment_end(struct aspeed_spi *aspi, u32 reg)
+{
+ return ((reg >> 24) & 0xFF) << 23;
+}
+
+static u32 aspeed_spi_segment_reg(struct aspeed_spi *aspi, u32 start, u32 end)
+{
+ return (((start >> 23) & 0xFF) << 16) | (((end >> 23) & 0xFF) << 24);
+}
+
+/*
+ * The Segment Registers of the AST2600 use a 1MB unit. The address
+ * range is encoded with offsets in the overall mapping window.
+ */
+
+#define AST2600_SEG_ADDR_MASK 0x0ff00000
+
+static u32 aspeed_spi_segment_ast2600_start(struct aspeed_spi *aspi,
+ u32 reg)
+{
+ u32 start_offset = (reg << 16) & AST2600_SEG_ADDR_MASK;
+
+ return aspi->ahb_base_phy + start_offset;
+}
+
+static u32 aspeed_spi_segment_ast2600_end(struct aspeed_spi *aspi,
+ u32 reg)
+{
+ u32 end_offset = reg & AST2600_SEG_ADDR_MASK;
+
+ /* segment is disabled */
+ if (!end_offset)
+ return aspi->ahb_base_phy;
+
+ return aspi->ahb_base_phy + end_offset + 0x100000;
+}
+
+static u32 aspeed_spi_segment_ast2600_reg(struct aspeed_spi *aspi,
+ u32 start, u32 end)
+{
+ /* disable zero size segments */
+ if (start == end)
+ return 0;
+
+ return ((start & AST2600_SEG_ADDR_MASK) >> 16) |
+ ((end - 1) & AST2600_SEG_ADDR_MASK);
+}
+
+/*
+ * Read timing compensation sequences
+ */
+
+#define CALIBRATE_BUF_SIZE SZ_16K
+
+static bool aspeed_spi_check_reads(struct aspeed_spi_chip *chip,
+ const u8 *golden_buf, u8 *test_buf)
+{
+ int i;
+
+ for (i = 0; i < 10; i++) {
+ memcpy_fromio(test_buf, chip->ahb_base, CALIBRATE_BUF_SIZE);
+ if (memcmp(test_buf, golden_buf, CALIBRATE_BUF_SIZE) != 0) {
+#if defined(VERBOSE_DEBUG)
+ print_hex_dump_bytes(DEVICE_NAME " fail: ", DUMP_PREFIX_NONE,
+ test_buf, 0x100);
+#endif
+ return false;
+ }
+ }
+ return true;
+}
+
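+/*
+ * Encode calibration step 'i' into the timing field: the low bits hold
+ * the HCLK delay (i / 2) and bit 3 is set for even values of 'i', which
+ * per the debug output below corresponds to the extra 4ns DI delay.
+ */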
+#define FREAD_TPASS(i) (((i) / 2) | (((i) & 1) ? 0 : 8))
+
+/*
+ * The timing register is shared by all devices. Only update for CE0.
+ */
+static int aspeed_spi_calibrate(struct aspeed_spi_chip *chip, u32 hdiv,
+ const u8 *golden_buf, u8 *test_buf)
+{
+ struct aspeed_spi *aspi = chip->aspi;
+ const struct aspeed_spi_data *data = aspi->data;
+ int i;
+ int good_pass = -1, pass_count = 0;
+ u32 shift = (hdiv - 1) << 2;
+ u32 mask = ~(0xfu << shift);
+ u32 fread_timing_val = 0;
+
+ /* Try HCLK delays 0..5, each one with and without the extra DI
+ * delay, and look for a good pair.
+ */
+ for (i = 0; i < 12; i++) {
+ bool pass;
+
+ if (chip->cs == 0) {
+ fread_timing_val &= mask;
+ fread_timing_val |= FREAD_TPASS(i) << shift;
+ writel(fread_timing_val, aspi->regs + data->timing);
+ }
+ pass = aspeed_spi_check_reads(chip, golden_buf, test_buf);
+ dev_dbg(aspi->dev,
+ " * [%08x] %d HCLK delay, %dns DI delay : %s",
+ fread_timing_val, i / 2, (i & 1) ? 0 : 4,
+ pass ? "PASS" : "FAIL");
+ if (pass) {
+ pass_count++;
+ if (pass_count == 3) {
+ good_pass = i - 1;
+ break;
+ }
+ } else {
+ pass_count = 0;
+ }
+ }
+
+ /* No good setting for this frequency */
+ if (good_pass < 0)
+ return -1;
+
+ /* We have at least one pass of margin, let's use first pass */
+ if (chip->cs == 0) {
+ fread_timing_val &= mask;
+ fread_timing_val |= FREAD_TPASS(good_pass) << shift;
+ writel(fread_timing_val, aspi->regs + data->timing);
+ }
+ dev_dbg(aspi->dev, " * -> good is pass %d [0x%08x]",
+ good_pass, fread_timing_val);
+ return 0;
+}
+
+static bool aspeed_spi_check_calib_data(const u8 *test_buf, u32 size)
+{
+ const u32 *tb32 = (const u32 *)test_buf;
+ u32 i, cnt = 0;
+
+ /* Check that we have enough words that are neither all 0s nor
+ * all 1s, so that the calibration can be considered valid.
+ *
+ * We use an arbitrary threshold of 64 for now.
+ */
+ size >>= 2;
+ for (i = 0; i < size; i++) {
+ if (tb32[i] != 0 && tb32[i] != 0xffffffff)
+ cnt++;
+ }
+ return cnt >= 64;
+}
+
+static const u32 aspeed_spi_hclk_divs[] = {
+ 0xf, /* HCLK */
+ 0x7, /* HCLK/2 */
+ 0xe, /* HCLK/3 */
+ 0x6, /* HCLK/4 */
+ 0xd, /* HCLK/5 */
+};
+
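+/* Shift the HCLK/i selection into the clock frequency field (bits 11:8,
+ * see CTRL_FREQ_SEL_*) of the CEx control register.
+ */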
+#define ASPEED_SPI_HCLK_DIV(i) \
+ (aspeed_spi_hclk_divs[(i) - 1] << CTRL_FREQ_SEL_SHIFT)
+
+static int aspeed_spi_do_calibration(struct aspeed_spi_chip *chip)
+{
+ struct aspeed_spi *aspi = chip->aspi;
+ const struct aspeed_spi_data *data = aspi->data;
+ u32 ahb_freq = aspi->clk_freq;
+ u32 max_freq = chip->clk_freq;
+ u32 ctl_val;
+ u8 *golden_buf = NULL;
+ u8 *test_buf = NULL;
+ int i, rc, best_div = -1;
+
+ dev_dbg(aspi->dev, "calculate timing compensation - AHB freq: %d MHz",
+ ahb_freq / 1000000);
+
+ /*
+ * Use a low frequency to read the calibration area and capture the
+ * golden data.
+ */
+ ctl_val = chip->ctl_val[ASPEED_SPI_READ] & data->hclk_mask;
+ writel(ctl_val, chip->ctl);
+
+ test_buf = kzalloc(CALIBRATE_BUF_SIZE * 2, GFP_KERNEL);
+ if (!test_buf)
+ return -ENOMEM;
+
+ golden_buf = test_buf + CALIBRATE_BUF_SIZE;
+
+ memcpy_fromio(golden_buf, chip->ahb_base, CALIBRATE_BUF_SIZE);
+ if (!aspeed_spi_check_calib_data(golden_buf, CALIBRATE_BUF_SIZE)) {
+ dev_info(aspi->dev, "Calibration area too uniform, using low speed");
+ goto no_calib;
+ }
+
+#if defined(VERBOSE_DEBUG)
+ print_hex_dump_bytes(DEVICE_NAME " good: ", DUMP_PREFIX_NONE,
+ golden_buf, 0x100);
+#endif
+
+ /* Now we iterate the HCLK dividers until we find our breaking point */
+ for (i = ARRAY_SIZE(aspeed_spi_hclk_divs); i > data->hdiv_max - 1; i--) {
+ u32 tv, freq;
+
+ freq = ahb_freq / i;
+ if (freq > max_freq)
+ continue;
+
+ /* Set the timing */
+ tv = chip->ctl_val[ASPEED_SPI_READ] | ASPEED_SPI_HCLK_DIV(i);
+ writel(tv, chip->ctl);
+ dev_dbg(aspi->dev, "Trying HCLK/%d [%08x] ...", i, tv);
+ rc = data->calibrate(chip, i, golden_buf, test_buf);
+ if (rc == 0)
+ best_div = i;
+ }
+
+ /* Nothing found ? */
+ if (best_div < 0) {
+ dev_warn(aspi->dev, "No good frequency, using dumb slow");
+ } else {
+ dev_dbg(aspi->dev, "Found good read timings at HCLK/%d", best_div);
+
+ /* Record the freq */
+ for (i = 0; i < ASPEED_SPI_MAX; i++)
+ chip->ctl_val[i] = (chip->ctl_val[i] & data->hclk_mask) |
+ ASPEED_SPI_HCLK_DIV(best_div);
+ }
+
+no_calib:
+ writel(chip->ctl_val[ASPEED_SPI_READ], chip->ctl);
+ kfree(test_buf);
+ return 0;
+}
+
+#define TIMING_DELAY_DI BIT(3)
+#define TIMING_DELAY_HCYCLE_MAX 5
+#define TIMING_REG_AST2600(chip) \
+ ((chip)->aspi->regs + (chip)->aspi->data->timing + \
+ (chip)->cs * 4)
+
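+/*
+ * Each HCLK divider uses its own byte of the per-CS read timing register
+ * (hence the (hdiv - 2) * 8 shift below): bits [2:0] of that byte hold
+ * the HCLK cycle delay, bit 3 (TIMING_DELAY_DI) enables the DI input
+ * delay and bits [7:4] hold the DI delay value.
+ */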
+static int aspeed_spi_ast2600_calibrate(struct aspeed_spi_chip *chip, u32 hdiv,
+ const u8 *golden_buf, u8 *test_buf)
+{
+ struct aspeed_spi *aspi = chip->aspi;
+ int hcycle;
+ u32 shift = (hdiv - 2) << 3;
+ u32 mask = ~(0xfu << shift);
+ u32 fread_timing_val = 0;
+
+ for (hcycle = 0; hcycle <= TIMING_DELAY_HCYCLE_MAX; hcycle++) {
+ int delay_ns;
+ bool pass = false;
+
+ fread_timing_val &= mask;
+ fread_timing_val |= hcycle << shift;
+
+ /* no DI input delay first */
+ writel(fread_timing_val, TIMING_REG_AST2600(chip));
+ pass = aspeed_spi_check_reads(chip, golden_buf, test_buf);
+ dev_dbg(aspi->dev,
+ " * [%08x] %d HCLK delay, DI delay none : %s",
+ fread_timing_val, hcycle, pass ? "PASS" : "FAIL");
+ if (pass)
+ return 0;
+
+ /* Add DI input delays */
+ fread_timing_val &= mask;
+ fread_timing_val |= (TIMING_DELAY_DI | hcycle) << shift;
+
+ for (delay_ns = 0; delay_ns < 0x10; delay_ns++) {
+ fread_timing_val &= ~(0xf << (4 + shift));
+ fread_timing_val |= delay_ns << (4 + shift);
+
+ writel(fread_timing_val, TIMING_REG_AST2600(chip));
+ pass = aspeed_spi_check_reads(chip, golden_buf, test_buf);
+ dev_dbg(aspi->dev,
+ " * [%08x] %d HCLK delay, DI delay %d.%dns : %s",
+ fread_timing_val, hcycle, (delay_ns + 1) / 2,
+ (delay_ns + 1) & 1 ? 5 : 0, pass ? "PASS" : "FAIL");
+ /*
+ * TODO: This is optimistic. We should look
+ * for a working interval and save the middle
+ * value in the read timing register.
+ */
+ if (pass)
+ return 0;
+ }
+ }
+
+ /* No good setting for this frequency */
+ return -1;
+}
+
+/*
+ * Platform definitions
+ */
+static const struct aspeed_spi_data ast2400_fmc_data = {
+ .max_cs = 5,
+ .hastype = true,
+ .we0 = 16,
+ .ctl0 = CE0_CTRL_REG,
+ .timing = CE0_TIMING_COMPENSATION_REG,
+ .hclk_mask = 0xfffff0ff,
+ .hdiv_max = 1,
+ .calibrate = aspeed_spi_calibrate,
+ .segment_start = aspeed_spi_segment_start,
+ .segment_end = aspeed_spi_segment_end,
+ .segment_reg = aspeed_spi_segment_reg,
+};
+
+static const struct aspeed_spi_data ast2400_spi_data = {
+ .max_cs = 1,
+ .hastype = false,
+ .we0 = 0,
+ .ctl0 = 0x04,
+ .timing = 0x14,
+ .hclk_mask = 0xfffff0ff,
+ .hdiv_max = 1,
+ .calibrate = aspeed_spi_calibrate,
+ /* No segment registers */
+};
+
+static const struct aspeed_spi_data ast2500_fmc_data = {
+ .max_cs = 3,
+ .hastype = true,
+ .we0 = 16,
+ .ctl0 = CE0_CTRL_REG,
+ .timing = CE0_TIMING_COMPENSATION_REG,
+ .hclk_mask = 0xffffd0ff,
+ .hdiv_max = 1,
+ .calibrate = aspeed_spi_calibrate,
+ .segment_start = aspeed_spi_segment_start,
+ .segment_end = aspeed_spi_segment_end,
+ .segment_reg = aspeed_spi_segment_reg,
+};
+
+static const struct aspeed_spi_data ast2500_spi_data = {
+ .max_cs = 2,
+ .hastype = false,
+ .we0 = 16,
+ .ctl0 = CE0_CTRL_REG,
+ .timing = CE0_TIMING_COMPENSATION_REG,
+ .hclk_mask = 0xffffd0ff,
+ .hdiv_max = 1,
+ .calibrate = aspeed_spi_calibrate,
+ .segment_start = aspeed_spi_segment_start,
+ .segment_end = aspeed_spi_segment_end,
+ .segment_reg = aspeed_spi_segment_reg,
+};
+
+static const struct aspeed_spi_data ast2600_fmc_data = {
+ .max_cs = 3,
+ .hastype = false,
+ .mode_bits = SPI_RX_QUAD | SPI_TX_QUAD,
+ .we0 = 16,
+ .ctl0 = CE0_CTRL_REG,
+ .timing = CE0_TIMING_COMPENSATION_REG,
+ .hclk_mask = 0xf0fff0ff,
+ .hdiv_max = 2,
+ .calibrate = aspeed_spi_ast2600_calibrate,
+ .segment_start = aspeed_spi_segment_ast2600_start,
+ .segment_end = aspeed_spi_segment_ast2600_end,
+ .segment_reg = aspeed_spi_segment_ast2600_reg,
+};
+
+static const struct aspeed_spi_data ast2600_spi_data = {
+ .max_cs = 2,
+ .hastype = false,
+ .mode_bits = SPI_RX_QUAD | SPI_TX_QUAD,
+ .we0 = 16,
+ .ctl0 = CE0_CTRL_REG,
+ .timing = CE0_TIMING_COMPENSATION_REG,
+ .hclk_mask = 0xf0fff0ff,
+ .hdiv_max = 2,
+ .calibrate = aspeed_spi_ast2600_calibrate,
+ .segment_start = aspeed_spi_segment_ast2600_start,
+ .segment_end = aspeed_spi_segment_ast2600_end,
+ .segment_reg = aspeed_spi_segment_ast2600_reg,
+};
+
+static const struct of_device_id aspeed_spi_matches[] = {
+ { .compatible = "aspeed,ast2400-fmc", .data = &ast2400_fmc_data },
+ { .compatible = "aspeed,ast2400-spi", .data = &ast2400_spi_data },
+ { .compatible = "aspeed,ast2500-fmc", .data = &ast2500_fmc_data },
+ { .compatible = "aspeed,ast2500-spi", .data = &ast2500_spi_data },
+ { .compatible = "aspeed,ast2600-fmc", .data = &ast2600_fmc_data },
+ { .compatible = "aspeed,ast2600-spi", .data = &ast2600_spi_data },
+ { }
+};
+MODULE_DEVICE_TABLE(of, aspeed_spi_matches);
+
+static struct platform_driver aspeed_spi_driver = {
+ .probe = aspeed_spi_probe,
+ .remove = aspeed_spi_remove,
+ .driver = {
+ .name = DEVICE_NAME,
+ .of_match_table = aspeed_spi_matches,
+ }
+};
+
+module_platform_driver(aspeed_spi_driver);
+
+MODULE_DESCRIPTION("ASPEED Static Memory Controller Driver");
+MODULE_AUTHOR("Chin-Ting Kuo <chin-ting_kuo@aspeedtech.com>");
+MODULE_AUTHOR("Cedric Le Goater <clg@kaod.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-at91-usart.c b/drivers/spi/spi-at91-usart.c
new file mode 100644
index 000000000..9cd738682
--- /dev/null
+++ b/drivers/spi/spi-at91-usart.c
@@ -0,0 +1,681 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Driver for AT91 USART Controllers as SPI
+//
+// Copyright (C) 2018 Microchip Technology Inc.
+//
+// Author: Radu Pirea <radu.pirea@microchip.com>
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-direction.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/gpio/consumer.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include <linux/spi/spi.h>
+
+#define US_CR 0x00
+#define US_MR 0x04
+#define US_IER 0x08
+#define US_IDR 0x0C
+#define US_CSR 0x14
+#define US_RHR 0x18
+#define US_THR 0x1C
+#define US_BRGR 0x20
+#define US_VERSION 0xFC
+
+#define US_CR_RSTRX BIT(2)
+#define US_CR_RSTTX BIT(3)
+#define US_CR_RXEN BIT(4)
+#define US_CR_RXDIS BIT(5)
+#define US_CR_TXEN BIT(6)
+#define US_CR_TXDIS BIT(7)
+
+#define US_MR_SPI_MASTER 0x0E
+#define US_MR_CHRL GENMASK(7, 6)
+#define US_MR_CPHA BIT(8)
+#define US_MR_CPOL BIT(16)
+#define US_MR_CLKO BIT(18)
+#define US_MR_WRDBT BIT(20)
+#define US_MR_LOOP BIT(15)
+
+#define US_IR_RXRDY BIT(0)
+#define US_IR_TXRDY BIT(1)
+#define US_IR_OVRE BIT(5)
+
+#define US_BRGR_SIZE BIT(16)
+
+#define US_MIN_CLK_DIV 0x06
+#define US_MAX_CLK_DIV BIT(16)
+
+#define US_RESET (US_CR_RSTRX | US_CR_RSTTX)
+#define US_DISABLE (US_CR_RXDIS | US_CR_TXDIS)
+#define US_ENABLE (US_CR_RXEN | US_CR_TXEN)
+#define US_OVRE_RXRDY_IRQS (US_IR_OVRE | US_IR_RXRDY)
+
+#define US_INIT \
+ (US_MR_SPI_MASTER | US_MR_CHRL | US_MR_CLKO | US_MR_WRDBT)
+#define US_DMA_MIN_BYTES 16
+#define US_DMA_TIMEOUT (msecs_to_jiffies(1000))
+
+/* Register access macros */
+#define at91_usart_spi_readl(port, reg) \
+ readl_relaxed((port)->regs + US_##reg)
+#define at91_usart_spi_writel(port, reg, value) \
+ writel_relaxed((value), (port)->regs + US_##reg)
+
+#define at91_usart_spi_readb(port, reg) \
+ readb_relaxed((port)->regs + US_##reg)
+#define at91_usart_spi_writeb(port, reg, value) \
+ writeb_relaxed((value), (port)->regs + US_##reg)
+
+struct at91_usart_spi {
+ struct platform_device *mpdev;
+ struct spi_transfer *current_transfer;
+ void __iomem *regs;
+ struct device *dev;
+ struct clk *clk;
+
+ struct completion xfer_completion;
+
+ /* used in interrupt to protect data reading */
+ spinlock_t lock;
+
+ phys_addr_t phybase;
+
+ int irq;
+ unsigned int current_tx_remaining_bytes;
+ unsigned int current_rx_remaining_bytes;
+
+ u32 spi_clk;
+ u32 status;
+
+ bool xfer_failed;
+ bool use_dma;
+};
+
+static void dma_callback(void *data)
+{
+ struct spi_controller *ctlr = data;
+ struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);
+
+ at91_usart_spi_writel(aus, IER, US_IR_RXRDY);
+ aus->current_rx_remaining_bytes = 0;
+ complete(&aus->xfer_completion);
+}
+
+static bool at91_usart_spi_can_dma(struct spi_controller *ctrl,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct at91_usart_spi *aus = spi_master_get_devdata(ctrl);
+
+ return aus->use_dma && xfer->len >= US_DMA_MIN_BYTES;
+}
+
+static int at91_usart_spi_configure_dma(struct spi_controller *ctlr,
+ struct at91_usart_spi *aus)
+{
+ struct dma_slave_config slave_config;
+ struct device *dev = &aus->mpdev->dev;
+ phys_addr_t phybase = aus->phybase;
+ dma_cap_mask_t mask;
+ int err = 0;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ ctlr->dma_tx = dma_request_chan(dev, "tx");
+ if (IS_ERR_OR_NULL(ctlr->dma_tx)) {
+ if (IS_ERR(ctlr->dma_tx)) {
+ err = PTR_ERR(ctlr->dma_tx);
+ goto at91_usart_spi_error_clear;
+ }
+
+ dev_dbg(dev,
+ "DMA TX channel not available, SPI unable to use DMA\n");
+ err = -EBUSY;
+ goto at91_usart_spi_error_clear;
+ }
+
+ ctlr->dma_rx = dma_request_chan(dev, "rx");
+ if (IS_ERR_OR_NULL(ctlr->dma_rx)) {
+ if (IS_ERR(ctlr->dma_rx)) {
+ err = PTR_ERR(ctlr->dma_rx);
+ goto at91_usart_spi_error;
+ }
+
+ dev_dbg(dev,
+ "DMA RX channel not available, SPI unable to use DMA\n");
+ err = -EBUSY;
+ goto at91_usart_spi_error;
+ }
+
+ slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ slave_config.dst_addr = (dma_addr_t)phybase + US_THR;
+ slave_config.src_addr = (dma_addr_t)phybase + US_RHR;
+ slave_config.src_maxburst = 1;
+ slave_config.dst_maxburst = 1;
+ slave_config.device_fc = false;
+
+ slave_config.direction = DMA_DEV_TO_MEM;
+ if (dmaengine_slave_config(ctlr->dma_rx, &slave_config)) {
+ dev_err(&ctlr->dev,
+ "failed to configure rx dma channel\n");
+ err = -EINVAL;
+ goto at91_usart_spi_error;
+ }
+
+ slave_config.direction = DMA_MEM_TO_DEV;
+ if (dmaengine_slave_config(ctlr->dma_tx, &slave_config)) {
+ dev_err(&ctlr->dev,
+ "failed to configure tx dma channel\n");
+ err = -EINVAL;
+ goto at91_usart_spi_error;
+ }
+
+ aus->use_dma = true;
+ return 0;
+
+at91_usart_spi_error:
+ if (!IS_ERR_OR_NULL(ctlr->dma_tx))
+ dma_release_channel(ctlr->dma_tx);
+ if (!IS_ERR_OR_NULL(ctlr->dma_rx))
+ dma_release_channel(ctlr->dma_rx);
+ ctlr->dma_tx = NULL;
+ ctlr->dma_rx = NULL;
+
+at91_usart_spi_error_clear:
+ return err;
+}
+
+static void at91_usart_spi_release_dma(struct spi_controller *ctlr)
+{
+ if (ctlr->dma_rx)
+ dma_release_channel(ctlr->dma_rx);
+ if (ctlr->dma_tx)
+ dma_release_channel(ctlr->dma_tx);
+}
+
+static void at91_usart_spi_stop_dma(struct spi_controller *ctlr)
+{
+ if (ctlr->dma_rx)
+ dmaengine_terminate_all(ctlr->dma_rx);
+ if (ctlr->dma_tx)
+ dmaengine_terminate_all(ctlr->dma_tx);
+}
+
+static int at91_usart_spi_dma_transfer(struct spi_controller *ctlr,
+ struct spi_transfer *xfer)
+{
+ struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);
+ struct dma_chan *rxchan = ctlr->dma_rx;
+ struct dma_chan *txchan = ctlr->dma_tx;
+ struct dma_async_tx_descriptor *rxdesc;
+ struct dma_async_tx_descriptor *txdesc;
+ dma_cookie_t cookie;
+
+ /* Disable RX interrupt */
+ at91_usart_spi_writel(aus, IDR, US_IR_RXRDY);
+
+ rxdesc = dmaengine_prep_slave_sg(rxchan,
+ xfer->rx_sg.sgl,
+ xfer->rx_sg.nents,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT |
+ DMA_CTRL_ACK);
+ if (!rxdesc)
+ goto at91_usart_spi_err_dma;
+
+ txdesc = dmaengine_prep_slave_sg(txchan,
+ xfer->tx_sg.sgl,
+ xfer->tx_sg.nents,
+ DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT |
+ DMA_CTRL_ACK);
+ if (!txdesc)
+ goto at91_usart_spi_err_dma;
+
+ rxdesc->callback = dma_callback;
+ rxdesc->callback_param = ctlr;
+
+ cookie = rxdesc->tx_submit(rxdesc);
+ if (dma_submit_error(cookie))
+ goto at91_usart_spi_err_dma;
+
+ cookie = txdesc->tx_submit(txdesc);
+ if (dma_submit_error(cookie))
+ goto at91_usart_spi_err_dma;
+
+ rxchan->device->device_issue_pending(rxchan);
+ txchan->device->device_issue_pending(txchan);
+
+ return 0;
+
+at91_usart_spi_err_dma:
+	/* Re-enable the RX interrupt if something failed and fall back to PIO */
+ at91_usart_spi_writel(aus, IER, US_IR_RXRDY);
+ at91_usart_spi_stop_dma(ctlr);
+
+ return -ENOMEM;
+}
+
+static unsigned long at91_usart_spi_dma_timeout(struct at91_usart_spi *aus)
+{
+ return wait_for_completion_timeout(&aus->xfer_completion,
+ US_DMA_TIMEOUT);
+}
+
+static inline u32 at91_usart_spi_tx_ready(struct at91_usart_spi *aus)
+{
+ return aus->status & US_IR_TXRDY;
+}
+
+static inline u32 at91_usart_spi_rx_ready(struct at91_usart_spi *aus)
+{
+ return aus->status & US_IR_RXRDY;
+}
+
+static inline u32 at91_usart_spi_check_overrun(struct at91_usart_spi *aus)
+{
+ return aus->status & US_IR_OVRE;
+}
+
+static inline u32 at91_usart_spi_read_status(struct at91_usart_spi *aus)
+{
+ aus->status = at91_usart_spi_readl(aus, CSR);
+ return aus->status;
+}
+
+static inline void at91_usart_spi_tx(struct at91_usart_spi *aus)
+{
+ unsigned int len = aus->current_transfer->len;
+ unsigned int remaining = aus->current_tx_remaining_bytes;
+ const u8 *tx_buf = aus->current_transfer->tx_buf;
+
+ if (!remaining)
+ return;
+
+ if (at91_usart_spi_tx_ready(aus)) {
+ at91_usart_spi_writeb(aus, THR, tx_buf[len - remaining]);
+ aus->current_tx_remaining_bytes--;
+ }
+}
+
+static inline void at91_usart_spi_rx(struct at91_usart_spi *aus)
+{
+ int len = aus->current_transfer->len;
+ int remaining = aus->current_rx_remaining_bytes;
+ u8 *rx_buf = aus->current_transfer->rx_buf;
+
+ if (!remaining)
+ return;
+
+ rx_buf[len - remaining] = at91_usart_spi_readb(aus, RHR);
+ aus->current_rx_remaining_bytes--;
+}
+
+static inline void
+at91_usart_spi_set_xfer_speed(struct at91_usart_spi *aus,
+ struct spi_transfer *xfer)
+{
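+	/* BRGR holds the clock divider (SCK = peripheral clock / CD); round
+	 * the divider up so we never exceed the requested transfer speed.
+	 */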
+ at91_usart_spi_writel(aus, BRGR,
+ DIV_ROUND_UP(aus->spi_clk, xfer->speed_hz));
+}
+
+static irqreturn_t at91_usart_spi_interrupt(int irq, void *dev_id)
+{
+ struct spi_controller *controller = dev_id;
+ struct at91_usart_spi *aus = spi_master_get_devdata(controller);
+
+ spin_lock(&aus->lock);
+ at91_usart_spi_read_status(aus);
+
+ if (at91_usart_spi_check_overrun(aus)) {
+ aus->xfer_failed = true;
+ at91_usart_spi_writel(aus, IDR, US_IR_OVRE | US_IR_RXRDY);
+ spin_unlock(&aus->lock);
+ return IRQ_HANDLED;
+ }
+
+ if (at91_usart_spi_rx_ready(aus)) {
+ at91_usart_spi_rx(aus);
+ spin_unlock(&aus->lock);
+ return IRQ_HANDLED;
+ }
+
+ spin_unlock(&aus->lock);
+
+ return IRQ_NONE;
+}
+
+static int at91_usart_spi_setup(struct spi_device *spi)
+{
+ struct at91_usart_spi *aus = spi_master_get_devdata(spi->controller);
+ u32 *ausd = spi->controller_state;
+ unsigned int mr = at91_usart_spi_readl(aus, MR);
+
+ if (spi->mode & SPI_CPOL)
+ mr |= US_MR_CPOL;
+ else
+ mr &= ~US_MR_CPOL;
+
+ if (spi->mode & SPI_CPHA)
+ mr |= US_MR_CPHA;
+ else
+ mr &= ~US_MR_CPHA;
+
+ if (spi->mode & SPI_LOOP)
+ mr |= US_MR_LOOP;
+ else
+ mr &= ~US_MR_LOOP;
+
+ if (!ausd) {
+ ausd = kzalloc(sizeof(*ausd), GFP_KERNEL);
+ if (!ausd)
+ return -ENOMEM;
+
+ spi->controller_state = ausd;
+ }
+
+ *ausd = mr;
+
+ dev_dbg(&spi->dev,
+ "setup: bpw %u mode 0x%x -> mr %d %08x\n",
+ spi->bits_per_word, spi->mode, spi->chip_select, mr);
+
+ return 0;
+}
+
+static int at91_usart_spi_transfer_one(struct spi_controller *ctlr,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);
+ unsigned long dma_timeout = 0;
+ int ret = 0;
+
+ at91_usart_spi_set_xfer_speed(aus, xfer);
+ aus->xfer_failed = false;
+ aus->current_transfer = xfer;
+ aus->current_tx_remaining_bytes = xfer->len;
+ aus->current_rx_remaining_bytes = xfer->len;
+
+ while ((aus->current_tx_remaining_bytes ||
+ aus->current_rx_remaining_bytes) && !aus->xfer_failed) {
+ reinit_completion(&aus->xfer_completion);
+ if (at91_usart_spi_can_dma(ctlr, spi, xfer) &&
+ !ret) {
+ ret = at91_usart_spi_dma_transfer(ctlr, xfer);
+ if (ret)
+ continue;
+
+ dma_timeout = at91_usart_spi_dma_timeout(aus);
+
+ if (WARN_ON(dma_timeout == 0)) {
+ dev_err(&spi->dev, "DMA transfer timeout\n");
+ return -EIO;
+ }
+ aus->current_tx_remaining_bytes = 0;
+ } else {
+ at91_usart_spi_read_status(aus);
+ at91_usart_spi_tx(aus);
+ }
+
+ cpu_relax();
+ }
+
+ if (aus->xfer_failed) {
+ dev_err(aus->dev, "Overrun!\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int at91_usart_spi_prepare_message(struct spi_controller *ctlr,
+ struct spi_message *message)
+{
+ struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);
+ struct spi_device *spi = message->spi;
+ u32 *ausd = spi->controller_state;
+
+ at91_usart_spi_writel(aus, CR, US_ENABLE);
+ at91_usart_spi_writel(aus, IER, US_OVRE_RXRDY_IRQS);
+ at91_usart_spi_writel(aus, MR, *ausd);
+
+ return 0;
+}
+
+static int at91_usart_spi_unprepare_message(struct spi_controller *ctlr,
+ struct spi_message *message)
+{
+ struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);
+
+ at91_usart_spi_writel(aus, CR, US_RESET | US_DISABLE);
+ at91_usart_spi_writel(aus, IDR, US_OVRE_RXRDY_IRQS);
+
+ return 0;
+}
+
+static void at91_usart_spi_cleanup(struct spi_device *spi)
+{
+	u32 *ausd = spi->controller_state;
+
+ spi->controller_state = NULL;
+ kfree(ausd);
+}
+
+static void at91_usart_spi_init(struct at91_usart_spi *aus)
+{
+ at91_usart_spi_writel(aus, MR, US_INIT);
+ at91_usart_spi_writel(aus, CR, US_RESET | US_DISABLE);
+}
+
+static int at91_usart_gpio_setup(struct platform_device *pdev)
+{
+ struct gpio_descs *cs_gpios;
+
+ cs_gpios = devm_gpiod_get_array_optional(&pdev->dev, "cs", GPIOD_OUT_LOW);
+
+ if (IS_ERR(cs_gpios))
+ return PTR_ERR(cs_gpios);
+
+ return 0;
+}
+
+static int at91_usart_spi_probe(struct platform_device *pdev)
+{
+ struct resource *regs;
+ struct spi_controller *controller;
+ struct at91_usart_spi *aus;
+ struct clk *clk;
+ int irq;
+ int ret;
+
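+	/* The register window, IRQ and clock belong to the parent USART
+	 * device; fetch them from there.
+	 */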
+ regs = platform_get_resource(to_platform_device(pdev->dev.parent),
+ IORESOURCE_MEM, 0);
+ if (!regs)
+ return -EINVAL;
+
+ irq = platform_get_irq(to_platform_device(pdev->dev.parent), 0);
+ if (irq < 0)
+ return irq;
+
+ clk = devm_clk_get(pdev->dev.parent, "usart");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ ret = -ENOMEM;
+ controller = spi_alloc_master(&pdev->dev, sizeof(*aus));
+ if (!controller)
+ goto at91_usart_spi_probe_fail;
+
+ ret = at91_usart_gpio_setup(pdev);
+ if (ret)
+ goto at91_usart_spi_probe_fail;
+
+ controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP | SPI_CS_HIGH;
+ controller->dev.of_node = pdev->dev.parent->of_node;
+ controller->bits_per_word_mask = SPI_BPW_MASK(8);
+ controller->setup = at91_usart_spi_setup;
+ controller->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;
+ controller->transfer_one = at91_usart_spi_transfer_one;
+ controller->prepare_message = at91_usart_spi_prepare_message;
+ controller->unprepare_message = at91_usart_spi_unprepare_message;
+ controller->can_dma = at91_usart_spi_can_dma;
+ controller->cleanup = at91_usart_spi_cleanup;
+ controller->max_speed_hz = DIV_ROUND_UP(clk_get_rate(clk),
+ US_MIN_CLK_DIV);
+ controller->min_speed_hz = DIV_ROUND_UP(clk_get_rate(clk),
+ US_MAX_CLK_DIV);
+ platform_set_drvdata(pdev, controller);
+
+ aus = spi_master_get_devdata(controller);
+
+ aus->dev = &pdev->dev;
+ aus->regs = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(aus->regs)) {
+ ret = PTR_ERR(aus->regs);
+ goto at91_usart_spi_probe_fail;
+ }
+
+ aus->irq = irq;
+ aus->clk = clk;
+
+ ret = devm_request_irq(&pdev->dev, irq, at91_usart_spi_interrupt, 0,
+ dev_name(&pdev->dev), controller);
+ if (ret)
+ goto at91_usart_spi_probe_fail;
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ goto at91_usart_spi_probe_fail;
+
+ aus->spi_clk = clk_get_rate(clk);
+ at91_usart_spi_init(aus);
+
+ aus->phybase = regs->start;
+
+ aus->mpdev = to_platform_device(pdev->dev.parent);
+
+ ret = at91_usart_spi_configure_dma(controller, aus);
+ if (ret)
+ goto at91_usart_fail_dma;
+
+ spin_lock_init(&aus->lock);
+ init_completion(&aus->xfer_completion);
+
+ ret = devm_spi_register_master(&pdev->dev, controller);
+ if (ret)
+ goto at91_usart_fail_register_master;
+
+ dev_info(&pdev->dev,
+ "AT91 USART SPI Controller version 0x%x at %pa (irq %d)\n",
+ at91_usart_spi_readl(aus, VERSION),
+ &regs->start, irq);
+
+ return 0;
+
+at91_usart_fail_register_master:
+ at91_usart_spi_release_dma(controller);
+at91_usart_fail_dma:
+ clk_disable_unprepare(clk);
+at91_usart_spi_probe_fail:
+ spi_master_put(controller);
+ return ret;
+}
+
+__maybe_unused static int at91_usart_spi_runtime_suspend(struct device *dev)
+{
+ struct spi_controller *ctlr = dev_get_drvdata(dev);
+ struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);
+
+ clk_disable_unprepare(aus->clk);
+ pinctrl_pm_select_sleep_state(dev);
+
+ return 0;
+}
+
+__maybe_unused static int at91_usart_spi_runtime_resume(struct device *dev)
+{
+ struct spi_controller *ctrl = dev_get_drvdata(dev);
+ struct at91_usart_spi *aus = spi_master_get_devdata(ctrl);
+
+ pinctrl_pm_select_default_state(dev);
+
+ return clk_prepare_enable(aus->clk);
+}
+
+__maybe_unused static int at91_usart_spi_suspend(struct device *dev)
+{
+ struct spi_controller *ctrl = dev_get_drvdata(dev);
+ int ret;
+
+ ret = spi_controller_suspend(ctrl);
+ if (ret)
+ return ret;
+
+ if (!pm_runtime_suspended(dev))
+ at91_usart_spi_runtime_suspend(dev);
+
+ return 0;
+}
+
+__maybe_unused static int at91_usart_spi_resume(struct device *dev)
+{
+ struct spi_controller *ctrl = dev_get_drvdata(dev);
+ struct at91_usart_spi *aus = spi_master_get_devdata(ctrl);
+ int ret;
+
+ if (!pm_runtime_suspended(dev)) {
+ ret = at91_usart_spi_runtime_resume(dev);
+ if (ret)
+ return ret;
+ }
+
+ at91_usart_spi_init(aus);
+
+ return spi_controller_resume(ctrl);
+}
+
+static int at91_usart_spi_remove(struct platform_device *pdev)
+{
+ struct spi_controller *ctlr = platform_get_drvdata(pdev);
+ struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);
+
+ at91_usart_spi_release_dma(ctlr);
+ clk_disable_unprepare(aus->clk);
+
+ return 0;
+}
+
+static const struct dev_pm_ops at91_usart_spi_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(at91_usart_spi_suspend, at91_usart_spi_resume)
+ SET_RUNTIME_PM_OPS(at91_usart_spi_runtime_suspend,
+ at91_usart_spi_runtime_resume, NULL)
+};
+
+static struct platform_driver at91_usart_spi_driver = {
+ .driver = {
+ .name = "at91_usart_spi",
+ .pm = &at91_usart_spi_pm_ops,
+ },
+ .probe = at91_usart_spi_probe,
+ .remove = at91_usart_spi_remove,
+};
+
+module_platform_driver(at91_usart_spi_driver);
+
+MODULE_DESCRIPTION("Microchip AT91 USART SPI Controller driver");
+MODULE_AUTHOR("Radu Pirea <radu.pirea@microchip.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:at91_usart_spi");
diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
new file mode 100644
index 000000000..607e7a49f
--- /dev/null
+++ b/drivers/spi/spi-ath79.c
@@ -0,0 +1,277 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * SPI controller driver for the Atheros AR71XX/AR724X/AR913X SoCs
+ *
+ * Copyright (C) 2009-2011 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * This driver is based on spi-gpio.c:
+ * Copyright (C) 2006,2008 David Brownell
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+#include <linux/spi/spi_bitbang.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+
+#define DRV_NAME "ath79-spi"
+
+#define ATH79_SPI_RRW_DELAY_FACTOR 12000
+#define MHZ (1000 * 1000)
+
+#define AR71XX_SPI_REG_FS 0x00 /* Function Select */
+#define AR71XX_SPI_REG_CTRL 0x04 /* SPI Control */
+#define AR71XX_SPI_REG_IOC 0x08 /* SPI I/O Control */
+#define AR71XX_SPI_REG_RDS 0x0c /* Read Data Shift */
+
+#define AR71XX_SPI_FS_GPIO BIT(0) /* Enable GPIO mode */
+
+#define AR71XX_SPI_IOC_DO BIT(0) /* Data Out pin */
+#define AR71XX_SPI_IOC_CLK BIT(8) /* CLK pin */
+#define AR71XX_SPI_IOC_CS(n) BIT(16 + (n))
+
+struct ath79_spi {
+ struct spi_bitbang bitbang;
+ u32 ioc_base;
+ u32 reg_ctrl;
+ void __iomem *base;
+ struct clk *clk;
+ unsigned int rrw_delay;
+};
+
+static inline u32 ath79_spi_rr(struct ath79_spi *sp, unsigned int reg)
+{
+ return ioread32(sp->base + reg);
+}
+
+static inline void ath79_spi_wr(struct ath79_spi *sp, unsigned int reg, u32 val)
+{
+ iowrite32(val, sp->base + reg);
+}
+
+static inline struct ath79_spi *ath79_spidev_to_sp(struct spi_device *spi)
+{
+ return spi_master_get_devdata(spi->master);
+}
+
+static inline void ath79_spi_delay(struct ath79_spi *sp, unsigned int nsecs)
+{
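+	/* A register write already takes roughly rrw_delay ns, so only delay
+	 * for the remainder of the requested time.
+	 */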
+ if (nsecs > sp->rrw_delay)
+ ndelay(nsecs - sp->rrw_delay);
+}
+
+static void ath79_spi_chipselect(struct spi_device *spi, int is_active)
+{
+ struct ath79_spi *sp = ath79_spidev_to_sp(spi);
+ int cs_high = (spi->mode & SPI_CS_HIGH) ? is_active : !is_active;
+ u32 cs_bit = AR71XX_SPI_IOC_CS(spi->chip_select);
+
+ if (cs_high)
+ sp->ioc_base |= cs_bit;
+ else
+ sp->ioc_base &= ~cs_bit;
+
+ ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base);
+}
+
+static void ath79_spi_enable(struct ath79_spi *sp)
+{
+ /* enable GPIO mode */
+ ath79_spi_wr(sp, AR71XX_SPI_REG_FS, AR71XX_SPI_FS_GPIO);
+
+ /* save CTRL register */
+ sp->reg_ctrl = ath79_spi_rr(sp, AR71XX_SPI_REG_CTRL);
+ sp->ioc_base = ath79_spi_rr(sp, AR71XX_SPI_REG_IOC);
+
+ /* clear clk and mosi in the base state */
+ sp->ioc_base &= ~(AR71XX_SPI_IOC_DO | AR71XX_SPI_IOC_CLK);
+
+ /* TODO: setup speed? */
+ ath79_spi_wr(sp, AR71XX_SPI_REG_CTRL, 0x43);
+}
+
+static void ath79_spi_disable(struct ath79_spi *sp)
+{
+ /* restore CTRL register */
+ ath79_spi_wr(sp, AR71XX_SPI_REG_CTRL, sp->reg_ctrl);
+ /* disable GPIO mode */
+ ath79_spi_wr(sp, AR71XX_SPI_REG_FS, 0);
+}
+
+static u32 ath79_spi_txrx_mode0(struct spi_device *spi, unsigned int nsecs,
+ u32 word, u8 bits, unsigned flags)
+{
+ struct ath79_spi *sp = ath79_spidev_to_sp(spi);
+ u32 ioc = sp->ioc_base;
+
+ /* clock starts at inactive polarity */
+ for (word <<= (32 - bits); likely(bits); bits--) {
+ u32 out;
+
+ if (word & (1 << 31))
+ out = ioc | AR71XX_SPI_IOC_DO;
+ else
+ out = ioc & ~AR71XX_SPI_IOC_DO;
+
+ /* setup MSB (to slave) on trailing edge */
+ ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, out);
+ ath79_spi_delay(sp, nsecs);
+ ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, out | AR71XX_SPI_IOC_CLK);
+ ath79_spi_delay(sp, nsecs);
+ if (bits == 1)
+ ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, out);
+
+ word <<= 1;
+ }
+
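+	/* Bits clocked in from the device accumulate in the Read Data Shift
+	 * register; return the word that was just shifted in.
+	 */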
+ return ath79_spi_rr(sp, AR71XX_SPI_REG_RDS);
+}
+
+static int ath79_exec_mem_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ struct ath79_spi *sp = ath79_spidev_to_sp(mem->spi);
+
+	/* Ensure the read is performed on the device connected to hardware CS0 */
+ if (mem->spi->chip_select || mem->spi->cs_gpiod)
+ return -ENOTSUPP;
+
+ /* Only use for fast-read op. */
+ if (op->cmd.opcode != 0x0b || op->data.dir != SPI_MEM_DATA_IN ||
+ op->addr.nbytes != 3 || op->dummy.nbytes != 1)
+ return -ENOTSUPP;
+
+ /* disable GPIO mode */
+ ath79_spi_wr(sp, AR71XX_SPI_REG_FS, 0);
+
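+	/* With GPIO mode disabled the flash is memory mapped, so the fast
+	 * read is a plain copy from the mapped window at the op address.
+	 */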
+ memcpy_fromio(op->data.buf.in, sp->base + op->addr.val, op->data.nbytes);
+
+ /* enable GPIO mode */
+ ath79_spi_wr(sp, AR71XX_SPI_REG_FS, AR71XX_SPI_FS_GPIO);
+
+ /* restore IOC register */
+ ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base);
+
+ return 0;
+}
+
+static const struct spi_controller_mem_ops ath79_mem_ops = {
+ .exec_op = ath79_exec_mem_op,
+};
+
+static int ath79_spi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct ath79_spi *sp;
+ unsigned long rate;
+ int ret;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*sp));
+ if (master == NULL) {
+ dev_err(&pdev->dev, "failed to allocate spi master\n");
+ return -ENOMEM;
+ }
+
+ sp = spi_master_get_devdata(master);
+ master->dev.of_node = pdev->dev.of_node;
+ platform_set_drvdata(pdev, sp);
+
+ master->use_gpio_descriptors = true;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
+ master->flags = SPI_MASTER_GPIO_SS;
+ master->num_chipselect = 3;
+ master->mem_ops = &ath79_mem_ops;
+
+ sp->bitbang.master = master;
+ sp->bitbang.chipselect = ath79_spi_chipselect;
+ sp->bitbang.txrx_word[SPI_MODE_0] = ath79_spi_txrx_mode0;
+ sp->bitbang.flags = SPI_CS_HIGH;
+
+ sp->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(sp->base)) {
+ ret = PTR_ERR(sp->base);
+ goto err_put_master;
+ }
+
+ sp->clk = devm_clk_get(&pdev->dev, "ahb");
+ if (IS_ERR(sp->clk)) {
+ ret = PTR_ERR(sp->clk);
+ goto err_put_master;
+ }
+
+ ret = clk_prepare_enable(sp->clk);
+ if (ret)
+ goto err_put_master;
+
+ rate = DIV_ROUND_UP(clk_get_rate(sp->clk), MHZ);
+ if (!rate) {
+ ret = -EINVAL;
+ goto err_clk_disable;
+ }
+
+ sp->rrw_delay = ATH79_SPI_RRW_DELAY_FACTOR / rate;
+ dev_dbg(&pdev->dev, "register read/write delay is %u nsecs\n",
+ sp->rrw_delay);
+
+ ath79_spi_enable(sp);
+ ret = spi_bitbang_start(&sp->bitbang);
+ if (ret)
+ goto err_disable;
+
+ return 0;
+
+err_disable:
+ ath79_spi_disable(sp);
+err_clk_disable:
+ clk_disable_unprepare(sp->clk);
+err_put_master:
+ spi_master_put(sp->bitbang.master);
+
+ return ret;
+}
+
+static int ath79_spi_remove(struct platform_device *pdev)
+{
+ struct ath79_spi *sp = platform_get_drvdata(pdev);
+
+ spi_bitbang_stop(&sp->bitbang);
+ ath79_spi_disable(sp);
+ clk_disable_unprepare(sp->clk);
+ spi_master_put(sp->bitbang.master);
+
+ return 0;
+}
+
+static void ath79_spi_shutdown(struct platform_device *pdev)
+{
+ ath79_spi_remove(pdev);
+}
+
+static const struct of_device_id ath79_spi_of_match[] = {
+ { .compatible = "qca,ar7100-spi", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ath79_spi_of_match);
+
+static struct platform_driver ath79_spi_driver = {
+ .probe = ath79_spi_probe,
+ .remove = ath79_spi_remove,
+ .shutdown = ath79_spi_shutdown,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = ath79_spi_of_match,
+ },
+};
+module_platform_driver(ath79_spi_driver);
+
+MODULE_DESCRIPTION("SPI controller driver for Atheros AR71XX/AR724X/AR913X");
+MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
new file mode 100644
index 000000000..78daf2b21
--- /dev/null
+++ b/drivers/spi/spi-atmel.c
@@ -0,0 +1,1802 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Driver for Atmel AT32 and AT91 SPI Controllers
+ *
+ * Copyright (C) 2006 Atmel Corporation
+ */
+
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+
+#include <linux/io.h>
+#include <linux/gpio/consumer.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/pm_runtime.h>
+#include <linux/iopoll.h>
+#include <trace/events/spi.h>
+
+/* SPI register offsets */
+#define SPI_CR 0x0000
+#define SPI_MR 0x0004
+#define SPI_RDR 0x0008
+#define SPI_TDR 0x000c
+#define SPI_SR 0x0010
+#define SPI_IER 0x0014
+#define SPI_IDR 0x0018
+#define SPI_IMR 0x001c
+#define SPI_CSR0 0x0030
+#define SPI_CSR1 0x0034
+#define SPI_CSR2 0x0038
+#define SPI_CSR3 0x003c
+#define SPI_FMR 0x0040
+#define SPI_FLR 0x0044
+#define SPI_VERSION 0x00fc
+#define SPI_RPR 0x0100
+#define SPI_RCR 0x0104
+#define SPI_TPR 0x0108
+#define SPI_TCR 0x010c
+#define SPI_RNPR 0x0110
+#define SPI_RNCR 0x0114
+#define SPI_TNPR 0x0118
+#define SPI_TNCR 0x011c
+#define SPI_PTCR 0x0120
+#define SPI_PTSR 0x0124
+
+/* Bitfields in CR */
+#define SPI_SPIEN_OFFSET 0
+#define SPI_SPIEN_SIZE 1
+#define SPI_SPIDIS_OFFSET 1
+#define SPI_SPIDIS_SIZE 1
+#define SPI_SWRST_OFFSET 7
+#define SPI_SWRST_SIZE 1
+#define SPI_LASTXFER_OFFSET 24
+#define SPI_LASTXFER_SIZE 1
+#define SPI_TXFCLR_OFFSET 16
+#define SPI_TXFCLR_SIZE 1
+#define SPI_RXFCLR_OFFSET 17
+#define SPI_RXFCLR_SIZE 1
+#define SPI_FIFOEN_OFFSET 30
+#define SPI_FIFOEN_SIZE 1
+#define SPI_FIFODIS_OFFSET 31
+#define SPI_FIFODIS_SIZE 1
+
+/* Bitfields in MR */
+#define SPI_MSTR_OFFSET 0
+#define SPI_MSTR_SIZE 1
+#define SPI_PS_OFFSET 1
+#define SPI_PS_SIZE 1
+#define SPI_PCSDEC_OFFSET 2
+#define SPI_PCSDEC_SIZE 1
+#define SPI_FDIV_OFFSET 3
+#define SPI_FDIV_SIZE 1
+#define SPI_MODFDIS_OFFSET 4
+#define SPI_MODFDIS_SIZE 1
+#define SPI_WDRBT_OFFSET 5
+#define SPI_WDRBT_SIZE 1
+#define SPI_LLB_OFFSET 7
+#define SPI_LLB_SIZE 1
+#define SPI_PCS_OFFSET 16
+#define SPI_PCS_SIZE 4
+#define SPI_DLYBCS_OFFSET 24
+#define SPI_DLYBCS_SIZE 8
+
+/* Bitfields in RDR */
+#define SPI_RD_OFFSET 0
+#define SPI_RD_SIZE 16
+
+/* Bitfields in TDR */
+#define SPI_TD_OFFSET 0
+#define SPI_TD_SIZE 16
+
+/* Bitfields in SR */
+#define SPI_RDRF_OFFSET 0
+#define SPI_RDRF_SIZE 1
+#define SPI_TDRE_OFFSET 1
+#define SPI_TDRE_SIZE 1
+#define SPI_MODF_OFFSET 2
+#define SPI_MODF_SIZE 1
+#define SPI_OVRES_OFFSET 3
+#define SPI_OVRES_SIZE 1
+#define SPI_ENDRX_OFFSET 4
+#define SPI_ENDRX_SIZE 1
+#define SPI_ENDTX_OFFSET 5
+#define SPI_ENDTX_SIZE 1
+#define SPI_RXBUFF_OFFSET 6
+#define SPI_RXBUFF_SIZE 1
+#define SPI_TXBUFE_OFFSET 7
+#define SPI_TXBUFE_SIZE 1
+#define SPI_NSSR_OFFSET 8
+#define SPI_NSSR_SIZE 1
+#define SPI_TXEMPTY_OFFSET 9
+#define SPI_TXEMPTY_SIZE 1
+#define SPI_SPIENS_OFFSET 16
+#define SPI_SPIENS_SIZE 1
+#define SPI_TXFEF_OFFSET 24
+#define SPI_TXFEF_SIZE 1
+#define SPI_TXFFF_OFFSET 25
+#define SPI_TXFFF_SIZE 1
+#define SPI_TXFTHF_OFFSET 26
+#define SPI_TXFTHF_SIZE 1
+#define SPI_RXFEF_OFFSET 27
+#define SPI_RXFEF_SIZE 1
+#define SPI_RXFFF_OFFSET 28
+#define SPI_RXFFF_SIZE 1
+#define SPI_RXFTHF_OFFSET 29
+#define SPI_RXFTHF_SIZE 1
+#define SPI_TXFPTEF_OFFSET 30
+#define SPI_TXFPTEF_SIZE 1
+#define SPI_RXFPTEF_OFFSET 31
+#define SPI_RXFPTEF_SIZE 1
+
+/* Bitfields in CSR0 */
+#define SPI_CPOL_OFFSET 0
+#define SPI_CPOL_SIZE 1
+#define SPI_NCPHA_OFFSET 1
+#define SPI_NCPHA_SIZE 1
+#define SPI_CSAAT_OFFSET 3
+#define SPI_CSAAT_SIZE 1
+#define SPI_BITS_OFFSET 4
+#define SPI_BITS_SIZE 4
+#define SPI_SCBR_OFFSET 8
+#define SPI_SCBR_SIZE 8
+#define SPI_DLYBS_OFFSET 16
+#define SPI_DLYBS_SIZE 8
+#define SPI_DLYBCT_OFFSET 24
+#define SPI_DLYBCT_SIZE 8
+
+/* Bitfields in RCR */
+#define SPI_RXCTR_OFFSET 0
+#define SPI_RXCTR_SIZE 16
+
+/* Bitfields in TCR */
+#define SPI_TXCTR_OFFSET 0
+#define SPI_TXCTR_SIZE 16
+
+/* Bitfields in RNCR */
+#define SPI_RXNCR_OFFSET 0
+#define SPI_RXNCR_SIZE 16
+
+/* Bitfields in TNCR */
+#define SPI_TXNCR_OFFSET 0
+#define SPI_TXNCR_SIZE 16
+
+/* Bitfields in PTCR */
+#define SPI_RXTEN_OFFSET 0
+#define SPI_RXTEN_SIZE 1
+#define SPI_RXTDIS_OFFSET 1
+#define SPI_RXTDIS_SIZE 1
+#define SPI_TXTEN_OFFSET 8
+#define SPI_TXTEN_SIZE 1
+#define SPI_TXTDIS_OFFSET 9
+#define SPI_TXTDIS_SIZE 1
+
+/* Bitfields in FMR */
+#define SPI_TXRDYM_OFFSET 0
+#define SPI_TXRDYM_SIZE 2
+#define SPI_RXRDYM_OFFSET 4
+#define SPI_RXRDYM_SIZE 2
+#define SPI_TXFTHRES_OFFSET 16
+#define SPI_TXFTHRES_SIZE 6
+#define SPI_RXFTHRES_OFFSET 24
+#define SPI_RXFTHRES_SIZE 6
+
+/* Bitfields in FLR */
+#define SPI_TXFL_OFFSET 0
+#define SPI_TXFL_SIZE 6
+#define SPI_RXFL_OFFSET 16
+#define SPI_RXFL_SIZE 6
+
+/* Constants for BITS */
+#define SPI_BITS_8_BPT 0
+#define SPI_BITS_9_BPT 1
+#define SPI_BITS_10_BPT 2
+#define SPI_BITS_11_BPT 3
+#define SPI_BITS_12_BPT 4
+#define SPI_BITS_13_BPT 5
+#define SPI_BITS_14_BPT 6
+#define SPI_BITS_15_BPT 7
+#define SPI_BITS_16_BPT 8
+#define SPI_ONE_DATA 0
+#define SPI_TWO_DATA 1
+#define SPI_FOUR_DATA 2
+
+/* Bit manipulation macros */
+#define SPI_BIT(name) \
+ (1 << SPI_##name##_OFFSET)
+#define SPI_BF(name, value) \
+ (((value) & ((1 << SPI_##name##_SIZE) - 1)) << SPI_##name##_OFFSET)
+#define SPI_BFEXT(name, value) \
+ (((value) >> SPI_##name##_OFFSET) & ((1 << SPI_##name##_SIZE) - 1))
+#define SPI_BFINS(name, value, old) \
+ (((old) & ~(((1 << SPI_##name##_SIZE) - 1) << SPI_##name##_OFFSET)) \
+ | SPI_BF(name, value))
+
+/* Register access macros */
+#define spi_readl(port, reg) \
+ readl_relaxed((port)->regs + SPI_##reg)
+#define spi_writel(port, reg, value) \
+ writel_relaxed((value), (port)->regs + SPI_##reg)
+#define spi_writew(port, reg, value) \
+ writew_relaxed((value), (port)->regs + SPI_##reg)
+
+/* use PIO for small transfers, avoiding DMA setup/teardown overhead and
+ * cache operations; better heuristics consider wordsize and bitrate.
+ */
+#define DMA_MIN_BYTES 16
+
+#define SPI_DMA_TIMEOUT (msecs_to_jiffies(1000))
+
+#define AUTOSUSPEND_TIMEOUT 2000
+
+struct atmel_spi_caps {
+ bool is_spi2;
+ bool has_wdrbt;
+ bool has_dma_support;
+ bool has_pdc_support;
+};
+
+/*
+ * The core SPI transfer engine just talks to a register bank to set up
+ * DMA transfers; transfer queue progress is driven by IRQs. The clock
+ * framework provides the base clock, subdivided for each spi_device.
+ */
+struct atmel_spi {
+ spinlock_t lock;
+ unsigned long flags;
+
+ phys_addr_t phybase;
+ void __iomem *regs;
+ int irq;
+ struct clk *clk;
+ struct platform_device *pdev;
+ unsigned long spi_clk;
+
+ struct spi_transfer *current_transfer;
+ int current_remaining_bytes;
+ int done_status;
+ dma_addr_t dma_addr_rx_bbuf;
+ dma_addr_t dma_addr_tx_bbuf;
+ void *addr_rx_bbuf;
+ void *addr_tx_bbuf;
+
+ struct completion xfer_completion;
+
+ struct atmel_spi_caps caps;
+
+ bool use_dma;
+ bool use_pdc;
+
+ bool keep_cs;
+
+ u32 fifo_size;
+ bool last_polarity;
+ u8 native_cs_free;
+ u8 native_cs_for_gpio;
+};
+
+/* Controller-specific per-slave state */
+struct atmel_spi_device {
+ u32 csr;
+};
+
+#define SPI_MAX_DMA_XFER 65535 /* true for both PDC and DMA */
+#define INVALID_DMA_ADDRESS 0xffffffff
+
+/*
+ * This frequency can be anything supported by the controller, but to avoid
+ * unnecessary delay, the highest possible frequency is chosen.
+ *
+ * This frequency is the highest possible which is not interfering with other
+ * chip select registers (see Note for Serial Clock Bit Rate configuration in
+ * Atmel-11121F-ATARM-SAMA5D3-Series-Datasheet_02-Feb-16, page 1283)
+ */
+#define DUMMY_MSG_FREQUENCY 0x02
+/*
+ * 8 bits is the minimum data the controller is capable of sending.
+ *
+ * This message can be anything as it should not be treated by any SPI device.
+ */
+#define DUMMY_MSG 0xAA
+
+/*
+ * Version 2 of the SPI controller has
+ * - CR.LASTXFER
+ * - SPI_MR.DIV32 may become FDIV or must-be-zero (here: always zero)
+ * - SPI_SR.TXEMPTY, SPI_SR.NSSR (and corresponding irqs)
+ * - SPI_CSRx.CSAAT
+ * - SPI_CSRx.SBCR allows faster clocking
+ */
+static bool atmel_spi_is_v2(struct atmel_spi *as)
+{
+ return as->caps.is_spi2;
+}
+
+/*
+ * Send a dummy message.
+ *
+ * This is sometimes needed when using a CS GPIO to force clock transition when
+ * switching between devices with different polarities.
+ */
+static void atmel_spi_send_dummy(struct atmel_spi *as, struct spi_device *spi, int chip_select)
+{
+ u32 status;
+ u32 csr;
+
+ /*
+ * Set a clock frequency to allow sending message on SPI bus.
+ * The frequency here can be anything, but is needed for
+ * the controller to send the data.
+ */
+ csr = spi_readl(as, CSR0 + 4 * chip_select);
+ csr = SPI_BFINS(SCBR, DUMMY_MSG_FREQUENCY, csr);
+ spi_writel(as, CSR0 + 4 * chip_select, csr);
+
+ /*
+ * Read all data coming from SPI bus, needed to be able to send
+ * the message.
+ */
+ spi_readl(as, RDR);
+ while (spi_readl(as, SR) & SPI_BIT(RDRF)) {
+ spi_readl(as, RDR);
+ cpu_relax();
+ }
+
+ spi_writel(as, TDR, DUMMY_MSG);
+
+ readl_poll_timeout_atomic(as->regs + SPI_SR, status,
+ (status & SPI_BIT(TXEMPTY)), 1, 1000);
+}
+
+
+/*
+ * Earlier SPI controllers (e.g. on at91rm9200) have a design bug whereby
+ * they assume that spi slave device state will not change on deselect, so
+ * that automagic deselection is OK. ("NPCSx rises if no data is to be
+ * transmitted") Not so! Workaround uses nCSx pins as GPIOs; or newer
+ * controllers have CSAAT and friends.
+ *
+ * Even on controllers newer than the at91rm9200, using GPIOs can make sense
+ * as it lets us support active-high chipselects despite the controller's
+ * belief that only active-low devices/systems exist.
+ *
+ * However, at91rm9200 has a second erratum whereby nCS0 doesn't work
+ * right when driven with GPIO. ("Mode Fault does not allow more than one
+ * Master on Chip Select 0.") No workaround exists for that ... so for
+ * nCS0 on that chip, we (a) don't use the GPIO, (b) can't support CS_HIGH,
+ * and (c) will trigger that first erratum in some cases.
+ *
+ * When changing the clock polarity, the SPI controller waits for the next
+ * transmission to enforce the default clock state. This may be an issue when
+ * using a GPIO as Chip Select: the clock level is applied only when the first
+ * packet is sent, once the CS has already been asserted. The workaround is to
+ * avoid this by sending a first (dummy) message before toggling the CS state.
+ */
+static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
+{
+ struct atmel_spi_device *asd = spi->controller_state;
+ bool new_polarity;
+ int chip_select;
+ u32 mr;
+
+ if (spi->cs_gpiod)
+ chip_select = as->native_cs_for_gpio;
+ else
+ chip_select = spi->chip_select;
+
+ if (atmel_spi_is_v2(as)) {
+ spi_writel(as, CSR0 + 4 * chip_select, asd->csr);
+		/* For older SPI versions, there is an issue whereby a PDC
+		 * transfer on CS1,2,3 needs SPI_CSR0.BITS configured the same
+		 * as SPI_CSR1,2,3.BITS.
+		 */
+ spi_writel(as, CSR0, asd->csr);
+ if (as->caps.has_wdrbt) {
+ spi_writel(as, MR,
+ SPI_BF(PCS, ~(0x01 << chip_select))
+ | SPI_BIT(WDRBT)
+ | SPI_BIT(MODFDIS)
+ | SPI_BIT(MSTR));
+ } else {
+ spi_writel(as, MR,
+ SPI_BF(PCS, ~(0x01 << chip_select))
+ | SPI_BIT(MODFDIS)
+ | SPI_BIT(MSTR));
+ }
+
+ mr = spi_readl(as, MR);
+
+ /*
+		 * Ensure the clock polarity is valid before we actually
+		 * assert the CS, to avoid spurious clock edges being
+		 * processed by the spi devices.
+ */
+ if (spi_get_csgpiod(spi, 0)) {
+ new_polarity = (asd->csr & SPI_BIT(CPOL)) != 0;
+ if (new_polarity != as->last_polarity) {
+ /*
+ * Need to disable the GPIO before sending the dummy
+ * message because it is already set by the spi core.
+ */
+ gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), 0);
+ atmel_spi_send_dummy(as, spi, chip_select);
+ as->last_polarity = new_polarity;
+ gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), 1);
+ }
+ }
+ } else {
+ u32 cpol = (spi->mode & SPI_CPOL) ? SPI_BIT(CPOL) : 0;
+ int i;
+ u32 csr;
+
+ /* Make sure clock polarity is correct */
+ for (i = 0; i < spi->master->num_chipselect; i++) {
+ csr = spi_readl(as, CSR0 + 4 * i);
+ if ((csr ^ cpol) & SPI_BIT(CPOL))
+ spi_writel(as, CSR0 + 4 * i,
+ csr ^ SPI_BIT(CPOL));
+ }
+
+ mr = spi_readl(as, MR);
+ mr = SPI_BFINS(PCS, ~(1 << chip_select), mr);
+ spi_writel(as, MR, mr);
+ }
+
+ dev_dbg(&spi->dev, "activate NPCS, mr %08x\n", mr);
+}
+
+static void cs_deactivate(struct atmel_spi *as, struct spi_device *spi)
+{
+ int chip_select;
+ u32 mr;
+
+ if (spi->cs_gpiod)
+ chip_select = as->native_cs_for_gpio;
+ else
+ chip_select = spi->chip_select;
+
+ /* only deactivate *this* device; sometimes transfers to
+ * another device may be active when this routine is called.
+ */
+ mr = spi_readl(as, MR);
+ if (~SPI_BFEXT(PCS, mr) & (1 << chip_select)) {
+ mr = SPI_BFINS(PCS, 0xf, mr);
+ spi_writel(as, MR, mr);
+ }
+
+ dev_dbg(&spi->dev, "DEactivate NPCS, mr %08x\n", mr);
+
+ if (!spi->cs_gpiod)
+ spi_writel(as, CR, SPI_BIT(LASTXFER));
+}
+
+static void atmel_spi_lock(struct atmel_spi *as) __acquires(&as->lock)
+{
+ spin_lock_irqsave(&as->lock, as->flags);
+}
+
+static void atmel_spi_unlock(struct atmel_spi *as) __releases(&as->lock)
+{
+ spin_unlock_irqrestore(&as->lock, as->flags);
+}
+
+static inline bool atmel_spi_is_vmalloc_xfer(struct spi_transfer *xfer)
+{
+ return is_vmalloc_addr(xfer->tx_buf) || is_vmalloc_addr(xfer->rx_buf);
+}
+
+static inline bool atmel_spi_use_dma(struct atmel_spi *as,
+ struct spi_transfer *xfer)
+{
+ return as->use_dma && xfer->len >= DMA_MIN_BYTES;
+}
+
+static bool atmel_spi_can_dma(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct atmel_spi *as = spi_master_get_devdata(master);
+
+ if (IS_ENABLED(CONFIG_SOC_SAM_V4_V5))
+ return atmel_spi_use_dma(as, xfer) &&
+ !atmel_spi_is_vmalloc_xfer(xfer);
+ else
+ return atmel_spi_use_dma(as, xfer);
+
+}
+
+static int atmel_spi_dma_slave_config(struct atmel_spi *as, u8 bits_per_word)
+{
+ struct spi_master *master = platform_get_drvdata(as->pdev);
+ struct dma_slave_config slave_config;
+ int err = 0;
+
+ if (bits_per_word > 8) {
+ slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ } else {
+ slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ }
+
+ slave_config.dst_addr = (dma_addr_t)as->phybase + SPI_TDR;
+ slave_config.src_addr = (dma_addr_t)as->phybase + SPI_RDR;
+ slave_config.src_maxburst = 1;
+ slave_config.dst_maxburst = 1;
+ slave_config.device_fc = false;
+
+ /*
+ * This driver uses fixed peripheral select mode (PS bit set to '0' in
+ * the Mode Register).
+ * So according to the datasheet, when FIFOs are available (and
+ * enabled), the Transmit FIFO operates in Multiple Data Mode.
+ * In this mode, up to 2 data, not 4, can be written into the Transmit
+ * Data Register in a single access.
+ * However, the first data has to be written into the lowest 16 bits and
+ * the second data into the highest 16 bits of the Transmit
+ * Data Register. For 8bit data (the most frequent case), it would
+ * require to rework tx_buf so each data would actually fit 16 bits.
+ * So we'd rather write only one data at the time. Hence the transmit
+ * path works the same whether FIFOs are available (and enabled) or not.
+ */
+ if (dmaengine_slave_config(master->dma_tx, &slave_config)) {
+ dev_err(&as->pdev->dev,
+ "failed to configure tx dma channel\n");
+ err = -EINVAL;
+ }
+
+ /*
+ * This driver configures the spi controller for master mode (MSTR bit
+ * set to '1' in the Mode Register).
+ * So according to the datasheet, when FIFOs are available (and
+ * enabled), the Receive FIFO operates in Single Data Mode.
+ * So the receive path works the same whether FIFOs are available (and
+ * enabled) or not.
+ */
+ if (dmaengine_slave_config(master->dma_rx, &slave_config)) {
+ dev_err(&as->pdev->dev,
+ "failed to configure rx dma channel\n");
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static int atmel_spi_configure_dma(struct spi_master *master,
+ struct atmel_spi *as)
+{
+ struct device *dev = &as->pdev->dev;
+ int err;
+
+ master->dma_tx = dma_request_chan(dev, "tx");
+ if (IS_ERR(master->dma_tx)) {
+ err = PTR_ERR(master->dma_tx);
+ dev_dbg(dev, "No TX DMA channel, DMA is disabled\n");
+ goto error_clear;
+ }
+
+ master->dma_rx = dma_request_chan(dev, "rx");
+ if (IS_ERR(master->dma_rx)) {
+ err = PTR_ERR(master->dma_rx);
+ /*
+ * No reason to check EPROBE_DEFER here since we have already
+ * requested tx channel.
+ */
+ dev_dbg(dev, "No RX DMA channel, DMA is disabled\n");
+ goto error;
+ }
+
+ err = atmel_spi_dma_slave_config(as, 8);
+ if (err)
+ goto error;
+
+ dev_info(&as->pdev->dev,
+ "Using %s (tx) and %s (rx) for DMA transfers\n",
+ dma_chan_name(master->dma_tx),
+ dma_chan_name(master->dma_rx));
+
+ return 0;
+error:
+ if (!IS_ERR(master->dma_rx))
+ dma_release_channel(master->dma_rx);
+ if (!IS_ERR(master->dma_tx))
+ dma_release_channel(master->dma_tx);
+error_clear:
+ master->dma_tx = master->dma_rx = NULL;
+ return err;
+}
+
+static void atmel_spi_stop_dma(struct spi_master *master)
+{
+ if (master->dma_rx)
+ dmaengine_terminate_all(master->dma_rx);
+ if (master->dma_tx)
+ dmaengine_terminate_all(master->dma_tx);
+}
+
+static void atmel_spi_release_dma(struct spi_master *master)
+{
+ if (master->dma_rx) {
+ dma_release_channel(master->dma_rx);
+ master->dma_rx = NULL;
+ }
+ if (master->dma_tx) {
+ dma_release_channel(master->dma_tx);
+ master->dma_tx = NULL;
+ }
+}
+
+/* This function is called by the DMA driver from tasklet context */
+static void dma_callback(void *data)
+{
+ struct spi_master *master = data;
+ struct atmel_spi *as = spi_master_get_devdata(master);
+
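+	/* On SAM v4/v5, vmalloc'ed rx buffers are received through a bounce
+	 * buffer; copy the data back to the real buffer now.
+	 */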
+ if (is_vmalloc_addr(as->current_transfer->rx_buf) &&
+ IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
+ memcpy(as->current_transfer->rx_buf, as->addr_rx_bbuf,
+ as->current_transfer->len);
+ }
+ complete(&as->xfer_completion);
+}
+
+/*
+ * Next transfer using PIO without FIFO.
+ */
+static void atmel_spi_next_xfer_single(struct spi_master *master,
+ struct spi_transfer *xfer)
+{
+ struct atmel_spi *as = spi_master_get_devdata(master);
+ unsigned long xfer_pos = xfer->len - as->current_remaining_bytes;
+
+ dev_vdbg(master->dev.parent, "atmel_spi_next_xfer_pio\n");
+
+ /* Make sure data is not remaining in RDR */
+ spi_readl(as, RDR);
+ while (spi_readl(as, SR) & SPI_BIT(RDRF)) {
+ spi_readl(as, RDR);
+ cpu_relax();
+ }
+
+ if (xfer->bits_per_word > 8)
+ spi_writel(as, TDR, *(u16 *)(xfer->tx_buf + xfer_pos));
+ else
+ spi_writel(as, TDR, *(u8 *)(xfer->tx_buf + xfer_pos));
+
+ dev_dbg(master->dev.parent,
+ " start pio xfer %p: len %u tx %p rx %p bitpw %d\n",
+ xfer, xfer->len, xfer->tx_buf, xfer->rx_buf,
+ xfer->bits_per_word);
+
+ /* Enable relevant interrupts */
+ spi_writel(as, IER, SPI_BIT(RDRF) | SPI_BIT(OVRES));
+}
+
+/*
+ * Next transfer using PIO with FIFO.
+ */
+static void atmel_spi_next_xfer_fifo(struct spi_master *master,
+ struct spi_transfer *xfer)
+{
+ struct atmel_spi *as = spi_master_get_devdata(master);
+ u32 current_remaining_data, num_data;
+ u32 offset = xfer->len - as->current_remaining_bytes;
+ const u16 *words = (const u16 *)((u8 *)xfer->tx_buf + offset);
+ const u8 *bytes = (const u8 *)((u8 *)xfer->tx_buf + offset);
+ u16 td0, td1;
+ u32 fifomr;
+
+ dev_vdbg(master->dev.parent, "atmel_spi_next_xfer_fifo\n");
+
+ /* Compute the number of data to transfer in the current iteration */
+ current_remaining_data = ((xfer->bits_per_word > 8) ?
+ ((u32)as->current_remaining_bytes >> 1) :
+ (u32)as->current_remaining_bytes);
+ num_data = min(current_remaining_data, as->fifo_size);
+
+ /* Flush RX and TX FIFOs */
+ spi_writel(as, CR, SPI_BIT(RXFCLR) | SPI_BIT(TXFCLR));
+ while (spi_readl(as, FLR))
+ cpu_relax();
+
+ /* Set RX FIFO Threshold to the number of data to transfer */
+ fifomr = spi_readl(as, FMR);
+ spi_writel(as, FMR, SPI_BFINS(RXFTHRES, num_data, fifomr));
+
+ /* Clear FIFO flags in the Status Register, especially RXFTHF */
+ (void)spi_readl(as, SR);
+
+ /* Fill TX FIFO */
+ while (num_data >= 2) {
+ if (xfer->bits_per_word > 8) {
+ td0 = *words++;
+ td1 = *words++;
+ } else {
+ td0 = *bytes++;
+ td1 = *bytes++;
+ }
+
+ spi_writel(as, TDR, (td1 << 16) | td0);
+ num_data -= 2;
+ }
+
+ if (num_data) {
+ if (xfer->bits_per_word > 8)
+ td0 = *words++;
+ else
+ td0 = *bytes++;
+
+ spi_writew(as, TDR, td0);
+ num_data--;
+ }
+
+ dev_dbg(master->dev.parent,
+ " start fifo xfer %p: len %u tx %p rx %p bitpw %d\n",
+ xfer, xfer->len, xfer->tx_buf, xfer->rx_buf,
+ xfer->bits_per_word);
+
+ /*
+ * Enable RX FIFO Threshold Flag interrupt to be notified about
+ * transfer completion.
+ */
+ spi_writel(as, IER, SPI_BIT(RXFTHF) | SPI_BIT(OVRES));
+}
+
+/*
+ * Next transfer using PIO.
+ */
+static void atmel_spi_next_xfer_pio(struct spi_master *master,
+ struct spi_transfer *xfer)
+{
+ struct atmel_spi *as = spi_master_get_devdata(master);
+
+ if (as->fifo_size)
+ atmel_spi_next_xfer_fifo(master, xfer);
+ else
+ atmel_spi_next_xfer_single(master, xfer);
+}
+
+/*
+ * Submit next transfer for DMA.
+ */
+static int atmel_spi_next_xfer_dma_submit(struct spi_master *master,
+ struct spi_transfer *xfer,
+ u32 *plen)
+{
+ struct atmel_spi *as = spi_master_get_devdata(master);
+ struct dma_chan *rxchan = master->dma_rx;
+ struct dma_chan *txchan = master->dma_tx;
+ struct dma_async_tx_descriptor *rxdesc;
+ struct dma_async_tx_descriptor *txdesc;
+ dma_cookie_t cookie;
+
+ dev_vdbg(master->dev.parent, "atmel_spi_next_xfer_dma_submit\n");
+
+ /* Check that the channels are available */
+ if (!rxchan || !txchan)
+ return -ENODEV;
+
+
+ *plen = xfer->len;
+
+ if (atmel_spi_dma_slave_config(as, xfer->bits_per_word))
+ goto err_exit;
+
+ /* Send both scatterlists */
+ if (atmel_spi_is_vmalloc_xfer(xfer) &&
+ IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
+ rxdesc = dmaengine_prep_slave_single(rxchan,
+ as->dma_addr_rx_bbuf,
+ xfer->len,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT |
+ DMA_CTRL_ACK);
+ } else {
+ rxdesc = dmaengine_prep_slave_sg(rxchan,
+ xfer->rx_sg.sgl,
+ xfer->rx_sg.nents,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT |
+ DMA_CTRL_ACK);
+ }
+ if (!rxdesc)
+ goto err_dma;
+
+ if (atmel_spi_is_vmalloc_xfer(xfer) &&
+ IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
+ memcpy(as->addr_tx_bbuf, xfer->tx_buf, xfer->len);
+ txdesc = dmaengine_prep_slave_single(txchan,
+ as->dma_addr_tx_bbuf,
+ xfer->len, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT |
+ DMA_CTRL_ACK);
+ } else {
+ txdesc = dmaengine_prep_slave_sg(txchan,
+ xfer->tx_sg.sgl,
+ xfer->tx_sg.nents,
+ DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT |
+ DMA_CTRL_ACK);
+ }
+ if (!txdesc)
+ goto err_dma;
+
+ dev_dbg(master->dev.parent,
+ " start dma xfer %p: len %u tx %p/%08llx rx %p/%08llx\n",
+ xfer, xfer->len, xfer->tx_buf, (unsigned long long)xfer->tx_dma,
+ xfer->rx_buf, (unsigned long long)xfer->rx_dma);
+
+ /* Enable relevant interrupts */
+ spi_writel(as, IER, SPI_BIT(OVRES));
+
+ /* Put the callback on the RX transfer only, that should finish last */
+ rxdesc->callback = dma_callback;
+ rxdesc->callback_param = master;
+
+ /* Submit and fire RX and TX with TX last so we're ready to read! */
+ cookie = rxdesc->tx_submit(rxdesc);
+ if (dma_submit_error(cookie))
+ goto err_dma;
+ cookie = txdesc->tx_submit(txdesc);
+ if (dma_submit_error(cookie))
+ goto err_dma;
+ rxchan->device->device_issue_pending(rxchan);
+ txchan->device->device_issue_pending(txchan);
+
+ return 0;
+
+err_dma:
+ spi_writel(as, IDR, SPI_BIT(OVRES));
+ atmel_spi_stop_dma(master);
+err_exit:
+ return -ENOMEM;
+}
+
+static void atmel_spi_next_xfer_data(struct spi_master *master,
+ struct spi_transfer *xfer,
+ dma_addr_t *tx_dma,
+ dma_addr_t *rx_dma,
+ u32 *plen)
+{
+ *rx_dma = xfer->rx_dma + xfer->len - *plen;
+ *tx_dma = xfer->tx_dma + xfer->len - *plen;
+ if (*plen > master->max_dma_len)
+ *plen = master->max_dma_len;
+}
+
+static int atmel_spi_set_xfer_speed(struct atmel_spi *as,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ u32 scbr, csr;
+ unsigned long bus_hz;
+ int chip_select;
+
+ if (spi->cs_gpiod)
+ chip_select = as->native_cs_for_gpio;
+ else
+ chip_select = spi->chip_select;
+
+ /* v1 chips start out at half the peripheral bus speed. */
+ bus_hz = as->spi_clk;
+ if (!atmel_spi_is_v2(as))
+ bus_hz /= 2;
+
+ /*
+ * Calculate the lowest divider that satisfies the
+ * constraint, assuming div32/fdiv/mbz == 0.
+ */
+ scbr = DIV_ROUND_UP(bus_hz, xfer->speed_hz);
+
+ /*
+ * If the resulting divider doesn't fit into the
+ * register bitfield, we can't satisfy the constraint.
+ */
+ if (scbr >= (1 << SPI_SCBR_SIZE)) {
+ dev_err(&spi->dev,
+ "setup: %d Hz too slow, scbr %u; min %ld Hz\n",
+ xfer->speed_hz, scbr, bus_hz/255);
+ return -EINVAL;
+ }
+ if (scbr == 0) {
+ dev_err(&spi->dev,
+ "setup: %d Hz too high, scbr %u; max %ld Hz\n",
+ xfer->speed_hz, scbr, bus_hz);
+ return -EINVAL;
+ }
+ csr = spi_readl(as, CSR0 + 4 * chip_select);
+ csr = SPI_BFINS(SCBR, scbr, csr);
+ spi_writel(as, CSR0 + 4 * chip_select, csr);
+ xfer->effective_speed_hz = bus_hz / scbr;
+
+ return 0;
+}
+
+/*
+ * Submit next transfer for PDC.
+ * lock is held, spi irq is blocked
+ */
+static void atmel_spi_pdc_next_xfer(struct spi_master *master,
+ struct spi_transfer *xfer)
+{
+ struct atmel_spi *as = spi_master_get_devdata(master);
+ u32 len;
+ dma_addr_t tx_dma, rx_dma;
+
+ spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
+
+ len = as->current_remaining_bytes;
+ atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len);
+ as->current_remaining_bytes -= len;
+
+ spi_writel(as, RPR, rx_dma);
+ spi_writel(as, TPR, tx_dma);
+
+ if (xfer->bits_per_word > 8)
+ len >>= 1;
+ spi_writel(as, RCR, len);
+ spi_writel(as, TCR, len);
+
+ dev_dbg(&master->dev,
+ " start xfer %p: len %u tx %p/%08llx rx %p/%08llx\n",
+ xfer, xfer->len, xfer->tx_buf,
+ (unsigned long long)xfer->tx_dma, xfer->rx_buf,
+ (unsigned long long)xfer->rx_dma);
+
+ if (as->current_remaining_bytes) {
+ len = as->current_remaining_bytes;
+ atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len);
+ as->current_remaining_bytes -= len;
+
+ spi_writel(as, RNPR, rx_dma);
+ spi_writel(as, TNPR, tx_dma);
+
+ if (xfer->bits_per_word > 8)
+ len >>= 1;
+ spi_writel(as, RNCR, len);
+ spi_writel(as, TNCR, len);
+
+ dev_dbg(&master->dev,
+ " next xfer %p: len %u tx %p/%08llx rx %p/%08llx\n",
+ xfer, xfer->len, xfer->tx_buf,
+ (unsigned long long)xfer->tx_dma, xfer->rx_buf,
+ (unsigned long long)xfer->rx_dma);
+ }
+
+ /* REVISIT: We're waiting for RXBUFF before we start the next
+ * transfer because we need to handle some difficult timing
+ * issues otherwise. If we wait for TXBUFE in one transfer and
+ * then starts waiting for RXBUFF in the next, it's difficult
+ * to tell the difference between the RXBUFF interrupt we're
+ * actually waiting for and the RXBUFF interrupt of the
+ * previous transfer.
+ *
+ * It should be doable, though. Just not now...
+ */
+ spi_writel(as, IER, SPI_BIT(RXBUFF) | SPI_BIT(OVRES));
+ spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN));
+}
+
+/*
+ * For DMA, tx_buf/tx_dma have the same relationship as rx_buf/rx_dma:
+ * - The buffer is either valid for CPU access, else NULL
+ * - If the buffer is valid, so is its DMA address
+ *
+ * This driver manages the dma address unless message->is_dma_mapped.
+ */
+static int
+atmel_spi_dma_map_xfer(struct atmel_spi *as, struct spi_transfer *xfer)
+{
+ struct device *dev = &as->pdev->dev;
+
+ xfer->tx_dma = xfer->rx_dma = INVALID_DMA_ADDRESS;
+ if (xfer->tx_buf) {
+ /* tx_buf is a const void* where we need a void * for the dma
+ * mapping */
+ void *nonconst_tx = (void *)xfer->tx_buf;
+
+ xfer->tx_dma = dma_map_single(dev,
+ nonconst_tx, xfer->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, xfer->tx_dma))
+ return -ENOMEM;
+ }
+ if (xfer->rx_buf) {
+ xfer->rx_dma = dma_map_single(dev,
+ xfer->rx_buf, xfer->len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, xfer->rx_dma)) {
+ if (xfer->tx_buf)
+ dma_unmap_single(dev,
+ xfer->tx_dma, xfer->len,
+ DMA_TO_DEVICE);
+ return -ENOMEM;
+ }
+ }
+ return 0;
+}
+
+static void atmel_spi_dma_unmap_xfer(struct spi_master *master,
+ struct spi_transfer *xfer)
+{
+ if (xfer->tx_dma != INVALID_DMA_ADDRESS)
+ dma_unmap_single(master->dev.parent, xfer->tx_dma,
+ xfer->len, DMA_TO_DEVICE);
+ if (xfer->rx_dma != INVALID_DMA_ADDRESS)
+ dma_unmap_single(master->dev.parent, xfer->rx_dma,
+ xfer->len, DMA_FROM_DEVICE);
+}
+
+static void atmel_spi_disable_pdc_transfer(struct atmel_spi *as)
+{
+ spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
+}
+
+static void
+atmel_spi_pump_single_data(struct atmel_spi *as, struct spi_transfer *xfer)
+{
+ u8 *rxp;
+ u16 *rxp16;
+ unsigned long xfer_pos = xfer->len - as->current_remaining_bytes;
+
+ if (xfer->bits_per_word > 8) {
+ rxp16 = (u16 *)(((u8 *)xfer->rx_buf) + xfer_pos);
+ *rxp16 = spi_readl(as, RDR);
+ } else {
+ rxp = ((u8 *)xfer->rx_buf) + xfer_pos;
+ *rxp = spi_readl(as, RDR);
+ }
+ if (xfer->bits_per_word > 8) {
+ if (as->current_remaining_bytes > 2)
+ as->current_remaining_bytes -= 2;
+ else
+ as->current_remaining_bytes = 0;
+ } else {
+ as->current_remaining_bytes--;
+ }
+}
+
+static void
+atmel_spi_pump_fifo_data(struct atmel_spi *as, struct spi_transfer *xfer)
+{
+ u32 fifolr = spi_readl(as, FLR);
+ u32 num_bytes, num_data = SPI_BFEXT(RXFL, fifolr);
+ u32 offset = xfer->len - as->current_remaining_bytes;
+ u16 *words = (u16 *)((u8 *)xfer->rx_buf + offset);
+ u8 *bytes = (u8 *)((u8 *)xfer->rx_buf + offset);
+ u16 rd; /* RD field is the lowest 16 bits of RDR */
+
+ /* Update the number of remaining bytes to transfer */
+ num_bytes = ((xfer->bits_per_word > 8) ?
+ (num_data << 1) :
+ num_data);
+
+ if (as->current_remaining_bytes > num_bytes)
+ as->current_remaining_bytes -= num_bytes;
+ else
+ as->current_remaining_bytes = 0;
+
+	/* Handle an odd number of bytes when data is wider than 8 bits */
+ if (xfer->bits_per_word > 8)
+ as->current_remaining_bytes &= ~0x1;
+
+ /* Read data */
+ while (num_data) {
+ rd = spi_readl(as, RDR);
+ if (xfer->bits_per_word > 8)
+ *words++ = rd;
+ else
+ *bytes++ = rd;
+ num_data--;
+ }
+}
+
+/* Called from IRQ
+ *
+ * Must update "current_remaining_bytes" to keep track of data
+ * to transfer.
+ */
+static void
+atmel_spi_pump_pio_data(struct atmel_spi *as, struct spi_transfer *xfer)
+{
+ if (as->fifo_size)
+ atmel_spi_pump_fifo_data(as, xfer);
+ else
+ atmel_spi_pump_single_data(as, xfer);
+}
+
+/* Interrupt
+ *
+ */
+static irqreturn_t
+atmel_spi_pio_interrupt(int irq, void *dev_id)
+{
+ struct spi_master *master = dev_id;
+ struct atmel_spi *as = spi_master_get_devdata(master);
+ u32 status, pending, imr;
+ struct spi_transfer *xfer;
+ int ret = IRQ_NONE;
+
+ imr = spi_readl(as, IMR);
+ status = spi_readl(as, SR);
+ pending = status & imr;
+
+ if (pending & SPI_BIT(OVRES)) {
+ ret = IRQ_HANDLED;
+ spi_writel(as, IDR, SPI_BIT(OVRES));
+ dev_warn(master->dev.parent, "overrun\n");
+
+ /*
+ * When we get an overrun, we disregard the current
+ * transfer. Data will not be copied back from any
+ * bounce buffer and msg->actual_len will not be
+ * updated with the last xfer.
+ *
+		 * We will also not process any remaining transfers in
+ * the message.
+ */
+ as->done_status = -EIO;
+ smp_wmb();
+
+ /* Clear any overrun happening while cleaning up */
+ spi_readl(as, SR);
+
+ complete(&as->xfer_completion);
+
+ } else if (pending & (SPI_BIT(RDRF) | SPI_BIT(RXFTHF))) {
+ atmel_spi_lock(as);
+
+ if (as->current_remaining_bytes) {
+ ret = IRQ_HANDLED;
+ xfer = as->current_transfer;
+ atmel_spi_pump_pio_data(as, xfer);
+ if (!as->current_remaining_bytes)
+ spi_writel(as, IDR, pending);
+
+ complete(&as->xfer_completion);
+ }
+
+ atmel_spi_unlock(as);
+ } else {
+ WARN_ONCE(pending, "IRQ not handled, pending = %x\n", pending);
+ ret = IRQ_HANDLED;
+ spi_writel(as, IDR, pending);
+ }
+
+ return ret;
+}
+
+static irqreturn_t
+atmel_spi_pdc_interrupt(int irq, void *dev_id)
+{
+ struct spi_master *master = dev_id;
+ struct atmel_spi *as = spi_master_get_devdata(master);
+ u32 status, pending, imr;
+ int ret = IRQ_NONE;
+
+ imr = spi_readl(as, IMR);
+ status = spi_readl(as, SR);
+ pending = status & imr;
+
+ if (pending & SPI_BIT(OVRES)) {
+
+ ret = IRQ_HANDLED;
+
+ spi_writel(as, IDR, (SPI_BIT(RXBUFF) | SPI_BIT(ENDRX)
+ | SPI_BIT(OVRES)));
+
+ /* Clear any overrun happening while cleaning up */
+ spi_readl(as, SR);
+
+ as->done_status = -EIO;
+
+ complete(&as->xfer_completion);
+
+ } else if (pending & (SPI_BIT(RXBUFF) | SPI_BIT(ENDRX))) {
+ ret = IRQ_HANDLED;
+
+ spi_writel(as, IDR, pending);
+
+ complete(&as->xfer_completion);
+ }
+
+ return ret;
+}
+
+static int atmel_word_delay_csr(struct spi_device *spi, struct atmel_spi *as)
+{
+ struct spi_delay *delay = &spi->word_delay;
+ u32 value = delay->value;
+
+ switch (delay->unit) {
+ case SPI_DELAY_UNIT_NSECS:
+ value /= 1000;
+ break;
+ case SPI_DELAY_UNIT_USECS:
+ break;
+ default:
+ return -EINVAL;
+ }
+
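+	/* DLYBCT is expressed in multiples of 32 peripheral clock periods:
+	 * convert the delay (now in microseconds) to clock cycles and divide
+	 * by 32.
+	 */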
+ return (as->spi_clk / 1000000 * value) >> 5;
+}
+
+static void initialize_native_cs_for_gpio(struct atmel_spi *as)
+{
+ int i;
+ struct spi_master *master = platform_get_drvdata(as->pdev);
+
+ if (!as->native_cs_free)
+ return; /* already initialized */
+
+ if (!master->cs_gpiods)
+ return; /* No CS GPIO */
+
+ /*
+ * On the first version of the controller (AT91RM9200), CS0
+ * can't be used associated with GPIO
+ */
+ if (atmel_spi_is_v2(as))
+ i = 0;
+ else
+ i = 1;
+
+ for (; i < 4; i++)
+ if (master->cs_gpiods[i])
+ as->native_cs_free |= BIT(i);
+
+ if (as->native_cs_free)
+ as->native_cs_for_gpio = ffs(as->native_cs_free);
+}
+
+static int atmel_spi_setup(struct spi_device *spi)
+{
+ struct atmel_spi *as;
+ struct atmel_spi_device *asd;
+ u32 csr;
+ unsigned int bits = spi->bits_per_word;
+ int chip_select;
+ int word_delay_csr;
+
+ as = spi_master_get_devdata(spi->master);
+
+ /* see notes above re chipselect */
+ if (!spi->cs_gpiod && (spi->mode & SPI_CS_HIGH)) {
+ dev_warn(&spi->dev, "setup: non GPIO CS can't be active-high\n");
+ return -EINVAL;
+ }
+
+	/* Setup() is called during spi_register_controller() (aka
+	 * spi_register_master()), but after all members of the cs_gpiod
+	 * array have been filled, so we can look for which native
+	 * CS will be free to use with a GPIO.
+	 */
+ initialize_native_cs_for_gpio(as);
+
+ if (spi->cs_gpiod && as->native_cs_free) {
+ dev_err(&spi->dev,
+ "No native CS available to support this GPIO CS\n");
+ return -EBUSY;
+ }
+
+ if (spi->cs_gpiod)
+ chip_select = as->native_cs_for_gpio;
+ else
+ chip_select = spi->chip_select;
+
+ csr = SPI_BF(BITS, bits - 8);
+ if (spi->mode & SPI_CPOL)
+ csr |= SPI_BIT(CPOL);
+ if (!(spi->mode & SPI_CPHA))
+ csr |= SPI_BIT(NCPHA);
+
+ if (!spi->cs_gpiod)
+ csr |= SPI_BIT(CSAAT);
+ csr |= SPI_BF(DLYBS, 0);
+
+ word_delay_csr = atmel_word_delay_csr(spi, as);
+ if (word_delay_csr < 0)
+ return word_delay_csr;
+
+ /* DLYBCT adds delays between words. This is useful for slow devices
+	 * that need a bit of time to set up the next transfer.
+ */
+ csr |= SPI_BF(DLYBCT, word_delay_csr);
+
+ asd = spi->controller_state;
+ if (!asd) {
+ asd = kzalloc(sizeof(struct atmel_spi_device), GFP_KERNEL);
+ if (!asd)
+ return -ENOMEM;
+
+ spi->controller_state = asd;
+ }
+
+ asd->csr = csr;
+
+ dev_dbg(&spi->dev,
+ "setup: bpw %u mode 0x%x -> csr%d %08x\n",
+ bits, spi->mode, spi->chip_select, csr);
+
+ if (!atmel_spi_is_v2(as))
+ spi_writel(as, CSR0 + 4 * chip_select, csr);
+
+ return 0;
+}
+
+static void atmel_spi_set_cs(struct spi_device *spi, bool enable)
+{
+ struct atmel_spi *as = spi_master_get_devdata(spi->master);
+	/* The core doesn't really pass us enable/disable, but CS HIGH vs CS LOW.
+	 * Since we already have routines for activate/deactivate, translate
+	 * high/low to active/inactive.
+	 */
+ enable = (!!(spi->mode & SPI_CS_HIGH) == enable);
+
+ if (enable) {
+ cs_activate(as, spi);
+ } else {
+ cs_deactivate(as, spi);
+ }
+
+}
+
+static int atmel_spi_one_transfer(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct atmel_spi *as;
+ u8 bits;
+ u32 len;
+ struct atmel_spi_device *asd;
+ int timeout;
+ int ret;
+ unsigned long dma_timeout;
+
+ as = spi_master_get_devdata(master);
+
+ asd = spi->controller_state;
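+	/* The BITS field (bits 7:4) of the cached CSR encodes bits_per_word - 8. */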
+ bits = (asd->csr >> 4) & 0xf;
+ if (bits != xfer->bits_per_word - 8) {
+ dev_dbg(&spi->dev,
+ "you can't yet change bits_per_word in transfers\n");
+ return -ENOPROTOOPT;
+ }
+
+ /*
+ * DMA map early, for performance (empties dcache ASAP) and
+ * better fault reporting.
+ */
+ if ((!master->cur_msg->is_dma_mapped)
+ && as->use_pdc) {
+ if (atmel_spi_dma_map_xfer(as, xfer) < 0)
+ return -ENOMEM;
+ }
+
+ atmel_spi_set_xfer_speed(as, spi, xfer);
+
+ as->done_status = 0;
+ as->current_transfer = xfer;
+ as->current_remaining_bytes = xfer->len;
+ while (as->current_remaining_bytes) {
+ reinit_completion(&as->xfer_completion);
+
+ if (as->use_pdc) {
+ atmel_spi_lock(as);
+ atmel_spi_pdc_next_xfer(master, xfer);
+ atmel_spi_unlock(as);
+ } else if (atmel_spi_use_dma(as, xfer)) {
+ len = as->current_remaining_bytes;
+ ret = atmel_spi_next_xfer_dma_submit(master,
+ xfer, &len);
+ if (ret) {
+ dev_err(&spi->dev,
+ "unable to use DMA, fallback to PIO\n");
+ as->done_status = ret;
+ break;
+ } else {
+ as->current_remaining_bytes -= len;
+ if (as->current_remaining_bytes < 0)
+ as->current_remaining_bytes = 0;
+ }
+ } else {
+ atmel_spi_lock(as);
+ atmel_spi_next_xfer_pio(master, xfer);
+ atmel_spi_unlock(as);
+ }
+
+ dma_timeout = wait_for_completion_timeout(&as->xfer_completion,
+ SPI_DMA_TIMEOUT);
+ if (WARN_ON(dma_timeout == 0)) {
+ dev_err(&spi->dev, "spi transfer timeout\n");
+ as->done_status = -EIO;
+ }
+
+ if (as->done_status)
+ break;
+ }
+
+ if (as->done_status) {
+ if (as->use_pdc) {
+ dev_warn(master->dev.parent,
+ "overrun (%u/%u remaining)\n",
+ spi_readl(as, TCR), spi_readl(as, RCR));
+
+ /*
+ * Clean up DMA registers and make sure the data
+ * registers are empty.
+ */
+ spi_writel(as, RNCR, 0);
+ spi_writel(as, TNCR, 0);
+ spi_writel(as, RCR, 0);
+ spi_writel(as, TCR, 0);
+ for (timeout = 1000; timeout; timeout--)
+ if (spi_readl(as, SR) & SPI_BIT(TXEMPTY))
+ break;
+ if (!timeout)
+ dev_warn(master->dev.parent,
+ "timeout waiting for TXEMPTY");
+ while (spi_readl(as, SR) & SPI_BIT(RDRF))
+ spi_readl(as, RDR);
+
+ /* Clear any overrun happening while cleaning up */
+ spi_readl(as, SR);
+
+ } else if (atmel_spi_use_dma(as, xfer)) {
+ atmel_spi_stop_dma(master);
+ }
+ }
+
+ if (!master->cur_msg->is_dma_mapped
+ && as->use_pdc)
+ atmel_spi_dma_unmap_xfer(master, xfer);
+
+ if (as->use_pdc)
+ atmel_spi_disable_pdc_transfer(as);
+
+ return as->done_status;
+}
+
+static void atmel_spi_cleanup(struct spi_device *spi)
+{
+ struct atmel_spi_device *asd = spi->controller_state;
+
+ if (!asd)
+ return;
+
+ spi->controller_state = NULL;
+ kfree(asd);
+}
+
+static inline unsigned int atmel_get_version(struct atmel_spi *as)
+{
+ return spi_readl(as, VERSION) & 0x00000fff;
+}
+
+static void atmel_get_caps(struct atmel_spi *as)
+{
+ unsigned int version;
+
+ version = atmel_get_version(as);
+
+ as->caps.is_spi2 = version > 0x121;
+ as->caps.has_wdrbt = version >= 0x210;
+ as->caps.has_dma_support = version >= 0x212;
+ as->caps.has_pdc_support = version < 0x212;
+}
+
+static void atmel_spi_init(struct atmel_spi *as)
+{
+ spi_writel(as, CR, SPI_BIT(SWRST));
+ spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
+
+ /* It is recommended to enable FIFOs first thing after reset */
+ if (as->fifo_size)
+ spi_writel(as, CR, SPI_BIT(FIFOEN));
+
+ if (as->caps.has_wdrbt) {
+ spi_writel(as, MR, SPI_BIT(WDRBT) | SPI_BIT(MODFDIS)
+ | SPI_BIT(MSTR));
+ } else {
+ spi_writel(as, MR, SPI_BIT(MSTR) | SPI_BIT(MODFDIS));
+ }
+
+ if (as->use_pdc)
+ spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
+ spi_writel(as, CR, SPI_BIT(SPIEN));
+}
+
+static int atmel_spi_probe(struct platform_device *pdev)
+{
+ struct resource *regs;
+ int irq;
+ struct clk *clk;
+ int ret;
+ struct spi_master *master;
+ struct atmel_spi *as;
+
+ /* Select default pin state */
+ pinctrl_pm_select_default_state(&pdev->dev);
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!regs)
+ return -ENXIO;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ clk = devm_clk_get(&pdev->dev, "spi_clk");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ /* set up the SPI core, then the atmel-specific driver state */
+ master = spi_alloc_master(&pdev->dev, sizeof(*as));
+ if (!master)
+ return -ENOMEM;
+
+ /* the spi->mode bits understood by this driver: */
+ master->use_gpio_descriptors = true;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 16);
+ master->dev.of_node = pdev->dev.of_node;
+ master->bus_num = pdev->id;
+ master->num_chipselect = 4;
+ master->setup = atmel_spi_setup;
+ master->flags = (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX |
+ SPI_MASTER_GPIO_SS);
+ master->transfer_one = atmel_spi_one_transfer;
+ master->set_cs = atmel_spi_set_cs;
+ master->cleanup = atmel_spi_cleanup;
+ master->auto_runtime_pm = true;
+ master->max_dma_len = SPI_MAX_DMA_XFER;
+ master->can_dma = atmel_spi_can_dma;
+ platform_set_drvdata(pdev, master);
+
+ as = spi_master_get_devdata(master);
+
+ spin_lock_init(&as->lock);
+
+ as->pdev = pdev;
+ as->regs = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(as->regs)) {
+ ret = PTR_ERR(as->regs);
+ goto out_unmap_regs;
+ }
+ as->phybase = regs->start;
+ as->irq = irq;
+ as->clk = clk;
+
+ init_completion(&as->xfer_completion);
+
+ atmel_get_caps(as);
+
+ as->use_dma = false;
+ as->use_pdc = false;
+ if (as->caps.has_dma_support) {
+ ret = atmel_spi_configure_dma(master, as);
+ if (ret == 0) {
+ as->use_dma = true;
+ } else if (ret == -EPROBE_DEFER) {
+ goto out_unmap_regs;
+ }
+ } else if (as->caps.has_pdc_support) {
+ as->use_pdc = true;
+ }
+
+ if (IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
+ as->addr_rx_bbuf = dma_alloc_coherent(&pdev->dev,
+ SPI_MAX_DMA_XFER,
+ &as->dma_addr_rx_bbuf,
+ GFP_KERNEL | GFP_DMA);
+ if (!as->addr_rx_bbuf) {
+ as->use_dma = false;
+ } else {
+ as->addr_tx_bbuf = dma_alloc_coherent(&pdev->dev,
+ SPI_MAX_DMA_XFER,
+ &as->dma_addr_tx_bbuf,
+ GFP_KERNEL | GFP_DMA);
+ if (!as->addr_tx_bbuf) {
+ as->use_dma = false;
+ dma_free_coherent(&pdev->dev, SPI_MAX_DMA_XFER,
+ as->addr_rx_bbuf,
+ as->dma_addr_rx_bbuf);
+ }
+ }
+ if (!as->use_dma)
+ dev_info(master->dev.parent,
+ " can not allocate dma coherent memory\n");
+ }
+
+ if (as->caps.has_dma_support && !as->use_dma)
+ dev_info(&pdev->dev, "Atmel SPI Controller using PIO only\n");
+
+ if (as->use_pdc) {
+ ret = devm_request_irq(&pdev->dev, irq, atmel_spi_pdc_interrupt,
+ 0, dev_name(&pdev->dev), master);
+ } else {
+ ret = devm_request_irq(&pdev->dev, irq, atmel_spi_pio_interrupt,
+ 0, dev_name(&pdev->dev), master);
+ }
+ if (ret)
+ goto out_unmap_regs;
+
+ /* Initialize the hardware */
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ goto out_free_irq;
+
+ as->spi_clk = clk_get_rate(clk);
+
+ as->fifo_size = 0;
+ if (!of_property_read_u32(pdev->dev.of_node, "atmel,fifo-size",
+ &as->fifo_size)) {
+ dev_info(&pdev->dev, "Using FIFO (%u data)\n", as->fifo_size);
+ }
+
+ atmel_spi_init(as);
+
+ pm_runtime_set_autosuspend_delay(&pdev->dev, AUTOSUSPEND_TIMEOUT);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret)
+ goto out_free_dma;
+
+ /* go! */
+ dev_info(&pdev->dev, "Atmel SPI Controller version 0x%x at 0x%08lx (irq %d)\n",
+ atmel_get_version(as), (unsigned long)regs->start,
+ irq);
+
+ return 0;
+
+out_free_dma:
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+
+ if (as->use_dma)
+ atmel_spi_release_dma(master);
+
+ spi_writel(as, CR, SPI_BIT(SWRST));
+ spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
+ clk_disable_unprepare(clk);
+out_free_irq:
+out_unmap_regs:
+ spi_master_put(master);
+ return ret;
+}
+
+static int atmel_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct atmel_spi *as = spi_master_get_devdata(master);
+
+ pm_runtime_get_sync(&pdev->dev);
+
+ /* reset the hardware and block queue progress */
+ if (as->use_dma) {
+ atmel_spi_stop_dma(master);
+ atmel_spi_release_dma(master);
+ if (IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
+ dma_free_coherent(&pdev->dev, SPI_MAX_DMA_XFER,
+ as->addr_tx_bbuf,
+ as->dma_addr_tx_bbuf);
+ dma_free_coherent(&pdev->dev, SPI_MAX_DMA_XFER,
+ as->addr_rx_bbuf,
+ as->dma_addr_rx_bbuf);
+ }
+ }
+
+ spin_lock_irq(&as->lock);
+ spi_writel(as, CR, SPI_BIT(SWRST));
+ spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
+ spi_readl(as, SR);
+ spin_unlock_irq(&as->lock);
+
+ clk_disable_unprepare(as->clk);
+
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static int atmel_spi_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct atmel_spi *as = spi_master_get_devdata(master);
+
+ clk_disable_unprepare(as->clk);
+ pinctrl_pm_select_sleep_state(dev);
+
+ return 0;
+}
+
+static int atmel_spi_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct atmel_spi *as = spi_master_get_devdata(master);
+
+ pinctrl_pm_select_default_state(dev);
+
+ return clk_prepare_enable(as->clk);
+}
+
+static int atmel_spi_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ int ret;
+
+ /* Stop the queue running */
+ ret = spi_master_suspend(master);
+ if (ret)
+ return ret;
+
+ if (!pm_runtime_suspended(dev))
+ atmel_spi_runtime_suspend(dev);
+
+ return 0;
+}
+
+static int atmel_spi_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct atmel_spi *as = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_prepare_enable(as->clk);
+ if (ret)
+ return ret;
+
+ atmel_spi_init(as);
+
+ clk_disable_unprepare(as->clk);
+
+ if (!pm_runtime_suspended(dev)) {
+ ret = atmel_spi_runtime_resume(dev);
+ if (ret)
+ return ret;
+ }
+
+ /* Start the queue running */
+ return spi_master_resume(master);
+}
+
+static const struct dev_pm_ops atmel_spi_pm_ops = {
+ SYSTEM_SLEEP_PM_OPS(atmel_spi_suspend, atmel_spi_resume)
+ RUNTIME_PM_OPS(atmel_spi_runtime_suspend,
+ atmel_spi_runtime_resume, NULL)
+};
+
+static const struct of_device_id atmel_spi_dt_ids[] = {
+ { .compatible = "atmel,at91rm9200-spi" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, atmel_spi_dt_ids);
+
+static struct platform_driver atmel_spi_driver = {
+ .driver = {
+ .name = "atmel_spi",
+ .pm = pm_ptr(&atmel_spi_pm_ops),
+ .of_match_table = atmel_spi_dt_ids,
+ },
+ .probe = atmel_spi_probe,
+ .remove = atmel_spi_remove,
+};
+module_platform_driver(atmel_spi_driver);
+
+MODULE_DESCRIPTION("Atmel AT32/AT91 SPI Controller driver");
+MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:atmel_spi");
diff --git a/drivers/spi/spi-au1550.c b/drivers/spi/spi-au1550.c
new file mode 100644
index 000000000..e00876129
--- /dev/null
+++ b/drivers/spi/spi-au1550.c
@@ -0,0 +1,993 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * au1550 psc spi controller driver
+ * may also work with au1200, au1210, au1250
+ * will not work on au1000, au1100 and au1500 (no full spi controller there)
+ *
+ * Copyright (c) 2006 ATRON electronic GmbH
+ * Author: Jan Nikitenko <jan.nikitenko@gmail.com>
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+#include <linux/dma-mapping.h>
+#include <linux/completion.h>
+#include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/au1xxx_psc.h>
+#include <asm/mach-au1x00/au1xxx_dbdma.h>
+
+#include <asm/mach-au1x00/au1550_spi.h>
+
+static unsigned int usedma = 1;
+module_param(usedma, uint, 0644);
+
+/*
+#define AU1550_SPI_DEBUG_LOOPBACK
+*/
+
+
+#define AU1550_SPI_DBDMA_DESCRIPTORS 1
+#define AU1550_SPI_DMA_RXTMP_MINSIZE 2048U
+
+struct au1550_spi {
+ struct spi_bitbang bitbang;
+
+ volatile psc_spi_t __iomem *regs;
+ int irq;
+
+ unsigned int len;
+ unsigned int tx_count;
+ unsigned int rx_count;
+ const u8 *tx;
+ u8 *rx;
+
+ void (*rx_word)(struct au1550_spi *hw);
+ void (*tx_word)(struct au1550_spi *hw);
+ int (*txrx_bufs)(struct spi_device *spi, struct spi_transfer *t);
+ irqreturn_t (*irq_callback)(struct au1550_spi *hw);
+
+ struct completion master_done;
+
+ unsigned int usedma;
+ u32 dma_tx_id;
+ u32 dma_rx_id;
+ u32 dma_tx_ch;
+ u32 dma_rx_ch;
+
+ u8 *dma_rx_tmpbuf;
+ unsigned int dma_rx_tmpbuf_size;
+ u32 dma_rx_tmpbuf_addr;
+
+ struct spi_master *master;
+ struct device *dev;
+ struct au1550_spi_info *pdata;
+ struct resource *ioarea;
+};
+
+
+/* we use an 8-bit memory device for dma transfers to/from spi fifo */
+static dbdev_tab_t au1550_spi_mem_dbdev = {
+ .dev_id = DBDMA_MEM_CHAN,
+ .dev_flags = DEV_FLAGS_ANYUSE|DEV_FLAGS_SYNC,
+ .dev_tsize = 0,
+ .dev_devwidth = 8,
+ .dev_physaddr = 0x00000000,
+ .dev_intlevel = 0,
+ .dev_intpolarity = 0
+};
+
+static int ddma_memid; /* id to above mem dma device */
+
+static void au1550_spi_bits_handlers_set(struct au1550_spi *hw, int bpw);
+
+
+/*
+ * compute the BRG and DIV bits to set up the SPI clock based on the main
+ * input clock rate that was specified in the platform data structure;
+ * according to the au1550 datasheet:
+ * psc_tempclk = psc_mainclk / (2 << DIV)
+ * spiclk = psc_tempclk / (2 * (BRG + 1))
+ * BRG valid range is 4..63
+ * DIV valid range is 0..3
+ */
+static u32 au1550_spi_baudcfg(struct au1550_spi *hw, unsigned int speed_hz)
+{
+ u32 mainclk_hz = hw->pdata->mainclk_hz;
+ u32 div, brg;
+
+ for (div = 0; div < 4; div++) {
+ brg = mainclk_hz / speed_hz / (4 << div);
+ /* now we have BRG+1 in brg, so count with that */
+ if (brg < (4 + 1)) {
+ brg = (4 + 1); /* speed_hz too big */
+ break; /* set lowest brg (div is == 0) */
+ }
+ if (brg <= (63 + 1))
+ break; /* we have valid brg and div */
+ }
+ if (div == 4) {
+ div = 3; /* speed_hz too small */
+ brg = (63 + 1); /* set highest brg and div */
+ }
+ brg--;
+ return PSC_SPICFG_SET_BAUD(brg) | PSC_SPICFG_SET_DIV(div);
+}
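
To make the search above concrete, here is a standalone sketch (illustrative only; the 48 MHz main clock and 1 MHz target are assumed values, not taken from the driver) that walks the same DIV/BRG loop and prints the resulting SPI clock:

#include <stdio.h>

int main(void)
{
	unsigned long mainclk_hz = 48000000, speed_hz = 1000000;
	unsigned int div, brg;

	for (div = 0; div < 4; div++) {
		brg = mainclk_hz / speed_hz / (4 << div);	/* this is BRG + 1 */
		if (brg < 4 + 1) {		/* speed_hz too big */
			brg = 4 + 1;
			break;
		}
		if (brg <= 63 + 1)
			break;			/* valid brg and div found */
	}
	if (div == 4) {				/* speed_hz too small */
		div = 3;
		brg = 63 + 1;
	}
	brg--;
	/* spiclk = mainclk / (2 << DIV) / (2 * (BRG + 1)) */
	printf("BRG=%u DIV=%u spiclk=%lu Hz\n", brg, div,
	       mainclk_hz / (2 << div) / (2 * (brg + 1)));
	return 0;	/* prints BRG=11 DIV=0 spiclk=1000000 Hz */
}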
+
+static inline void au1550_spi_mask_ack_all(struct au1550_spi *hw)
+{
+ hw->regs->psc_spimsk =
+ PSC_SPIMSK_MM | PSC_SPIMSK_RR | PSC_SPIMSK_RO
+ | PSC_SPIMSK_RU | PSC_SPIMSK_TR | PSC_SPIMSK_TO
+ | PSC_SPIMSK_TU | PSC_SPIMSK_SD | PSC_SPIMSK_MD;
+ wmb(); /* drain writebuffer */
+
+ hw->regs->psc_spievent =
+ PSC_SPIEVNT_MM | PSC_SPIEVNT_RR | PSC_SPIEVNT_RO
+ | PSC_SPIEVNT_RU | PSC_SPIEVNT_TR | PSC_SPIEVNT_TO
+ | PSC_SPIEVNT_TU | PSC_SPIEVNT_SD | PSC_SPIEVNT_MD;
+ wmb(); /* drain writebuffer */
+}
+
+static void au1550_spi_reset_fifos(struct au1550_spi *hw)
+{
+ u32 pcr;
+
+ hw->regs->psc_spipcr = PSC_SPIPCR_RC | PSC_SPIPCR_TC;
+ wmb(); /* drain writebuffer */
+ do {
+ pcr = hw->regs->psc_spipcr;
+ wmb(); /* drain writebuffer */
+ } while (pcr != 0);
+}
+
+/*
+ * DMA transfers are used for the most common SPI word size of 8 bits.
+ * We cannot easily change the width of DMA channels that are already set
+ * up, so supporting DMA for more than 8-bit words (up to 24 bits) would
+ * require setting up the DMA channels from scratch on each SPI transfer,
+ * based on bits_per_word. Instead we have pre-set-up 8-bit DMA channels
+ * supporting SPI transfers of 4 to 8 bits, while 9- to 24-bit transfers
+ * are done in PIO IRQ-based mode. The callbacks that handle DMA or PIO
+ * are set up in au1550_spi_bits_handlers_set().
+ */
+static void au1550_spi_chipsel(struct spi_device *spi, int value)
+{
+ struct au1550_spi *hw = spi_master_get_devdata(spi->master);
+ unsigned int cspol = spi->mode & SPI_CS_HIGH ? 1 : 0;
+ u32 cfg, stat;
+
+ switch (value) {
+ case BITBANG_CS_INACTIVE:
+ if (hw->pdata->deactivate_cs)
+ hw->pdata->deactivate_cs(hw->pdata, spi->chip_select,
+ cspol);
+ break;
+
+ case BITBANG_CS_ACTIVE:
+ au1550_spi_bits_handlers_set(hw, spi->bits_per_word);
+
+ cfg = hw->regs->psc_spicfg;
+ wmb(); /* drain writebuffer */
+ hw->regs->psc_spicfg = cfg & ~PSC_SPICFG_DE_ENABLE;
+ wmb(); /* drain writebuffer */
+
+ if (spi->mode & SPI_CPOL)
+ cfg |= PSC_SPICFG_BI;
+ else
+ cfg &= ~PSC_SPICFG_BI;
+ if (spi->mode & SPI_CPHA)
+ cfg &= ~PSC_SPICFG_CDE;
+ else
+ cfg |= PSC_SPICFG_CDE;
+
+ if (spi->mode & SPI_LSB_FIRST)
+ cfg |= PSC_SPICFG_MLF;
+ else
+ cfg &= ~PSC_SPICFG_MLF;
+
+ if (hw->usedma && spi->bits_per_word <= 8)
+ cfg &= ~PSC_SPICFG_DD_DISABLE;
+ else
+ cfg |= PSC_SPICFG_DD_DISABLE;
+ cfg = PSC_SPICFG_CLR_LEN(cfg);
+ cfg |= PSC_SPICFG_SET_LEN(spi->bits_per_word);
+
+ cfg = PSC_SPICFG_CLR_BAUD(cfg);
+ cfg &= ~PSC_SPICFG_SET_DIV(3);
+ cfg |= au1550_spi_baudcfg(hw, spi->max_speed_hz);
+
+ hw->regs->psc_spicfg = cfg | PSC_SPICFG_DE_ENABLE;
+ wmb(); /* drain writebuffer */
+ do {
+ stat = hw->regs->psc_spistat;
+ wmb(); /* drain writebuffer */
+ } while ((stat & PSC_SPISTAT_DR) == 0);
+
+ if (hw->pdata->activate_cs)
+ hw->pdata->activate_cs(hw->pdata, spi->chip_select,
+ cspol);
+ break;
+ }
+}
+
+static int au1550_spi_setupxfer(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct au1550_spi *hw = spi_master_get_devdata(spi->master);
+ unsigned int bpw, hz;
+ u32 cfg, stat;
+
+ if (t) {
+ bpw = t->bits_per_word;
+ hz = t->speed_hz;
+ } else {
+ bpw = spi->bits_per_word;
+ hz = spi->max_speed_hz;
+ }
+
+ if (!hz)
+ return -EINVAL;
+
+ au1550_spi_bits_handlers_set(hw, spi->bits_per_word);
+
+ cfg = hw->regs->psc_spicfg;
+ wmb(); /* drain writebuffer */
+ hw->regs->psc_spicfg = cfg & ~PSC_SPICFG_DE_ENABLE;
+ wmb(); /* drain writebuffer */
+
+ if (hw->usedma && bpw <= 8)
+ cfg &= ~PSC_SPICFG_DD_DISABLE;
+ else
+ cfg |= PSC_SPICFG_DD_DISABLE;
+ cfg = PSC_SPICFG_CLR_LEN(cfg);
+ cfg |= PSC_SPICFG_SET_LEN(bpw);
+
+ cfg = PSC_SPICFG_CLR_BAUD(cfg);
+ cfg &= ~PSC_SPICFG_SET_DIV(3);
+ cfg |= au1550_spi_baudcfg(hw, hz);
+
+ hw->regs->psc_spicfg = cfg;
+ wmb(); /* drain writebuffer */
+
+ if (cfg & PSC_SPICFG_DE_ENABLE) {
+ do {
+ stat = hw->regs->psc_spistat;
+ wmb(); /* drain writebuffer */
+ } while ((stat & PSC_SPISTAT_DR) == 0);
+ }
+
+ au1550_spi_reset_fifos(hw);
+ au1550_spi_mask_ack_all(hw);
+ return 0;
+}
+
+/*
+ * for DMA SPI transfers we have to set up the rx channel, otherwise there
+ * is no reliable way to recognize that the SPI transfer is done: DMA
+ * complete callbacks are called before the real SPI transfer is finished,
+ * and if only the tx DMA channel is set up (and the rx FIFO overflow event
+ * is masked), the SPI master done event IRQ is not generated unless the rx
+ * FIFO is empty (emptied), so we need an rx tmp buffer to use for rx DMA
+ * if the user does not provide one
+ */
+static int au1550_spi_dma_rxtmp_alloc(struct au1550_spi *hw, unsigned int size)
+{
+ hw->dma_rx_tmpbuf = kmalloc(size, GFP_KERNEL);
+ if (!hw->dma_rx_tmpbuf)
+ return -ENOMEM;
+ hw->dma_rx_tmpbuf_size = size;
+ hw->dma_rx_tmpbuf_addr = dma_map_single(hw->dev, hw->dma_rx_tmpbuf,
+ size, DMA_FROM_DEVICE);
+ if (dma_mapping_error(hw->dev, hw->dma_rx_tmpbuf_addr)) {
+ kfree(hw->dma_rx_tmpbuf);
+ hw->dma_rx_tmpbuf = 0;
+ hw->dma_rx_tmpbuf_size = 0;
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static void au1550_spi_dma_rxtmp_free(struct au1550_spi *hw)
+{
+ dma_unmap_single(hw->dev, hw->dma_rx_tmpbuf_addr,
+ hw->dma_rx_tmpbuf_size, DMA_FROM_DEVICE);
+ kfree(hw->dma_rx_tmpbuf);
+ hw->dma_rx_tmpbuf = 0;
+ hw->dma_rx_tmpbuf_size = 0;
+}
+
+static int au1550_spi_dma_txrxb(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct au1550_spi *hw = spi_master_get_devdata(spi->master);
+ dma_addr_t dma_tx_addr;
+ dma_addr_t dma_rx_addr;
+ u32 res;
+
+ hw->len = t->len;
+ hw->tx_count = 0;
+ hw->rx_count = 0;
+
+ hw->tx = t->tx_buf;
+ hw->rx = t->rx_buf;
+ dma_tx_addr = t->tx_dma;
+ dma_rx_addr = t->rx_dma;
+
+ /*
+ * check if the buffers are already DMA mapped, otherwise map them:
+ * - first map the TX buffer, so cache data gets written to memory
+ * - then map the RX buffer, so that cache entries (with
+ * soon-to-be-stale data) get removed
+ * use rx buffer in place of tx if tx buffer was not provided
+ * use temp rx buffer (preallocated or realloc to fit) for rx dma
+ */
+ if (t->tx_buf) {
+ if (t->tx_dma == 0) { /* if DMA_ADDR_INVALID, map it */
+ dma_tx_addr = dma_map_single(hw->dev,
+ (void *)t->tx_buf,
+ t->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(hw->dev, dma_tx_addr))
+ dev_err(hw->dev, "tx dma map error\n");
+ }
+ }
+
+ if (t->rx_buf) {
+ if (t->rx_dma == 0) { /* if DMA_ADDR_INVALID, map it */
+ dma_rx_addr = dma_map_single(hw->dev,
+ (void *)t->rx_buf,
+ t->len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(hw->dev, dma_rx_addr))
+ dev_err(hw->dev, "rx dma map error\n");
+ }
+ } else {
+ if (t->len > hw->dma_rx_tmpbuf_size) {
+ int ret;
+
+ au1550_spi_dma_rxtmp_free(hw);
+ ret = au1550_spi_dma_rxtmp_alloc(hw, max(t->len,
+ AU1550_SPI_DMA_RXTMP_MINSIZE));
+ if (ret < 0)
+ return ret;
+ }
+ hw->rx = hw->dma_rx_tmpbuf;
+ dma_rx_addr = hw->dma_rx_tmpbuf_addr;
+ dma_sync_single_for_device(hw->dev, dma_rx_addr,
+ t->len, DMA_FROM_DEVICE);
+ }
+
+ if (!t->tx_buf) {
+ dma_sync_single_for_device(hw->dev, dma_rx_addr,
+ t->len, DMA_BIDIRECTIONAL);
+ hw->tx = hw->rx;
+ }
+
+ /* put buffers on the ring */
+ res = au1xxx_dbdma_put_dest(hw->dma_rx_ch, virt_to_phys(hw->rx),
+ t->len, DDMA_FLAGS_IE);
+ if (!res)
+ dev_err(hw->dev, "rx dma put dest error\n");
+
+ res = au1xxx_dbdma_put_source(hw->dma_tx_ch, virt_to_phys(hw->tx),
+ t->len, DDMA_FLAGS_IE);
+ if (!res)
+ dev_err(hw->dev, "tx dma put source error\n");
+
+ au1xxx_dbdma_start(hw->dma_rx_ch);
+ au1xxx_dbdma_start(hw->dma_tx_ch);
+
+ /* by default enable nearly all event interrupts */
+ hw->regs->psc_spimsk = PSC_SPIMSK_SD;
+ wmb(); /* drain writebuffer */
+
+ /* start the transfer */
+ hw->regs->psc_spipcr = PSC_SPIPCR_MS;
+ wmb(); /* drain writebuffer */
+
+ wait_for_completion(&hw->master_done);
+
+ au1xxx_dbdma_stop(hw->dma_tx_ch);
+ au1xxx_dbdma_stop(hw->dma_rx_ch);
+
+ if (!t->rx_buf) {
+ /* using the temporary preallocated and premapped buffer */
+ dma_sync_single_for_cpu(hw->dev, dma_rx_addr, t->len,
+ DMA_FROM_DEVICE);
+ }
+ /* unmap buffers if mapped above */
+ if (t->rx_buf && t->rx_dma == 0)
+ dma_unmap_single(hw->dev, dma_rx_addr, t->len,
+ DMA_FROM_DEVICE);
+ if (t->tx_buf && t->tx_dma == 0)
+ dma_unmap_single(hw->dev, dma_tx_addr, t->len,
+ DMA_TO_DEVICE);
+
+ return min(hw->rx_count, hw->tx_count);
+}
+
+static irqreturn_t au1550_spi_dma_irq_callback(struct au1550_spi *hw)
+{
+ u32 stat, evnt;
+
+ stat = hw->regs->psc_spistat;
+ evnt = hw->regs->psc_spievent;
+ wmb(); /* drain writebuffer */
+ if ((stat & PSC_SPISTAT_DI) == 0) {
+ dev_err(hw->dev, "Unexpected IRQ!\n");
+ return IRQ_NONE;
+ }
+
+ if ((evnt & (PSC_SPIEVNT_MM | PSC_SPIEVNT_RO
+ | PSC_SPIEVNT_RU | PSC_SPIEVNT_TO
+ | PSC_SPIEVNT_TU | PSC_SPIEVNT_SD))
+ != 0) {
+ /*
+ * due to an SPI error we consider the transfer done,
+ * so mask all events until the next transfer starts
+ * and stop any possibly running DMA immediately
+ */
+ au1550_spi_mask_ack_all(hw);
+ au1xxx_dbdma_stop(hw->dma_rx_ch);
+ au1xxx_dbdma_stop(hw->dma_tx_ch);
+
+ /* get number of transferred bytes */
+ hw->rx_count = hw->len - au1xxx_get_dma_residue(hw->dma_rx_ch);
+ hw->tx_count = hw->len - au1xxx_get_dma_residue(hw->dma_tx_ch);
+
+ au1xxx_dbdma_reset(hw->dma_rx_ch);
+ au1xxx_dbdma_reset(hw->dma_tx_ch);
+ au1550_spi_reset_fifos(hw);
+
+ if (evnt == PSC_SPIEVNT_RO)
+ dev_err(hw->dev,
+ "dma transfer: receive FIFO overflow!\n");
+ else
+ dev_err(hw->dev,
+ "dma transfer: unexpected SPI error (event=0x%x stat=0x%x)!\n",
+ evnt, stat);
+
+ complete(&hw->master_done);
+ return IRQ_HANDLED;
+ }
+
+ if ((evnt & PSC_SPIEVNT_MD) != 0) {
+ /* transfer completed successfully */
+ au1550_spi_mask_ack_all(hw);
+ hw->rx_count = hw->len;
+ hw->tx_count = hw->len;
+ complete(&hw->master_done);
+ }
+ return IRQ_HANDLED;
+}
+
+
+/* routines to handle different word sizes in pio mode */
+#define AU1550_SPI_RX_WORD(size, mask) \
+static void au1550_spi_rx_word_##size(struct au1550_spi *hw) \
+{ \
+ u32 fifoword = hw->regs->psc_spitxrx & (u32)(mask); \
+ wmb(); /* drain writebuffer */ \
+ if (hw->rx) { \
+ *(u##size *)hw->rx = (u##size)fifoword; \
+ hw->rx += (size) / 8; \
+ } \
+ hw->rx_count += (size) / 8; \
+}
+
+#define AU1550_SPI_TX_WORD(size, mask) \
+static void au1550_spi_tx_word_##size(struct au1550_spi *hw) \
+{ \
+ u32 fifoword = 0; \
+ if (hw->tx) { \
+ fifoword = *(u##size *)hw->tx & (u32)(mask); \
+ hw->tx += (size) / 8; \
+ } \
+ hw->tx_count += (size) / 8; \
+ if (hw->tx_count >= hw->len) \
+ fifoword |= PSC_SPITXRX_LC; \
+ hw->regs->psc_spitxrx = fifoword; \
+ wmb(); /* drain writebuffer */ \
+}
+
+AU1550_SPI_RX_WORD(8, 0xff)
+AU1550_SPI_RX_WORD(16, 0xffff)
+AU1550_SPI_RX_WORD(32, 0xffffff)
+AU1550_SPI_TX_WORD(8, 0xff)
+AU1550_SPI_TX_WORD(16, 0xffff)
+AU1550_SPI_TX_WORD(32, 0xffffff)
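
As a reading aid, AU1550_SPI_RX_WORD(16, 0xffff) above expands, after preprocessing, to essentially the following function (reconstructed here for illustration, not an addition to the patch):

static void au1550_spi_rx_word_16(struct au1550_spi *hw)
{
	u32 fifoword = hw->regs->psc_spitxrx & (u32)(0xffff);
	wmb(); /* drain writebuffer */
	if (hw->rx) {
		*(u16 *)hw->rx = (u16)fifoword;
		hw->rx += 16 / 8;
	}
	hw->rx_count += 16 / 8;
}

The 32-bit variants use a 0xffffff mask because the driver only declares 4 to 24 bits_per_word in probe, even though such words occupy 32-bit slots.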
+
+static int au1550_spi_pio_txrxb(struct spi_device *spi, struct spi_transfer *t)
+{
+ u32 stat, mask;
+ struct au1550_spi *hw = spi_master_get_devdata(spi->master);
+
+ hw->tx = t->tx_buf;
+ hw->rx = t->rx_buf;
+ hw->len = t->len;
+ hw->tx_count = 0;
+ hw->rx_count = 0;
+
+ /* by default enable nearly all events after filling tx fifo */
+ mask = PSC_SPIMSK_SD;
+
+ /* fill the transmit FIFO */
+ while (hw->tx_count < hw->len) {
+
+ hw->tx_word(hw);
+
+ if (hw->tx_count >= hw->len) {
+ /* mask tx fifo request interrupt as we are done */
+ mask |= PSC_SPIMSK_TR;
+ }
+
+ stat = hw->regs->psc_spistat;
+ wmb(); /* drain writebuffer */
+ if (stat & PSC_SPISTAT_TF)
+ break;
+ }
+
+ /* enable event interrupts */
+ hw->regs->psc_spimsk = mask;
+ wmb(); /* drain writebuffer */
+
+ /* start the transfer */
+ hw->regs->psc_spipcr = PSC_SPIPCR_MS;
+ wmb(); /* drain writebuffer */
+
+ wait_for_completion(&hw->master_done);
+
+ return min(hw->rx_count, hw->tx_count);
+}
+
+static irqreturn_t au1550_spi_pio_irq_callback(struct au1550_spi *hw)
+{
+ int busy;
+ u32 stat, evnt;
+
+ stat = hw->regs->psc_spistat;
+ evnt = hw->regs->psc_spievent;
+ wmb(); /* drain writebuffer */
+ if ((stat & PSC_SPISTAT_DI) == 0) {
+ dev_err(hw->dev, "Unexpected IRQ!\n");
+ return IRQ_NONE;
+ }
+
+ if ((evnt & (PSC_SPIEVNT_MM | PSC_SPIEVNT_RO
+ | PSC_SPIEVNT_RU | PSC_SPIEVNT_TO
+ | PSC_SPIEVNT_SD))
+ != 0) {
+ /*
+ * due to an error we consider the transfer done,
+ * so mask all events until the next transfer starts
+ */
+ au1550_spi_mask_ack_all(hw);
+ au1550_spi_reset_fifos(hw);
+ dev_err(hw->dev,
+ "pio transfer: unexpected SPI error (event=0x%x stat=0x%x)!\n",
+ evnt, stat);
+ complete(&hw->master_done);
+ return IRQ_HANDLED;
+ }
+
+ /*
+ * while there is something to read from rx fifo
+ * or there is space to write to the tx fifo:
+ */
+ do {
+ busy = 0;
+ stat = hw->regs->psc_spistat;
+ wmb(); /* drain writebuffer */
+
+ /*
+ * Take care to not let the Rx FIFO overflow.
+ *
+ * We only write a byte once we have read at least one. Initially,
+ * the write fifo is full, so we should read from the read fifo
+ * first.
+ * In case we miss a word from the read fifo, we should get an
+ * RO event and should back out.
+ */
+ if (!(stat & PSC_SPISTAT_RE) && hw->rx_count < hw->len) {
+ hw->rx_word(hw);
+ busy = 1;
+
+ if (!(stat & PSC_SPISTAT_TF) && hw->tx_count < hw->len)
+ hw->tx_word(hw);
+ }
+ } while (busy);
+
+ hw->regs->psc_spievent = PSC_SPIEVNT_RR | PSC_SPIEVNT_TR;
+ wmb(); /* drain writebuffer */
+
+ /*
+ * Restart the SPI transmission in case of a transmit underflow.
+ * This seems to work despite the notes in the Au1550 data book
+ * of Figure 8-4 with flowchart for SPI master operation:
+ *
+ * """Note 1: An XFR Error Interrupt occurs, unless masked,
+ * for any of the following events: Tx FIFO Underflow,
+ * Rx FIFO Overflow, or Multiple-master Error
+ * Note 2: In case of a Tx Underflow Error, all zeroes are
+ * transmitted."""
+ *
+ * By simply restarting the SPI transfer on a Tx Underflow Error,
+ * we assume that the SPI transfer was paused rather than that zeroes
+ * were transmitted, as mentioned in Note 2 of the Au1550 data book.
+ */
+ if (evnt & PSC_SPIEVNT_TU) {
+ hw->regs->psc_spievent = PSC_SPIEVNT_TU | PSC_SPIEVNT_MD;
+ wmb(); /* drain writebuffer */
+ hw->regs->psc_spipcr = PSC_SPIPCR_MS;
+ wmb(); /* drain writebuffer */
+ }
+
+ if (hw->rx_count >= hw->len) {
+ /* transfer completed successfully */
+ au1550_spi_mask_ack_all(hw);
+ complete(&hw->master_done);
+ }
+ return IRQ_HANDLED;
+}
+
+static int au1550_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct au1550_spi *hw = spi_master_get_devdata(spi->master);
+
+ return hw->txrx_bufs(spi, t);
+}
+
+static irqreturn_t au1550_spi_irq(int irq, void *dev)
+{
+ struct au1550_spi *hw = dev;
+
+ return hw->irq_callback(hw);
+}
+
+static void au1550_spi_bits_handlers_set(struct au1550_spi *hw, int bpw)
+{
+ if (bpw <= 8) {
+ if (hw->usedma) {
+ hw->txrx_bufs = &au1550_spi_dma_txrxb;
+ hw->irq_callback = &au1550_spi_dma_irq_callback;
+ } else {
+ hw->rx_word = &au1550_spi_rx_word_8;
+ hw->tx_word = &au1550_spi_tx_word_8;
+ hw->txrx_bufs = &au1550_spi_pio_txrxb;
+ hw->irq_callback = &au1550_spi_pio_irq_callback;
+ }
+ } else if (bpw <= 16) {
+ hw->rx_word = &au1550_spi_rx_word_16;
+ hw->tx_word = &au1550_spi_tx_word_16;
+ hw->txrx_bufs = &au1550_spi_pio_txrxb;
+ hw->irq_callback = &au1550_spi_pio_irq_callback;
+ } else {
+ hw->rx_word = &au1550_spi_rx_word_32;
+ hw->tx_word = &au1550_spi_tx_word_32;
+ hw->txrx_bufs = &au1550_spi_pio_txrxb;
+ hw->irq_callback = &au1550_spi_pio_irq_callback;
+ }
+}
+
+static void au1550_spi_setup_psc_as_spi(struct au1550_spi *hw)
+{
+ u32 stat, cfg;
+
+ /* set up the PSC for SPI mode */
+ hw->regs->psc_ctrl = PSC_CTRL_DISABLE;
+ wmb(); /* drain writebuffer */
+ hw->regs->psc_sel = PSC_SEL_PS_SPIMODE;
+ wmb(); /* drain writebuffer */
+
+ hw->regs->psc_spicfg = 0;
+ wmb(); /* drain writebuffer */
+
+ hw->regs->psc_ctrl = PSC_CTRL_ENABLE;
+ wmb(); /* drain writebuffer */
+
+ do {
+ stat = hw->regs->psc_spistat;
+ wmb(); /* drain writebuffer */
+ } while ((stat & PSC_SPISTAT_SR) == 0);
+
+
+ cfg = hw->usedma ? 0 : PSC_SPICFG_DD_DISABLE;
+ cfg |= PSC_SPICFG_SET_LEN(8);
+ cfg |= PSC_SPICFG_RT_FIFO8 | PSC_SPICFG_TT_FIFO8;
+ /* use minimal allowed brg and div values as initial setting: */
+ cfg |= PSC_SPICFG_SET_BAUD(4) | PSC_SPICFG_SET_DIV(0);
+
+#ifdef AU1550_SPI_DEBUG_LOOPBACK
+ cfg |= PSC_SPICFG_LB;
+#endif
+
+ hw->regs->psc_spicfg = cfg;
+ wmb(); /* drain writebuffer */
+
+ au1550_spi_mask_ack_all(hw);
+
+ hw->regs->psc_spicfg |= PSC_SPICFG_DE_ENABLE;
+ wmb(); /* drain writebuffer */
+
+ do {
+ stat = hw->regs->psc_spistat;
+ wmb(); /* drain writebuffer */
+ } while ((stat & PSC_SPISTAT_DR) == 0);
+
+ au1550_spi_reset_fifos(hw);
+}
+
+
+static int au1550_spi_probe(struct platform_device *pdev)
+{
+ struct au1550_spi *hw;
+ struct spi_master *master;
+ struct resource *r;
+ int err = 0;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct au1550_spi));
+ if (master == NULL) {
+ dev_err(&pdev->dev, "No memory for spi_master\n");
+ err = -ENOMEM;
+ goto err_nomem;
+ }
+
+ /* the spi->mode bits understood by this driver: */
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 24);
+
+ hw = spi_master_get_devdata(master);
+
+ hw->master = master;
+ hw->pdata = dev_get_platdata(&pdev->dev);
+ hw->dev = &pdev->dev;
+
+ if (hw->pdata == NULL) {
+ dev_err(&pdev->dev, "No platform data supplied\n");
+ err = -ENOENT;
+ goto err_no_pdata;
+ }
+
+ r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!r) {
+ dev_err(&pdev->dev, "no IRQ\n");
+ err = -ENODEV;
+ goto err_no_iores;
+ }
+ hw->irq = r->start;
+
+ hw->usedma = 0;
+ r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (r) {
+ hw->dma_tx_id = r->start;
+ r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+ if (r) {
+ hw->dma_rx_id = r->start;
+ if (usedma && ddma_memid) {
+ if (pdev->dev.dma_mask == NULL)
+ dev_warn(&pdev->dev, "no dma mask\n");
+ else
+ hw->usedma = 1;
+ }
+ }
+ }
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ dev_err(&pdev->dev, "no mmio resource\n");
+ err = -ENODEV;
+ goto err_no_iores;
+ }
+
+ hw->ioarea = request_mem_region(r->start, sizeof(psc_spi_t),
+ pdev->name);
+ if (!hw->ioarea) {
+ dev_err(&pdev->dev, "Cannot reserve iomem region\n");
+ err = -ENXIO;
+ goto err_no_iores;
+ }
+
+ hw->regs = (psc_spi_t __iomem *)ioremap(r->start, sizeof(psc_spi_t));
+ if (!hw->regs) {
+ dev_err(&pdev->dev, "cannot ioremap\n");
+ err = -ENXIO;
+ goto err_ioremap;
+ }
+
+ platform_set_drvdata(pdev, hw);
+
+ init_completion(&hw->master_done);
+
+ hw->bitbang.master = hw->master;
+ hw->bitbang.setup_transfer = au1550_spi_setupxfer;
+ hw->bitbang.chipselect = au1550_spi_chipsel;
+ hw->bitbang.txrx_bufs = au1550_spi_txrx_bufs;
+
+ if (hw->usedma) {
+ hw->dma_tx_ch = au1xxx_dbdma_chan_alloc(ddma_memid,
+ hw->dma_tx_id, NULL, (void *)hw);
+ if (hw->dma_tx_ch == 0) {
+ dev_err(&pdev->dev,
+ "Cannot allocate tx dma channel\n");
+ err = -ENXIO;
+ goto err_no_txdma;
+ }
+ au1xxx_dbdma_set_devwidth(hw->dma_tx_ch, 8);
+ if (au1xxx_dbdma_ring_alloc(hw->dma_tx_ch,
+ AU1550_SPI_DBDMA_DESCRIPTORS) == 0) {
+ dev_err(&pdev->dev,
+ "Cannot allocate tx dma descriptors\n");
+ err = -ENXIO;
+ goto err_no_txdma_descr;
+ }
+
+
+ hw->dma_rx_ch = au1xxx_dbdma_chan_alloc(hw->dma_rx_id,
+ ddma_memid, NULL, (void *)hw);
+ if (hw->dma_rx_ch == 0) {
+ dev_err(&pdev->dev,
+ "Cannot allocate rx dma channel\n");
+ err = -ENXIO;
+ goto err_no_rxdma;
+ }
+ au1xxx_dbdma_set_devwidth(hw->dma_rx_ch, 8);
+ if (au1xxx_dbdma_ring_alloc(hw->dma_rx_ch,
+ AU1550_SPI_DBDMA_DESCRIPTORS) == 0) {
+ dev_err(&pdev->dev,
+ "Cannot allocate rx dma descriptors\n");
+ err = -ENXIO;
+ goto err_no_rxdma_descr;
+ }
+
+ err = au1550_spi_dma_rxtmp_alloc(hw,
+ AU1550_SPI_DMA_RXTMP_MINSIZE);
+ if (err < 0) {
+ dev_err(&pdev->dev,
+ "Cannot allocate initial rx dma tmp buffer\n");
+ goto err_dma_rxtmp_alloc;
+ }
+ }
+
+ au1550_spi_bits_handlers_set(hw, 8);
+
+ err = request_irq(hw->irq, au1550_spi_irq, 0, pdev->name, hw);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot claim IRQ\n");
+ goto err_no_irq;
+ }
+
+ master->bus_num = pdev->id;
+ master->num_chipselect = hw->pdata->num_chipselect;
+
+ /*
+ * precompute valid range for spi freq - from au1550 datasheet:
+ * psc_tempclk = psc_mainclk / (2 << DIV)
+ * spiclk = psc_tempclk / (2 * (BRG + 1))
+ * BRG valid range is 4..63
+ * DIV valid range is 0..3
+ * round the min and max frequencies to values that would still
+ * produce valid brg and div
+ */
+ {
+ int min_div = (2 << 0) * (2 * (4 + 1));
+ int max_div = (2 << 3) * (2 * (63 + 1));
+
+ master->max_speed_hz = hw->pdata->mainclk_hz / min_div;
+ master->min_speed_hz =
+ hw->pdata->mainclk_hz / (max_div + 1) + 1;
+ }
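
Worked through with an assumed 48 MHz main clock (purely illustrative, not a value from the driver): min_div = (2 << 0) * (2 * (4 + 1)) = 20 and max_div = (2 << 3) * (2 * (63 + 1)) = 2048, so max_speed_hz = 48 MHz / 20 = 2.4 MHz and min_speed_hz = 48 MHz / 2049 + 1, roughly 23.4 kHz.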
+
+ au1550_spi_setup_psc_as_spi(hw);
+
+ err = spi_bitbang_start(&hw->bitbang);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register SPI master\n");
+ goto err_register;
+ }
+
+ dev_info(&pdev->dev,
+ "spi master registered: bus_num=%d num_chipselect=%d\n",
+ master->bus_num, master->num_chipselect);
+
+ return 0;
+
+err_register:
+ free_irq(hw->irq, hw);
+
+err_no_irq:
+ au1550_spi_dma_rxtmp_free(hw);
+
+err_dma_rxtmp_alloc:
+err_no_rxdma_descr:
+ if (hw->usedma)
+ au1xxx_dbdma_chan_free(hw->dma_rx_ch);
+
+err_no_rxdma:
+err_no_txdma_descr:
+ if (hw->usedma)
+ au1xxx_dbdma_chan_free(hw->dma_tx_ch);
+
+err_no_txdma:
+ iounmap((void __iomem *)hw->regs);
+
+err_ioremap:
+ release_mem_region(r->start, sizeof(psc_spi_t));
+
+err_no_iores:
+err_no_pdata:
+ spi_master_put(hw->master);
+
+err_nomem:
+ return err;
+}
+
+static int au1550_spi_remove(struct platform_device *pdev)
+{
+ struct au1550_spi *hw = platform_get_drvdata(pdev);
+
+ dev_info(&pdev->dev, "spi master remove: bus_num=%d\n",
+ hw->master->bus_num);
+
+ spi_bitbang_stop(&hw->bitbang);
+ free_irq(hw->irq, hw);
+ iounmap((void __iomem *)hw->regs);
+ release_mem_region(hw->ioarea->start, sizeof(psc_spi_t));
+
+ if (hw->usedma) {
+ au1550_spi_dma_rxtmp_free(hw);
+ au1xxx_dbdma_chan_free(hw->dma_rx_ch);
+ au1xxx_dbdma_chan_free(hw->dma_tx_ch);
+ }
+
+ spi_master_put(hw->master);
+ return 0;
+}
+
+/* work with hotplug and coldplug */
+MODULE_ALIAS("platform:au1550-spi");
+
+static struct platform_driver au1550_spi_drv = {
+ .probe = au1550_spi_probe,
+ .remove = au1550_spi_remove,
+ .driver = {
+ .name = "au1550-spi",
+ },
+};
+
+static int __init au1550_spi_init(void)
+{
+ /*
+ * create a memory device with an 8-bit dev_devwidth,
+ * needed for proper byte ordering to the SPI FIFO
+ */
+ switch (alchemy_get_cputype()) {
+ case ALCHEMY_CPU_AU1550:
+ case ALCHEMY_CPU_AU1200:
+ case ALCHEMY_CPU_AU1300:
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ if (usedma) {
+ ddma_memid = au1xxx_ddma_add_device(&au1550_spi_mem_dbdev);
+ if (!ddma_memid)
+ printk(KERN_ERR "au1550-spi: cannot add memory dbdma device\n");
+ }
+ return platform_driver_register(&au1550_spi_drv);
+}
+module_init(au1550_spi_init);
+
+static void __exit au1550_spi_exit(void)
+{
+ if (usedma && ddma_memid)
+ au1xxx_ddma_del_device(ddma_memid);
+ platform_driver_unregister(&au1550_spi_drv);
+}
+module_exit(au1550_spi_exit);
+
+MODULE_DESCRIPTION("Au1550 PSC SPI Driver");
+MODULE_AUTHOR("Jan Nikitenko <jan.nikitenko@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c
new file mode 100644
index 000000000..80c3e38f5
--- /dev/null
+++ b/drivers/spi/spi-axi-spi-engine.c
@@ -0,0 +1,597 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * SPI-Engine SPI controller driver
+ * Copyright 2015 Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+
+#define SPI_ENGINE_VERSION_MAJOR(x) ((x >> 16) & 0xff)
+#define SPI_ENGINE_VERSION_MINOR(x) ((x >> 8) & 0xff)
+#define SPI_ENGINE_VERSION_PATCH(x) (x & 0xff)
+
+#define SPI_ENGINE_REG_VERSION 0x00
+
+#define SPI_ENGINE_REG_RESET 0x40
+
+#define SPI_ENGINE_REG_INT_ENABLE 0x80
+#define SPI_ENGINE_REG_INT_PENDING 0x84
+#define SPI_ENGINE_REG_INT_SOURCE 0x88
+
+#define SPI_ENGINE_REG_SYNC_ID 0xc0
+
+#define SPI_ENGINE_REG_CMD_FIFO_ROOM 0xd0
+#define SPI_ENGINE_REG_SDO_FIFO_ROOM 0xd4
+#define SPI_ENGINE_REG_SDI_FIFO_LEVEL 0xd8
+
+#define SPI_ENGINE_REG_CMD_FIFO 0xe0
+#define SPI_ENGINE_REG_SDO_DATA_FIFO 0xe4
+#define SPI_ENGINE_REG_SDI_DATA_FIFO 0xe8
+#define SPI_ENGINE_REG_SDI_DATA_FIFO_PEEK 0xec
+
+#define SPI_ENGINE_INT_CMD_ALMOST_EMPTY BIT(0)
+#define SPI_ENGINE_INT_SDO_ALMOST_EMPTY BIT(1)
+#define SPI_ENGINE_INT_SDI_ALMOST_FULL BIT(2)
+#define SPI_ENGINE_INT_SYNC BIT(3)
+
+#define SPI_ENGINE_CONFIG_CPHA BIT(0)
+#define SPI_ENGINE_CONFIG_CPOL BIT(1)
+#define SPI_ENGINE_CONFIG_3WIRE BIT(2)
+
+#define SPI_ENGINE_INST_TRANSFER 0x0
+#define SPI_ENGINE_INST_ASSERT 0x1
+#define SPI_ENGINE_INST_WRITE 0x2
+#define SPI_ENGINE_INST_MISC 0x3
+
+#define SPI_ENGINE_CMD_REG_CLK_DIV 0x0
+#define SPI_ENGINE_CMD_REG_CONFIG 0x1
+
+#define SPI_ENGINE_MISC_SYNC 0x0
+#define SPI_ENGINE_MISC_SLEEP 0x1
+
+#define SPI_ENGINE_TRANSFER_WRITE 0x1
+#define SPI_ENGINE_TRANSFER_READ 0x2
+
+#define SPI_ENGINE_CMD(inst, arg1, arg2) \
+ (((inst) << 12) | ((arg1) << 8) | (arg2))
+
+#define SPI_ENGINE_CMD_TRANSFER(flags, n) \
+ SPI_ENGINE_CMD(SPI_ENGINE_INST_TRANSFER, (flags), (n))
+#define SPI_ENGINE_CMD_ASSERT(delay, cs) \
+ SPI_ENGINE_CMD(SPI_ENGINE_INST_ASSERT, (delay), (cs))
+#define SPI_ENGINE_CMD_WRITE(reg, val) \
+ SPI_ENGINE_CMD(SPI_ENGINE_INST_WRITE, (reg), (val))
+#define SPI_ENGINE_CMD_SLEEP(delay) \
+ SPI_ENGINE_CMD(SPI_ENGINE_INST_MISC, SPI_ENGINE_MISC_SLEEP, (delay))
+#define SPI_ENGINE_CMD_SYNC(id) \
+ SPI_ENGINE_CMD(SPI_ENGINE_INST_MISC, SPI_ENGINE_MISC_SYNC, (id))
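
Each command is a 16-bit word packing a 4-bit instruction, a 4-bit first argument and an 8-bit second argument. For example, SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CLK_DIV, 5) encodes to (0x2 << 12) | (0x0 << 8) | 5 = 0x2005, and SPI_ENGINE_CMD_TRANSFER(SPI_ENGINE_TRANSFER_WRITE | SPI_ENGINE_TRANSFER_READ, 255) encodes to 0x03ff.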
+
+struct spi_engine_program {
+ unsigned int length;
+ uint16_t instructions[];
+};
+
+struct spi_engine {
+ struct clk *clk;
+ struct clk *ref_clk;
+
+ spinlock_t lock;
+
+ void __iomem *base;
+
+ struct spi_message *msg;
+ struct spi_engine_program *p;
+ unsigned cmd_length;
+ const uint16_t *cmd_buf;
+
+ struct spi_transfer *tx_xfer;
+ unsigned int tx_length;
+ const uint8_t *tx_buf;
+
+ struct spi_transfer *rx_xfer;
+ unsigned int rx_length;
+ uint8_t *rx_buf;
+
+ unsigned int sync_id;
+ unsigned int completed_id;
+
+ unsigned int int_enable;
+};
+
+static void spi_engine_program_add_cmd(struct spi_engine_program *p,
+ bool dry, uint16_t cmd)
+{
+ if (!dry)
+ p->instructions[p->length] = cmd;
+ p->length++;
+}
+
+static unsigned int spi_engine_get_config(struct spi_device *spi)
+{
+ unsigned int config = 0;
+
+ if (spi->mode & SPI_CPOL)
+ config |= SPI_ENGINE_CONFIG_CPOL;
+ if (spi->mode & SPI_CPHA)
+ config |= SPI_ENGINE_CONFIG_CPHA;
+ if (spi->mode & SPI_3WIRE)
+ config |= SPI_ENGINE_CONFIG_3WIRE;
+
+ return config;
+}
+
+static unsigned int spi_engine_get_clk_div(struct spi_engine *spi_engine,
+ struct spi_device *spi, struct spi_transfer *xfer)
+{
+ unsigned int clk_div;
+
+ clk_div = DIV_ROUND_UP(clk_get_rate(spi_engine->ref_clk),
+ xfer->speed_hz * 2);
+ if (clk_div > 255)
+ clk_div = 255;
+ else if (clk_div > 0)
+ clk_div -= 1;
+
+ return clk_div;
+}
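
As a rough numeric illustration (assuming, for the sake of the example, a 100 MHz reference clock, which is not a value taken from the driver): a transfer requesting 1 MHz gives DIV_ROUND_UP(100 MHz, 2 MHz) = 50, minus one, so clk_div = 49. Since the divider is derived from speed_hz * 2, the effective SCLK works out to ref_clk / (2 * (clk_div + 1)) = 100 MHz / 100 = 1 MHz, which is also consistent with master->max_speed_hz being set to ref_clk / 2 (clk_div = 0) in probe.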
+
+static void spi_engine_gen_xfer(struct spi_engine_program *p, bool dry,
+ struct spi_transfer *xfer)
+{
+ unsigned int len = xfer->len;
+
+ while (len) {
+ unsigned int n = min(len, 256U);
+ unsigned int flags = 0;
+
+ if (xfer->tx_buf)
+ flags |= SPI_ENGINE_TRANSFER_WRITE;
+ if (xfer->rx_buf)
+ flags |= SPI_ENGINE_TRANSFER_READ;
+
+ spi_engine_program_add_cmd(p, dry,
+ SPI_ENGINE_CMD_TRANSFER(flags, n - 1));
+ len -= n;
+ }
+}
+
+static void spi_engine_gen_sleep(struct spi_engine_program *p, bool dry,
+ struct spi_engine *spi_engine, unsigned int clk_div,
+ struct spi_transfer *xfer)
+{
+ unsigned int spi_clk = clk_get_rate(spi_engine->ref_clk);
+ unsigned int t;
+ int delay;
+
+ delay = spi_delay_to_ns(&xfer->delay, xfer);
+ if (delay < 0)
+ return;
+ delay /= 1000;
+
+ if (delay == 0)
+ return;
+
+ t = DIV_ROUND_UP(delay * spi_clk, (clk_div + 1) * 2);
+ while (t) {
+ unsigned int n = min(t, 256U);
+
+ spi_engine_program_add_cmd(p, dry, SPI_ENGINE_CMD_SLEEP(n - 1));
+ t -= n;
+ }
+}
+
+static void spi_engine_gen_cs(struct spi_engine_program *p, bool dry,
+ struct spi_device *spi, bool assert)
+{
+ unsigned int mask = 0xff;
+
+ if (assert)
+ mask ^= BIT(spi->chip_select);
+
+ spi_engine_program_add_cmd(p, dry, SPI_ENGINE_CMD_ASSERT(1, mask));
+}
+
+static int spi_engine_compile_message(struct spi_engine *spi_engine,
+ struct spi_message *msg, bool dry, struct spi_engine_program *p)
+{
+ struct spi_device *spi = msg->spi;
+ struct spi_transfer *xfer;
+ int clk_div, new_clk_div;
+ bool cs_change = true;
+
+ clk_div = -1;
+
+ spi_engine_program_add_cmd(p, dry,
+ SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CONFIG,
+ spi_engine_get_config(spi)));
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ new_clk_div = spi_engine_get_clk_div(spi_engine, spi, xfer);
+ if (new_clk_div != clk_div) {
+ clk_div = new_clk_div;
+ spi_engine_program_add_cmd(p, dry,
+ SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CLK_DIV,
+ clk_div));
+ }
+
+ if (cs_change)
+ spi_engine_gen_cs(p, dry, spi, true);
+
+ spi_engine_gen_xfer(p, dry, xfer);
+ spi_engine_gen_sleep(p, dry, spi_engine, clk_div, xfer);
+
+ cs_change = xfer->cs_change;
+ if (list_is_last(&xfer->transfer_list, &msg->transfers))
+ cs_change = !cs_change;
+
+ if (cs_change)
+ spi_engine_gen_cs(p, dry, spi, false);
+ }
+
+ return 0;
+}
+
+static void spi_engine_xfer_next(struct spi_engine *spi_engine,
+ struct spi_transfer **_xfer)
+{
+ struct spi_message *msg = spi_engine->msg;
+ struct spi_transfer *xfer = *_xfer;
+
+ if (!xfer) {
+ xfer = list_first_entry(&msg->transfers,
+ struct spi_transfer, transfer_list);
+ } else if (list_is_last(&xfer->transfer_list, &msg->transfers)) {
+ xfer = NULL;
+ } else {
+ xfer = list_next_entry(xfer, transfer_list);
+ }
+
+ *_xfer = xfer;
+}
+
+static void spi_engine_tx_next(struct spi_engine *spi_engine)
+{
+ struct spi_transfer *xfer = spi_engine->tx_xfer;
+
+ do {
+ spi_engine_xfer_next(spi_engine, &xfer);
+ } while (xfer && !xfer->tx_buf);
+
+ spi_engine->tx_xfer = xfer;
+ if (xfer) {
+ spi_engine->tx_length = xfer->len;
+ spi_engine->tx_buf = xfer->tx_buf;
+ } else {
+ spi_engine->tx_buf = NULL;
+ }
+}
+
+static void spi_engine_rx_next(struct spi_engine *spi_engine)
+{
+ struct spi_transfer *xfer = spi_engine->rx_xfer;
+
+ do {
+ spi_engine_xfer_next(spi_engine, &xfer);
+ } while (xfer && !xfer->rx_buf);
+
+ spi_engine->rx_xfer = xfer;
+ if (xfer) {
+ spi_engine->rx_length = xfer->len;
+ spi_engine->rx_buf = xfer->rx_buf;
+ } else {
+ spi_engine->rx_buf = NULL;
+ }
+}
+
+static bool spi_engine_write_cmd_fifo(struct spi_engine *spi_engine)
+{
+ void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_CMD_FIFO;
+ unsigned int n, m, i;
+ const uint16_t *buf;
+
+ n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_CMD_FIFO_ROOM);
+ while (n && spi_engine->cmd_length) {
+ m = min(n, spi_engine->cmd_length);
+ buf = spi_engine->cmd_buf;
+ for (i = 0; i < m; i++)
+ writel_relaxed(buf[i], addr);
+ spi_engine->cmd_buf += m;
+ spi_engine->cmd_length -= m;
+ n -= m;
+ }
+
+ return spi_engine->cmd_length != 0;
+}
+
+static bool spi_engine_write_tx_fifo(struct spi_engine *spi_engine)
+{
+ void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_SDO_DATA_FIFO;
+ unsigned int n, m, i;
+ const uint8_t *buf;
+
+ n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_SDO_FIFO_ROOM);
+ while (n && spi_engine->tx_length) {
+ m = min(n, spi_engine->tx_length);
+ buf = spi_engine->tx_buf;
+ for (i = 0; i < m; i++)
+ writel_relaxed(buf[i], addr);
+ spi_engine->tx_buf += m;
+ spi_engine->tx_length -= m;
+ n -= m;
+ if (spi_engine->tx_length == 0)
+ spi_engine_tx_next(spi_engine);
+ }
+
+ return spi_engine->tx_length != 0;
+}
+
+static bool spi_engine_read_rx_fifo(struct spi_engine *spi_engine)
+{
+ void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_SDI_DATA_FIFO;
+ unsigned int n, m, i;
+ uint8_t *buf;
+
+ n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_SDI_FIFO_LEVEL);
+ while (n && spi_engine->rx_length) {
+ m = min(n, spi_engine->rx_length);
+ buf = spi_engine->rx_buf;
+ for (i = 0; i < m; i++)
+ buf[i] = readl_relaxed(addr);
+ spi_engine->rx_buf += m;
+ spi_engine->rx_length -= m;
+ n -= m;
+ if (spi_engine->rx_length == 0)
+ spi_engine_rx_next(spi_engine);
+ }
+
+ return spi_engine->rx_length != 0;
+}
+
+static irqreturn_t spi_engine_irq(int irq, void *devid)
+{
+ struct spi_master *master = devid;
+ struct spi_engine *spi_engine = spi_master_get_devdata(master);
+ unsigned int disable_int = 0;
+ unsigned int pending;
+
+ pending = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
+
+ if (pending & SPI_ENGINE_INT_SYNC) {
+ writel_relaxed(SPI_ENGINE_INT_SYNC,
+ spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
+ spi_engine->completed_id = readl_relaxed(
+ spi_engine->base + SPI_ENGINE_REG_SYNC_ID);
+ }
+
+ spin_lock(&spi_engine->lock);
+
+ if (pending & SPI_ENGINE_INT_CMD_ALMOST_EMPTY) {
+ if (!spi_engine_write_cmd_fifo(spi_engine))
+ disable_int |= SPI_ENGINE_INT_CMD_ALMOST_EMPTY;
+ }
+
+ if (pending & SPI_ENGINE_INT_SDO_ALMOST_EMPTY) {
+ if (!spi_engine_write_tx_fifo(spi_engine))
+ disable_int |= SPI_ENGINE_INT_SDO_ALMOST_EMPTY;
+ }
+
+ if (pending & (SPI_ENGINE_INT_SDI_ALMOST_FULL | SPI_ENGINE_INT_SYNC)) {
+ if (!spi_engine_read_rx_fifo(spi_engine))
+ disable_int |= SPI_ENGINE_INT_SDI_ALMOST_FULL;
+ }
+
+ if (pending & SPI_ENGINE_INT_SYNC) {
+ if (spi_engine->msg &&
+ spi_engine->completed_id == spi_engine->sync_id) {
+ struct spi_message *msg = spi_engine->msg;
+
+ kfree(spi_engine->p);
+ msg->status = 0;
+ msg->actual_length = msg->frame_length;
+ spi_engine->msg = NULL;
+ spi_finalize_current_message(master);
+ disable_int |= SPI_ENGINE_INT_SYNC;
+ }
+ }
+
+ if (disable_int) {
+ spi_engine->int_enable &= ~disable_int;
+ writel_relaxed(spi_engine->int_enable,
+ spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
+ }
+
+ spin_unlock(&spi_engine->lock);
+
+ return IRQ_HANDLED;
+}
+
+static int spi_engine_transfer_one_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct spi_engine_program p_dry, *p;
+ struct spi_engine *spi_engine = spi_master_get_devdata(master);
+ unsigned int int_enable = 0;
+ unsigned long flags;
+ size_t size;
+
+ p_dry.length = 0;
+ spi_engine_compile_message(spi_engine, msg, true, &p_dry);
+
+ size = sizeof(*p->instructions) * (p_dry.length + 1);
+ p = kzalloc(sizeof(*p) + size, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+ spi_engine_compile_message(spi_engine, msg, false, p);
+
+ spin_lock_irqsave(&spi_engine->lock, flags);
+ spi_engine->sync_id = (spi_engine->sync_id + 1) & 0xff;
+ spi_engine_program_add_cmd(p, false,
+ SPI_ENGINE_CMD_SYNC(spi_engine->sync_id));
+
+ spi_engine->msg = msg;
+ spi_engine->p = p;
+
+ spi_engine->cmd_buf = p->instructions;
+ spi_engine->cmd_length = p->length;
+ if (spi_engine_write_cmd_fifo(spi_engine))
+ int_enable |= SPI_ENGINE_INT_CMD_ALMOST_EMPTY;
+
+ spi_engine_tx_next(spi_engine);
+ if (spi_engine_write_tx_fifo(spi_engine))
+ int_enable |= SPI_ENGINE_INT_SDO_ALMOST_EMPTY;
+
+ spi_engine_rx_next(spi_engine);
+ if (spi_engine->rx_length != 0)
+ int_enable |= SPI_ENGINE_INT_SDI_ALMOST_FULL;
+
+ int_enable |= SPI_ENGINE_INT_SYNC;
+
+ writel_relaxed(int_enable,
+ spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
+ spi_engine->int_enable = int_enable;
+ spin_unlock_irqrestore(&spi_engine->lock, flags);
+
+ return 0;
+}
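
Note the two-pass pattern in the function above: the first spi_engine_compile_message() call runs with dry = true, so spi_engine_program_add_cmd() only increments p->length without storing instructions, which sizes the allocation (plus one extra slot reserved for the SYNC command appended later under the lock); the second call with dry = false then fills the freshly allocated program.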
+
+static int spi_engine_probe(struct platform_device *pdev)
+{
+ struct spi_engine *spi_engine;
+ struct spi_master *master;
+ unsigned int version;
+ int irq;
+ int ret;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0)
+ return -ENXIO;
+
+ spi_engine = devm_kzalloc(&pdev->dev, sizeof(*spi_engine), GFP_KERNEL);
+ if (!spi_engine)
+ return -ENOMEM;
+
+ master = spi_alloc_master(&pdev->dev, 0);
+ if (!master)
+ return -ENOMEM;
+
+ spi_master_set_devdata(master, spi_engine);
+
+ spin_lock_init(&spi_engine->lock);
+
+ spi_engine->clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
+ if (IS_ERR(spi_engine->clk)) {
+ ret = PTR_ERR(spi_engine->clk);
+ goto err_put_master;
+ }
+
+ spi_engine->ref_clk = devm_clk_get(&pdev->dev, "spi_clk");
+ if (IS_ERR(spi_engine->ref_clk)) {
+ ret = PTR_ERR(spi_engine->ref_clk);
+ goto err_put_master;
+ }
+
+ ret = clk_prepare_enable(spi_engine->clk);
+ if (ret)
+ goto err_put_master;
+
+ ret = clk_prepare_enable(spi_engine->ref_clk);
+ if (ret)
+ goto err_clk_disable;
+
+ spi_engine->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(spi_engine->base)) {
+ ret = PTR_ERR(spi_engine->base);
+ goto err_ref_clk_disable;
+ }
+
+ version = readl(spi_engine->base + SPI_ENGINE_REG_VERSION);
+ if (SPI_ENGINE_VERSION_MAJOR(version) != 1) {
+ dev_err(&pdev->dev, "Unsupported peripheral version %u.%u.%c\n",
+ SPI_ENGINE_VERSION_MAJOR(version),
+ SPI_ENGINE_VERSION_MINOR(version),
+ SPI_ENGINE_VERSION_PATCH(version));
+ ret = -ENODEV;
+ goto err_ref_clk_disable;
+ }
+
+ writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_RESET);
+ writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
+ writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
+
+ ret = request_irq(irq, spi_engine_irq, 0, pdev->name, master);
+ if (ret)
+ goto err_ref_clk_disable;
+
+ master->dev.of_node = pdev->dev.of_node;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_3WIRE;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->max_speed_hz = clk_get_rate(spi_engine->ref_clk) / 2;
+ master->transfer_one_message = spi_engine_transfer_one_message;
+ master->num_chipselect = 8;
+
+ ret = spi_register_master(master);
+ if (ret)
+ goto err_free_irq;
+
+ platform_set_drvdata(pdev, master);
+
+ return 0;
+err_free_irq:
+ free_irq(irq, master);
+err_ref_clk_disable:
+ clk_disable_unprepare(spi_engine->ref_clk);
+err_clk_disable:
+ clk_disable_unprepare(spi_engine->clk);
+err_put_master:
+ spi_master_put(master);
+ return ret;
+}
+
+static int spi_engine_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
+ struct spi_engine *spi_engine = spi_master_get_devdata(master);
+ int irq = platform_get_irq(pdev, 0);
+
+ spi_unregister_master(master);
+
+ free_irq(irq, master);
+
+ spi_master_put(master);
+
+ writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
+ writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
+ writel_relaxed(0x01, spi_engine->base + SPI_ENGINE_REG_RESET);
+
+ clk_disable_unprepare(spi_engine->ref_clk);
+ clk_disable_unprepare(spi_engine->clk);
+
+ return 0;
+}
+
+static const struct of_device_id spi_engine_match_table[] = {
+ { .compatible = "adi,axi-spi-engine-1.00.a" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, spi_engine_match_table);
+
+static struct platform_driver spi_engine_driver = {
+ .probe = spi_engine_probe,
+ .remove = spi_engine_remove,
+ .driver = {
+ .name = "spi-engine",
+ .of_match_table = spi_engine_match_table,
+ },
+};
+module_platform_driver(spi_engine_driver);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("Analog Devices SPI engine peripheral driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
new file mode 100644
index 000000000..b24955910
--- /dev/null
+++ b/drivers/spi/spi-bcm-qspi.c
@@ -0,0 +1,1738 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Driver for Broadcom BRCMSTB, NSP, NS2, Cygnus SPI Controllers
+ *
+ * Copyright 2016 Broadcom
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/mtd/spi-nor.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+#include "spi-bcm-qspi.h"
+
+#define DRIVER_NAME "bcm_qspi"
+
+
+/* BSPI register offsets */
+#define BSPI_REVISION_ID 0x000
+#define BSPI_SCRATCH 0x004
+#define BSPI_MAST_N_BOOT_CTRL 0x008
+#define BSPI_BUSY_STATUS 0x00c
+#define BSPI_INTR_STATUS 0x010
+#define BSPI_B0_STATUS 0x014
+#define BSPI_B0_CTRL 0x018
+#define BSPI_B1_STATUS 0x01c
+#define BSPI_B1_CTRL 0x020
+#define BSPI_STRAP_OVERRIDE_CTRL 0x024
+#define BSPI_FLEX_MODE_ENABLE 0x028
+#define BSPI_BITS_PER_CYCLE 0x02c
+#define BSPI_BITS_PER_PHASE 0x030
+#define BSPI_CMD_AND_MODE_BYTE 0x034
+#define BSPI_BSPI_FLASH_UPPER_ADDR_BYTE 0x038
+#define BSPI_BSPI_XOR_VALUE 0x03c
+#define BSPI_BSPI_XOR_ENABLE 0x040
+#define BSPI_BSPI_PIO_MODE_ENABLE 0x044
+#define BSPI_BSPI_PIO_IODIR 0x048
+#define BSPI_BSPI_PIO_DATA 0x04c
+
+/* RAF register offsets */
+#define BSPI_RAF_START_ADDR 0x100
+#define BSPI_RAF_NUM_WORDS 0x104
+#define BSPI_RAF_CTRL 0x108
+#define BSPI_RAF_FULLNESS 0x10c
+#define BSPI_RAF_WATERMARK 0x110
+#define BSPI_RAF_STATUS 0x114
+#define BSPI_RAF_READ_DATA 0x118
+#define BSPI_RAF_WORD_CNT 0x11c
+#define BSPI_RAF_CURR_ADDR 0x120
+
+/* Override mode masks */
+#define BSPI_STRAP_OVERRIDE_CTRL_OVERRIDE BIT(0)
+#define BSPI_STRAP_OVERRIDE_CTRL_DATA_DUAL BIT(1)
+#define BSPI_STRAP_OVERRIDE_CTRL_ADDR_4BYTE BIT(2)
+#define BSPI_STRAP_OVERRIDE_CTRL_DATA_QUAD BIT(3)
+#define BSPI_STRAP_OVERRIDE_CTRL_ENDAIN_MODE BIT(4)
+
+#define BSPI_ADDRLEN_3BYTES 3
+#define BSPI_ADDRLEN_4BYTES 4
+
+#define BSPI_RAF_STATUS_FIFO_EMPTY_MASK BIT(1)
+
+#define BSPI_RAF_CTRL_START_MASK BIT(0)
+#define BSPI_RAF_CTRL_CLEAR_MASK BIT(1)
+
+#define BSPI_BPP_MODE_SELECT_MASK BIT(8)
+#define BSPI_BPP_ADDR_SELECT_MASK BIT(16)
+
+#define BSPI_READ_LENGTH 256
+
+/* MSPI register offsets */
+#define MSPI_SPCR0_LSB 0x000
+#define MSPI_SPCR0_MSB 0x004
+#define MSPI_SPCR0_MSB_CPHA BIT(0)
+#define MSPI_SPCR0_MSB_CPOL BIT(1)
+#define MSPI_SPCR0_MSB_BITS_SHIFT 0x2
+#define MSPI_SPCR1_LSB 0x008
+#define MSPI_SPCR1_MSB 0x00c
+#define MSPI_NEWQP 0x010
+#define MSPI_ENDQP 0x014
+#define MSPI_SPCR2 0x018
+#define MSPI_MSPI_STATUS 0x020
+#define MSPI_CPTQP 0x024
+#define MSPI_SPCR3 0x028
+#define MSPI_REV 0x02c
+#define MSPI_TXRAM 0x040
+#define MSPI_RXRAM 0x0c0
+#define MSPI_CDRAM 0x140
+#define MSPI_WRITE_LOCK 0x180
+
+#define MSPI_MASTER_BIT BIT(7)
+
+#define MSPI_NUM_CDRAM 16
+#define MSPI_CDRAM_OUTP BIT(8)
+#define MSPI_CDRAM_CONT_BIT BIT(7)
+#define MSPI_CDRAM_BITSE_BIT BIT(6)
+#define MSPI_CDRAM_DT_BIT BIT(5)
+#define MSPI_CDRAM_PCS 0xf
+
+#define MSPI_SPCR2_SPE BIT(6)
+#define MSPI_SPCR2_CONT_AFTER_CMD BIT(7)
+
+#define MSPI_SPCR3_FASTBR BIT(0)
+#define MSPI_SPCR3_FASTDT BIT(1)
+#define MSPI_SPCR3_SYSCLKSEL_MASK GENMASK(11, 10)
+#define MSPI_SPCR3_SYSCLKSEL_27 (MSPI_SPCR3_SYSCLKSEL_MASK & \
+ ~(BIT(10) | BIT(11)))
+#define MSPI_SPCR3_SYSCLKSEL_108 (MSPI_SPCR3_SYSCLKSEL_MASK & \
+ BIT(11))
+#define MSPI_SPCR3_TXRXDAM_MASK GENMASK(4, 2)
+#define MSPI_SPCR3_DAM_8BYTE 0
+#define MSPI_SPCR3_DAM_16BYTE (BIT(2) | BIT(4))
+#define MSPI_SPCR3_DAM_32BYTE (BIT(3) | BIT(5))
+#define MSPI_SPCR3_HALFDUPLEX BIT(6)
+#define MSPI_SPCR3_HDOUTTYPE BIT(7)
+#define MSPI_SPCR3_DATA_REG_SZ BIT(8)
+#define MSPI_SPCR3_CPHARX BIT(9)
+
+#define MSPI_MSPI_STATUS_SPIF BIT(0)
+
+#define INTR_BASE_BIT_SHIFT 0x02
+#define INTR_COUNT 0x07
+
+#define NUM_CHIPSELECT 4
+#define QSPI_SPBR_MAX 255U
+#define MSPI_BASE_FREQ 27000000UL
+
+#define OPCODE_DIOR 0xBB
+#define OPCODE_QIOR 0xEB
+#define OPCODE_DIOR_4B 0xBC
+#define OPCODE_QIOR_4B 0xEC
+
+#define MAX_CMD_SIZE 6
+
+#define ADDR_4MB_MASK GENMASK(22, 0)
+
+/* stop at end of transfer, no other reason */
+#define TRANS_STATUS_BREAK_NONE 0
+/* stop at end of spi_message */
+#define TRANS_STATUS_BREAK_EOM 1
+/* stop at end of spi_transfer if delay */
+#define TRANS_STATUS_BREAK_DELAY 2
+/* stop at end of spi_transfer if cs_change */
+#define TRANS_STATUS_BREAK_CS_CHANGE 4
+/* stop if we run out of bytes */
+#define TRANS_STATUS_BREAK_NO_BYTES 8
+
+/* events that make us stop filling TX slots */
+#define TRANS_STATUS_BREAK_TX (TRANS_STATUS_BREAK_EOM | \
+ TRANS_STATUS_BREAK_DELAY | \
+ TRANS_STATUS_BREAK_CS_CHANGE)
+
+/* events that make us deassert CS */
+#define TRANS_STATUS_BREAK_DESELECT (TRANS_STATUS_BREAK_EOM | \
+ TRANS_STATUS_BREAK_CS_CHANGE)
+
+/*
+ * Used to write and read data to/from TXRAM and RXRAM in the right byte
+ * order when the slots are accessed as 32-bit registers
+ */
+#define swap4bytes(__val) \
+ ((((__val) >> 24) & 0x000000FF) | (((__val) >> 8) & 0x0000FF00) | \
+ (((__val) << 8) & 0x00FF0000) | (((__val) << 24) & 0xFF000000))
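+/*
+ * For example, swap4bytes(0x11223344) evaluates to 0x44332211; the TXRAM
+ * and RXRAM slot helpers below rely on this to restore byte order when a
+ * slot is accessed as a 32-bit word.
+ */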
+
+struct bcm_qspi_parms {
+ u32 speed_hz;
+ u8 mode;
+ u8 bits_per_word;
+};
+
+struct bcm_xfer_mode {
+ bool flex_mode;
+ unsigned int width;
+ unsigned int addrlen;
+ unsigned int hp;
+};
+
+enum base_type {
+ MSPI,
+ BSPI,
+ CHIP_SELECT,
+ BASEMAX,
+};
+
+enum irq_source {
+ SINGLE_L2,
+ MUXED_L1,
+};
+
+struct bcm_qspi_irq {
+ const char *irq_name;
+ const irq_handler_t irq_handler;
+ int irq_source;
+ u32 mask;
+};
+
+struct bcm_qspi_dev_id {
+ const struct bcm_qspi_irq *irqp;
+ void *dev;
+};
+
+
+struct qspi_trans {
+ struct spi_transfer *trans;
+ int byte;
+ bool mspi_last_trans;
+};
+
+struct bcm_qspi {
+ struct platform_device *pdev;
+ struct spi_master *master;
+ struct clk *clk;
+ u32 base_clk;
+ u32 max_speed_hz;
+ void __iomem *base[BASEMAX];
+
+ /* Some SoCs provide custom interrupt status register(s) */
+ struct bcm_qspi_soc_intc *soc_intc;
+
+ struct bcm_qspi_parms last_parms;
+ struct qspi_trans trans_pos;
+ int curr_cs;
+ int bspi_maj_rev;
+ int bspi_min_rev;
+ int bspi_enabled;
+ const struct spi_mem_op *bspi_rf_op;
+ u32 bspi_rf_op_idx;
+ u32 bspi_rf_op_len;
+ u32 bspi_rf_op_status;
+ struct bcm_xfer_mode xfer_mode;
+ u32 s3_strap_override_ctrl;
+ bool bspi_mode;
+ bool big_endian;
+ int num_irqs;
+ struct bcm_qspi_dev_id *dev_ids;
+ struct completion mspi_done;
+ struct completion bspi_done;
+ u8 mspi_maj_rev;
+ u8 mspi_min_rev;
+ bool mspi_spcr3_sysclk;
+};
+
+static inline bool has_bspi(struct bcm_qspi *qspi)
+{
+ return qspi->bspi_mode;
+}
+
+/* hardware supports spcr3 and fast baud-rate */
+static inline bool bcm_qspi_has_fastbr(struct bcm_qspi *qspi)
+{
+ if (!has_bspi(qspi) &&
+ ((qspi->mspi_maj_rev >= 1) &&
+ (qspi->mspi_min_rev >= 5)))
+ return true;
+
+ return false;
+}
+
+/* hardware supports the 108 MHz system clock */
+static inline bool bcm_qspi_has_sysclk_108(struct bcm_qspi *qspi)
+{
+ if (!has_bspi(qspi) && (qspi->mspi_spcr3_sysclk ||
+ ((qspi->mspi_maj_rev >= 1) &&
+ (qspi->mspi_min_rev >= 6))))
+ return true;
+
+ return false;
+}
+
+static inline int bcm_qspi_spbr_min(struct bcm_qspi *qspi)
+{
+ if (bcm_qspi_has_fastbr(qspi))
+ return (bcm_qspi_has_sysclk_108(qspi) ? 4 : 1);
+ else
+ return 8;
+}
+
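+/*
+ * Worked example: with the 27 MHz base clock and a requested 13.5 MHz SCK,
+ * SPBR = 27000000 / (2 * 13500000) = 1; the value is later clamped to the
+ * [bcm_qspi_spbr_min(), QSPI_SPBR_MAX] range before being written to
+ * MSPI_SPCR0_LSB in bcm_qspi_hw_set_parms().
+ */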
+static u32 bcm_qspi_calc_spbr(u32 clk_speed_hz,
+ const struct bcm_qspi_parms *xp)
+{
+ u32 spbr = 0;
+
+ /* SPBR = System Clock/(2 * SCK Baud Rate) */
+ if (xp->speed_hz)
+ spbr = clk_speed_hz / (xp->speed_hz * 2);
+
+ return spbr;
+}
+
+/* Read QSPI controller register */
+static inline u32 bcm_qspi_read(struct bcm_qspi *qspi, enum base_type type,
+ unsigned int offset)
+{
+ return bcm_qspi_readl(qspi->big_endian, qspi->base[type] + offset);
+}
+
+/* Write QSPI controller register */
+static inline void bcm_qspi_write(struct bcm_qspi *qspi, enum base_type type,
+ unsigned int offset, unsigned int data)
+{
+ bcm_qspi_writel(qspi->big_endian, data, qspi->base[type] + offset);
+}
+
+/* BSPI helpers */
+static int bcm_qspi_bspi_busy_poll(struct bcm_qspi *qspi)
+{
+ int i;
+
+ /* this should normally finish within 10us */
+ for (i = 0; i < 1000; i++) {
+ if (!(bcm_qspi_read(qspi, BSPI, BSPI_BUSY_STATUS) & 1))
+ return 0;
+ udelay(1);
+ }
+ dev_warn(&qspi->pdev->dev, "timeout waiting for !busy_status\n");
+ return -EIO;
+}
+
+static inline bool bcm_qspi_bspi_ver_three(struct bcm_qspi *qspi)
+{
+ if (qspi->bspi_maj_rev < 4)
+ return true;
+ return false;
+}
+
+static void bcm_qspi_bspi_flush_prefetch_buffers(struct bcm_qspi *qspi)
+{
+ bcm_qspi_bspi_busy_poll(qspi);
+ /* Force rising edge for the b0/b1 'flush' field */
+ bcm_qspi_write(qspi, BSPI, BSPI_B0_CTRL, 1);
+ bcm_qspi_write(qspi, BSPI, BSPI_B1_CTRL, 1);
+ bcm_qspi_write(qspi, BSPI, BSPI_B0_CTRL, 0);
+ bcm_qspi_write(qspi, BSPI, BSPI_B1_CTRL, 0);
+}
+
+static int bcm_qspi_bspi_lr_is_fifo_empty(struct bcm_qspi *qspi)
+{
+ return (bcm_qspi_read(qspi, BSPI, BSPI_RAF_STATUS) &
+ BSPI_RAF_STATUS_FIFO_EMPTY_MASK);
+}
+
+static inline u32 bcm_qspi_bspi_lr_read_fifo(struct bcm_qspi *qspi)
+{
+ u32 data = bcm_qspi_read(qspi, BSPI, BSPI_RAF_READ_DATA);
+
+ /* BSPI v3 LR is LE only, convert data to host endianness */
+ if (bcm_qspi_bspi_ver_three(qspi))
+ data = le32_to_cpu(data);
+
+ return data;
+}
+
+static inline void bcm_qspi_bspi_lr_start(struct bcm_qspi *qspi)
+{
+ bcm_qspi_bspi_busy_poll(qspi);
+ bcm_qspi_write(qspi, BSPI, BSPI_RAF_CTRL,
+ BSPI_RAF_CTRL_START_MASK);
+}
+
+static inline void bcm_qspi_bspi_lr_clear(struct bcm_qspi *qspi)
+{
+ bcm_qspi_write(qspi, BSPI, BSPI_RAF_CTRL,
+ BSPI_RAF_CTRL_CLEAR_MASK);
+ bcm_qspi_bspi_flush_prefetch_buffers(qspi);
+}
+
+static void bcm_qspi_bspi_lr_data_read(struct bcm_qspi *qspi)
+{
+ u32 *buf = (u32 *)qspi->bspi_rf_op->data.buf.in;
+ u32 data = 0;
+
+ dev_dbg(&qspi->pdev->dev, "xfer %p rx %p rxlen %d\n", qspi->bspi_rf_op,
+ qspi->bspi_rf_op->data.buf.in, qspi->bspi_rf_op_len);
+ while (!bcm_qspi_bspi_lr_is_fifo_empty(qspi)) {
+ data = bcm_qspi_bspi_lr_read_fifo(qspi);
+ if (likely(qspi->bspi_rf_op_len >= 4) &&
+ IS_ALIGNED((uintptr_t)buf, 4)) {
+ buf[qspi->bspi_rf_op_idx++] = data;
+ qspi->bspi_rf_op_len -= 4;
+ } else {
+ /* Read out the remaining bytes one at a time (short or unaligned tail) */
+ u8 *cbuf = (u8 *)&buf[qspi->bspi_rf_op_idx];
+
+ data = cpu_to_le32(data);
+ while (qspi->bspi_rf_op_len) {
+ *cbuf++ = (u8)data;
+ data >>= 8;
+ qspi->bspi_rf_op_len--;
+ }
+ }
+ }
+}
+
+static void bcm_qspi_bspi_set_xfer_params(struct bcm_qspi *qspi, u8 cmd_byte,
+ int bpp, int bpc, int flex_mode)
+{
+ bcm_qspi_write(qspi, BSPI, BSPI_FLEX_MODE_ENABLE, 0);
+ bcm_qspi_write(qspi, BSPI, BSPI_BITS_PER_CYCLE, bpc);
+ bcm_qspi_write(qspi, BSPI, BSPI_BITS_PER_PHASE, bpp);
+ bcm_qspi_write(qspi, BSPI, BSPI_CMD_AND_MODE_BYTE, cmd_byte);
+ bcm_qspi_write(qspi, BSPI, BSPI_FLEX_MODE_ENABLE, flex_mode);
+}
+
+static int bcm_qspi_bspi_set_flex_mode(struct bcm_qspi *qspi,
+ const struct spi_mem_op *op, int hp)
+{
+ int bpc = 0, bpp = 0;
+ u8 command = op->cmd.opcode;
+ int width = op->data.buswidth ? op->data.buswidth : SPI_NBITS_SINGLE;
+ int addrlen = op->addr.nbytes;
+ int flex_mode = 1;
+
+ dev_dbg(&qspi->pdev->dev, "set flex mode w %x addrlen %x hp %d\n",
+ width, addrlen, hp);
+
+ if (addrlen == BSPI_ADDRLEN_4BYTES)
+ bpp = BSPI_BPP_ADDR_SELECT_MASK;
+
+ if (op->dummy.nbytes)
+ bpp |= (op->dummy.nbytes * 8) / op->dummy.buswidth;
+
+ switch (width) {
+ case SPI_NBITS_SINGLE:
+ if (addrlen == BSPI_ADDRLEN_3BYTES)
+ /* default mode, does not need flex_cmd */
+ flex_mode = 0;
+ break;
+ case SPI_NBITS_DUAL:
+ bpc = 0x00000001;
+ if (hp) {
+ bpc |= 0x00010100; /* address and mode are 2-bit */
+ bpp = BSPI_BPP_MODE_SELECT_MASK;
+ }
+ break;
+ case SPI_NBITS_QUAD:
+ bpc = 0x00000002;
+ if (hp) {
+ bpc |= 0x00020200; /* address and mode are 4-bit */
+ bpp |= BSPI_BPP_MODE_SELECT_MASK;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ bcm_qspi_bspi_set_xfer_params(qspi, command, bpp, bpc, flex_mode);
+
+ return 0;
+}
+
+static int bcm_qspi_bspi_set_override(struct bcm_qspi *qspi,
+ const struct spi_mem_op *op, int hp)
+{
+ int width = op->data.buswidth ? op->data.buswidth : SPI_NBITS_SINGLE;
+ int addrlen = op->addr.nbytes;
+ u32 data = bcm_qspi_read(qspi, BSPI, BSPI_STRAP_OVERRIDE_CTRL);
+
+ dev_dbg(&qspi->pdev->dev, "set override mode w %x addrlen %x hp %d\n",
+ width, addrlen, hp);
+
+ switch (width) {
+ case SPI_NBITS_SINGLE:
+ /* clear quad/dual mode */
+ data &= ~(BSPI_STRAP_OVERRIDE_CTRL_DATA_QUAD |
+ BSPI_STRAP_OVERRIDE_CTRL_DATA_DUAL);
+ break;
+ case SPI_NBITS_QUAD:
+ /* clear dual mode and set quad mode */
+ data &= ~BSPI_STRAP_OVERRIDE_CTRL_DATA_DUAL;
+ data |= BSPI_STRAP_OVERRIDE_CTRL_DATA_QUAD;
+ break;
+ case SPI_NBITS_DUAL:
+ /* clear quad mode set dual mode */
+ data &= ~BSPI_STRAP_OVERRIDE_CTRL_DATA_QUAD;
+ data |= BSPI_STRAP_OVERRIDE_CTRL_DATA_DUAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (addrlen == BSPI_ADDRLEN_4BYTES)
+ /* set 4byte mode*/
+ data |= BSPI_STRAP_OVERRIDE_CTRL_ADDR_4BYTE;
+ else
+ /* clear 4 byte mode */
+ data &= ~BSPI_STRAP_OVERRIDE_CTRL_ADDR_4BYTE;
+
+ /* set the override mode */
+ data |= BSPI_STRAP_OVERRIDE_CTRL_OVERRIDE;
+ bcm_qspi_write(qspi, BSPI, BSPI_STRAP_OVERRIDE_CTRL, data);
+ bcm_qspi_bspi_set_xfer_params(qspi, op->cmd.opcode, 0, 0, 0);
+
+ return 0;
+}
+
+static int bcm_qspi_bspi_set_mode(struct bcm_qspi *qspi,
+ const struct spi_mem_op *op, int hp)
+{
+ int error = 0;
+ int width = op->data.buswidth ? op->data.buswidth : SPI_NBITS_SINGLE;
+ int addrlen = op->addr.nbytes;
+
+ /* default mode */
+ qspi->xfer_mode.flex_mode = true;
+
+ if (!bcm_qspi_bspi_ver_three(qspi)) {
+ u32 val, mask;
+
+ val = bcm_qspi_read(qspi, BSPI, BSPI_STRAP_OVERRIDE_CTRL);
+ mask = BSPI_STRAP_OVERRIDE_CTRL_OVERRIDE;
+ if (val & mask || qspi->s3_strap_override_ctrl & mask) {
+ qspi->xfer_mode.flex_mode = false;
+ bcm_qspi_write(qspi, BSPI, BSPI_FLEX_MODE_ENABLE, 0);
+ error = bcm_qspi_bspi_set_override(qspi, op, hp);
+ }
+ }
+
+ if (qspi->xfer_mode.flex_mode)
+ error = bcm_qspi_bspi_set_flex_mode(qspi, op, hp);
+
+ if (error) {
+ dev_warn(&qspi->pdev->dev,
+ "INVALID COMBINATION: width=%d addrlen=%d hp=%d\n",
+ width, addrlen, hp);
+ } else if (qspi->xfer_mode.width != width ||
+ qspi->xfer_mode.addrlen != addrlen ||
+ qspi->xfer_mode.hp != hp) {
+ qspi->xfer_mode.width = width;
+ qspi->xfer_mode.addrlen = addrlen;
+ qspi->xfer_mode.hp = hp;
+ dev_dbg(&qspi->pdev->dev,
+ "cs:%d %d-lane output, %d-byte address%s\n",
+ qspi->curr_cs,
+ qspi->xfer_mode.width,
+ qspi->xfer_mode.addrlen,
+ qspi->xfer_mode.hp != -1 ? ", hp mode" : "");
+ }
+
+ return error;
+}
+
+static void bcm_qspi_enable_bspi(struct bcm_qspi *qspi)
+{
+ if (!has_bspi(qspi))
+ return;
+
+ qspi->bspi_enabled = 1;
+ if ((bcm_qspi_read(qspi, BSPI, BSPI_MAST_N_BOOT_CTRL) & 1) == 0)
+ return;
+
+ bcm_qspi_bspi_flush_prefetch_buffers(qspi);
+ udelay(1);
+ bcm_qspi_write(qspi, BSPI, BSPI_MAST_N_BOOT_CTRL, 0);
+ udelay(1);
+}
+
+static void bcm_qspi_disable_bspi(struct bcm_qspi *qspi)
+{
+ if (!has_bspi(qspi))
+ return;
+
+ qspi->bspi_enabled = 0;
+ if ((bcm_qspi_read(qspi, BSPI, BSPI_MAST_N_BOOT_CTRL) & 1))
+ return;
+
+ bcm_qspi_bspi_busy_poll(qspi);
+ bcm_qspi_write(qspi, BSPI, BSPI_MAST_N_BOOT_CTRL, 1);
+ udelay(1);
+}
+
+static void bcm_qspi_chip_select(struct bcm_qspi *qspi, int cs)
+{
+ u32 rd = 0;
+ u32 wr = 0;
+
+ if (cs >= 0 && qspi->base[CHIP_SELECT]) {
+ rd = bcm_qspi_read(qspi, CHIP_SELECT, 0);
+ wr = (rd & ~0xff) | (1 << cs);
+ if (rd == wr)
+ return;
+ bcm_qspi_write(qspi, CHIP_SELECT, 0, wr);
+ usleep_range(10, 20);
+ }
+
+ dev_dbg(&qspi->pdev->dev, "using cs:%d\n", cs);
+ qspi->curr_cs = cs;
+}
+
+static bool bcmspi_parms_did_change(const struct bcm_qspi_parms * const cur,
+ const struct bcm_qspi_parms * const prev)
+{
+ return (cur->speed_hz != prev->speed_hz) ||
+ (cur->mode != prev->mode) ||
+ (cur->bits_per_word != prev->bits_per_word);
+}
+
+
+/* MSPI helpers */
+static void bcm_qspi_hw_set_parms(struct bcm_qspi *qspi,
+ const struct bcm_qspi_parms *xp)
+{
+ u32 spcr, spbr = 0;
+
+ if (!bcmspi_parms_did_change(xp, &qspi->last_parms))
+ return;
+
+ if (!qspi->mspi_maj_rev)
+ /* legacy controller */
+ spcr = MSPI_MASTER_BIT;
+ else
+ spcr = 0;
+
+ /*
+ * Bits per transfer. BITS determines the number of data bits
+ * transferred if the command control bit (BITSE of a
+ * CDRAM Register) is equal to 1.
+ * If CDRAM BITSE is equal to 0, 8 data bits are transferred
+ * regardless
+ */
+ if (xp->bits_per_word != 16 && xp->bits_per_word != 64)
+ spcr |= xp->bits_per_word << MSPI_SPCR0_MSB_BITS_SHIFT;
+
+ spcr |= xp->mode & (MSPI_SPCR0_MSB_CPHA | MSPI_SPCR0_MSB_CPOL);
+ bcm_qspi_write(qspi, MSPI, MSPI_SPCR0_MSB, spcr);
+
+ if (bcm_qspi_has_fastbr(qspi)) {
+ spcr = 0;
+
+ /* enable fastbr */
+ spcr |= MSPI_SPCR3_FASTBR;
+
+ if (xp->mode & SPI_3WIRE)
+ spcr |= MSPI_SPCR3_HALFDUPLEX | MSPI_SPCR3_HDOUTTYPE;
+
+ if (bcm_qspi_has_sysclk_108(qspi)) {
+ /* check requested baud rate before moving to 108 MHz */
+ spbr = bcm_qspi_calc_spbr(MSPI_BASE_FREQ * 4, xp);
+ if (spbr > QSPI_SPBR_MAX) {
+ /* use the 27 MHz system clock for slower baud rates */
+ spcr &= ~MSPI_SPCR3_SYSCLKSEL_MASK;
+ qspi->base_clk = MSPI_BASE_FREQ;
+ } else {
+ /* 108 MHz system clock */
+ spcr |= MSPI_SPCR3_SYSCLKSEL_108;
+ qspi->base_clk = MSPI_BASE_FREQ * 4;
+ }
+ }
+
+ if (xp->bits_per_word > 16) {
+ /* data_reg_size 1 (64bit) */
+ spcr |= MSPI_SPCR3_DATA_REG_SZ;
+ /* TxRx RAM data access mode 2 for 32B and set fastdt */
+ spcr |= MSPI_SPCR3_DAM_32BYTE | MSPI_SPCR3_FASTDT;
+ /*
+ * Set length of delay after transfer
+ * DTL from 0(256) to 1
+ */
+ bcm_qspi_write(qspi, MSPI, MSPI_SPCR1_LSB, 1);
+ } else {
+ /* data_reg_size[8] = 0 */
+ spcr &= ~(MSPI_SPCR3_DATA_REG_SZ);
+
+ /*
+ * TxRx RAM access mode 8B
+ * and disable fastdt
+ */
+ spcr &= ~(MSPI_SPCR3_DAM_32BYTE);
+ }
+ bcm_qspi_write(qspi, MSPI, MSPI_SPCR3, spcr);
+ }
+
+ /* SCK Baud Rate = System Clock/(2 * SPBR) */
+ qspi->max_speed_hz = qspi->base_clk / (bcm_qspi_spbr_min(qspi) * 2);
+ spbr = bcm_qspi_calc_spbr(qspi->base_clk, xp);
+ spbr = clamp_val(spbr, bcm_qspi_spbr_min(qspi), QSPI_SPBR_MAX);
+ bcm_qspi_write(qspi, MSPI, MSPI_SPCR0_LSB, spbr);
+
+ qspi->last_parms = *xp;
+}
+
+static void bcm_qspi_update_parms(struct bcm_qspi *qspi,
+ struct spi_device *spi,
+ struct spi_transfer *trans)
+{
+ struct bcm_qspi_parms xp;
+
+ xp.speed_hz = trans->speed_hz;
+ xp.bits_per_word = trans->bits_per_word;
+ xp.mode = spi->mode;
+
+ bcm_qspi_hw_set_parms(qspi, &xp);
+}
+
+static int bcm_qspi_setup(struct spi_device *spi)
+{
+ struct bcm_qspi_parms *xp;
+
+ if (spi->bits_per_word > 64)
+ return -EINVAL;
+
+ xp = spi_get_ctldata(spi);
+ if (!xp) {
+ xp = kzalloc(sizeof(*xp), GFP_KERNEL);
+ if (!xp)
+ return -ENOMEM;
+ spi_set_ctldata(spi, xp);
+ }
+ xp->speed_hz = spi->max_speed_hz;
+ xp->mode = spi->mode;
+
+ if (spi->bits_per_word)
+ xp->bits_per_word = spi->bits_per_word;
+ else
+ xp->bits_per_word = 8;
+
+ return 0;
+}
+
+static bool bcm_qspi_mspi_transfer_is_last(struct bcm_qspi *qspi,
+ struct qspi_trans *qt)
+{
+ if (qt->mspi_last_trans &&
+ spi_transfer_is_last(qspi->master, qt->trans))
+ return true;
+ else
+ return false;
+}
+
+static int update_qspi_trans_byte_count(struct bcm_qspi *qspi,
+ struct qspi_trans *qt, int flags)
+{
+ int ret = TRANS_STATUS_BREAK_NONE;
+
+ /* count the last transferred bytes */
+ if (qt->trans->bits_per_word <= 8)
+ qt->byte++;
+ else if (qt->trans->bits_per_word <= 16)
+ qt->byte += 2;
+ else if (qt->trans->bits_per_word <= 32)
+ qt->byte += 4;
+ else if (qt->trans->bits_per_word <= 64)
+ qt->byte += 8;
+
+ if (qt->byte >= qt->trans->len) {
+ /* we're at the end of the spi_transfer */
+ /* in TX mode, need to pause for a delay or CS change */
+ if (qt->trans->delay.value &&
+ (flags & TRANS_STATUS_BREAK_DELAY))
+ ret |= TRANS_STATUS_BREAK_DELAY;
+ if (qt->trans->cs_change &&
+ (flags & TRANS_STATUS_BREAK_CS_CHANGE))
+ ret |= TRANS_STATUS_BREAK_CS_CHANGE;
+
+ if (bcm_qspi_mspi_transfer_is_last(qspi, qt))
+ ret |= TRANS_STATUS_BREAK_EOM;
+ else
+ ret |= TRANS_STATUS_BREAK_NO_BYTES;
+
+ qt->trans = NULL;
+ }
+
+ dev_dbg(&qspi->pdev->dev, "trans %p len %d byte %d ret %x\n",
+ qt->trans, qt->trans ? qt->trans->len : 0, qt->byte, ret);
+ return ret;
+}
+
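+/*
+ * Each MSPI RX RAM slot spans 8 bytes (hence the "slot << 3" addressing).
+ * For 8- and 16-bit words the MSB half sits at offset 0 and the LSB half
+ * at offset 4; 32- and 64-bit words are byte-swapped with swap4bytes() to
+ * bring them back into CPU byte order.
+ */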
+static inline u8 read_rxram_slot_u8(struct bcm_qspi *qspi, int slot)
+{
+ u32 slot_offset = MSPI_RXRAM + (slot << 3) + 0x4;
+
+ /* mask out reserved bits */
+ return bcm_qspi_read(qspi, MSPI, slot_offset) & 0xff;
+}
+
+static inline u16 read_rxram_slot_u16(struct bcm_qspi *qspi, int slot)
+{
+ u32 reg_offset = MSPI_RXRAM;
+ u32 lsb_offset = reg_offset + (slot << 3) + 0x4;
+ u32 msb_offset = reg_offset + (slot << 3);
+
+ return (bcm_qspi_read(qspi, MSPI, lsb_offset) & 0xff) |
+ ((bcm_qspi_read(qspi, MSPI, msb_offset) & 0xff) << 8);
+}
+
+static inline u32 read_rxram_slot_u32(struct bcm_qspi *qspi, int slot)
+{
+ u32 reg_offset = MSPI_RXRAM;
+ u32 offset = reg_offset + (slot << 3);
+ u32 val;
+
+ val = bcm_qspi_read(qspi, MSPI, offset);
+ val = swap4bytes(val);
+
+ return val;
+}
+
+static inline u64 read_rxram_slot_u64(struct bcm_qspi *qspi, int slot)
+{
+ u32 reg_offset = MSPI_RXRAM;
+ u32 lsb_offset = reg_offset + (slot << 3) + 0x4;
+ u32 msb_offset = reg_offset + (slot << 3);
+ u32 msb, lsb;
+
+ msb = bcm_qspi_read(qspi, MSPI, msb_offset);
+ msb = swap4bytes(msb);
+ lsb = bcm_qspi_read(qspi, MSPI, lsb_offset);
+ lsb = swap4bytes(lsb);
+
+ return ((u64)msb << 32 | lsb);
+}
+
+static void read_from_hw(struct bcm_qspi *qspi, int slots)
+{
+ struct qspi_trans tp;
+ int slot;
+
+ bcm_qspi_disable_bspi(qspi);
+
+ if (slots > MSPI_NUM_CDRAM) {
+ /* should never happen */
+ dev_err(&qspi->pdev->dev, "%s: too many slots!\n", __func__);
+ return;
+ }
+
+ tp = qspi->trans_pos;
+
+ for (slot = 0; slot < slots; slot++) {
+ if (tp.trans->bits_per_word <= 8) {
+ u8 *buf = tp.trans->rx_buf;
+
+ if (buf)
+ buf[tp.byte] = read_rxram_slot_u8(qspi, slot);
+ dev_dbg(&qspi->pdev->dev, "RD %02x\n",
+ buf ? buf[tp.byte] : 0x0);
+ } else if (tp.trans->bits_per_word <= 16) {
+ u16 *buf = tp.trans->rx_buf;
+
+ if (buf)
+ buf[tp.byte / 2] = read_rxram_slot_u16(qspi,
+ slot);
+ dev_dbg(&qspi->pdev->dev, "RD %04x\n",
+ buf ? buf[tp.byte / 2] : 0x0);
+ } else if (tp.trans->bits_per_word <= 32) {
+ u32 *buf = tp.trans->rx_buf;
+
+ if (buf)
+ buf[tp.byte / 4] = read_rxram_slot_u32(qspi,
+ slot);
+ dev_dbg(&qspi->pdev->dev, "RD %08x\n",
+ buf ? buf[tp.byte / 4] : 0x0);
+
+ } else if (tp.trans->bits_per_word <= 64) {
+ u64 *buf = tp.trans->rx_buf;
+
+ if (buf)
+ buf[tp.byte / 8] = read_rxram_slot_u64(qspi,
+ slot);
+ dev_dbg(&qspi->pdev->dev, "RD %llx\n",
+ buf ? buf[tp.byte / 8] : 0x0);
+
+
+ }
+
+ update_qspi_trans_byte_count(qspi, &tp,
+ TRANS_STATUS_BREAK_NONE);
+ }
+
+ qspi->trans_pos = tp;
+}
+
+static inline void write_txram_slot_u8(struct bcm_qspi *qspi, int slot,
+ u8 val)
+{
+ u32 reg_offset = MSPI_TXRAM + (slot << 3);
+
+ /* mask out reserved bits */
+ bcm_qspi_write(qspi, MSPI, reg_offset, val);
+}
+
+static inline void write_txram_slot_u16(struct bcm_qspi *qspi, int slot,
+ u16 val)
+{
+ u32 reg_offset = MSPI_TXRAM;
+ u32 msb_offset = reg_offset + (slot << 3);
+ u32 lsb_offset = reg_offset + (slot << 3) + 0x4;
+
+ bcm_qspi_write(qspi, MSPI, msb_offset, (val >> 8));
+ bcm_qspi_write(qspi, MSPI, lsb_offset, (val & 0xff));
+}
+
+static inline void write_txram_slot_u32(struct bcm_qspi *qspi, int slot,
+ u32 val)
+{
+ u32 reg_offset = MSPI_TXRAM;
+ u32 msb_offset = reg_offset + (slot << 3);
+
+ bcm_qspi_write(qspi, MSPI, msb_offset, swap4bytes(val));
+}
+
+static inline void write_txram_slot_u64(struct bcm_qspi *qspi, int slot,
+ u64 val)
+{
+ u32 reg_offset = MSPI_TXRAM;
+ u32 msb_offset = reg_offset + (slot << 3);
+ u32 lsb_offset = reg_offset + (slot << 3) + 0x4;
+ u32 msb = upper_32_bits(val);
+ u32 lsb = lower_32_bits(val);
+
+ bcm_qspi_write(qspi, MSPI, msb_offset, swap4bytes(msb));
+ bcm_qspi_write(qspi, MSPI, lsb_offset, swap4bytes(lsb));
+}
+
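+/*
+ * One CDRAM command word is queued per TXRAM slot ("slot << 2" addressing).
+ * It carries the per-slot control bits set up in write_to_hw(): CONT keeps
+ * the chip select asserted into the next slot, BITSE selects the SPCR0 bit
+ * length instead of the default 8 bits, DT applies the delay programmed in
+ * SPCR1, and PCS holds the (active-low) chip-select pattern.
+ */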
+static inline u32 read_cdram_slot(struct bcm_qspi *qspi, int slot)
+{
+ return bcm_qspi_read(qspi, MSPI, MSPI_CDRAM + (slot << 2));
+}
+
+static inline void write_cdram_slot(struct bcm_qspi *qspi, int slot, u32 val)
+{
+ bcm_qspi_write(qspi, MSPI, (MSPI_CDRAM + (slot << 2)), val);
+}
+
+/* Return number of slots written */
+static int write_to_hw(struct bcm_qspi *qspi, struct spi_device *spi)
+{
+ struct qspi_trans tp;
+ int slot = 0, tstatus = 0;
+ u32 mspi_cdram = 0;
+
+ bcm_qspi_disable_bspi(qspi);
+ tp = qspi->trans_pos;
+ bcm_qspi_update_parms(qspi, spi, tp.trans);
+
+ /* Run until end of transfer or reached the max data */
+ while (!tstatus && slot < MSPI_NUM_CDRAM) {
+ mspi_cdram = MSPI_CDRAM_CONT_BIT;
+ if (tp.trans->bits_per_word <= 8) {
+ const u8 *buf = tp.trans->tx_buf;
+ u8 val = buf ? buf[tp.byte] : 0x00;
+
+ write_txram_slot_u8(qspi, slot, val);
+ dev_dbg(&qspi->pdev->dev, "WR %02x\n", val);
+ } else if (tp.trans->bits_per_word <= 16) {
+ const u16 *buf = tp.trans->tx_buf;
+ u16 val = buf ? buf[tp.byte / 2] : 0x0000;
+
+ write_txram_slot_u16(qspi, slot, val);
+ dev_dbg(&qspi->pdev->dev, "WR %04x\n", val);
+ } else if (tp.trans->bits_per_word <= 32) {
+ const u32 *buf = tp.trans->tx_buf;
+ u32 val = buf ? buf[tp.byte/4] : 0x0;
+
+ write_txram_slot_u32(qspi, slot, val);
+ dev_dbg(&qspi->pdev->dev, "WR %08x\n", val);
+ } else if (tp.trans->bits_per_word <= 64) {
+ const u64 *buf = tp.trans->tx_buf;
+ u64 val = (buf ? buf[tp.byte/8] : 0x0);
+
+ /* use the length of delay from SPCR1_LSB */
+ if (bcm_qspi_has_fastbr(qspi))
+ mspi_cdram |= MSPI_CDRAM_DT_BIT;
+
+ write_txram_slot_u64(qspi, slot, val);
+ dev_dbg(&qspi->pdev->dev, "WR %llx\n", val);
+ }
+
+ mspi_cdram |= ((tp.trans->bits_per_word <= 8) ? 0 :
+ MSPI_CDRAM_BITSE_BIT);
+
+ /* set 3-wire half-duplex mode, data flows from master to slave */
+ if ((spi->mode & SPI_3WIRE) && tp.trans->tx_buf)
+ mspi_cdram |= MSPI_CDRAM_OUTP;
+
+ if (has_bspi(qspi))
+ mspi_cdram &= ~1;
+ else
+ mspi_cdram |= (~(1 << spi->chip_select) &
+ MSPI_CDRAM_PCS);
+
+ write_cdram_slot(qspi, slot, mspi_cdram);
+
+ tstatus = update_qspi_trans_byte_count(qspi, &tp,
+ TRANS_STATUS_BREAK_TX);
+ slot++;
+ }
+
+ if (!slot) {
+ dev_err(&qspi->pdev->dev, "%s: no data to send?", __func__);
+ goto done;
+ }
+
+ dev_dbg(&qspi->pdev->dev, "submitting %d slots\n", slot);
+ bcm_qspi_write(qspi, MSPI, MSPI_NEWQP, 0);
+ bcm_qspi_write(qspi, MSPI, MSPI_ENDQP, slot - 1);
+
+ /*
+ * case 1) EOM =1, cs_change =0: SSb inactive
+ * case 2) EOM =1, cs_change =1: SSb stay active
+ * case 3) EOM =0, cs_change =0: SSb stay active
+ * case 4) EOM =0, cs_change =1: SSb inactive
+ */
+ if (((tstatus & TRANS_STATUS_BREAK_DESELECT)
+ == TRANS_STATUS_BREAK_CS_CHANGE) ||
+ ((tstatus & TRANS_STATUS_BREAK_DESELECT)
+ == TRANS_STATUS_BREAK_EOM)) {
+ mspi_cdram = read_cdram_slot(qspi, slot - 1) &
+ ~MSPI_CDRAM_CONT_BIT;
+ write_cdram_slot(qspi, slot - 1, mspi_cdram);
+ }
+
+ if (has_bspi(qspi))
+ bcm_qspi_write(qspi, MSPI, MSPI_WRITE_LOCK, 1);
+
+ /* Must flush previous writes before starting MSPI operation */
+ mb();
+ /* Set cont | spe | spifie */
+ bcm_qspi_write(qspi, MSPI, MSPI_SPCR2, 0xe0);
+
+done:
+ return slot;
+}
+
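+/*
+ * Accelerated flash read using the BSPI RAF engine: the request is split
+ * into BSPI_READ_LENGTH sized chunks, each chunk is started by programming
+ * the RAF start address and word count, and the BSPI session-done interrupt
+ * drains the FIFO into op->data.buf.in before signalling bspi_done.
+ */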
+static int bcm_qspi_bspi_exec_mem_op(struct spi_device *spi,
+ const struct spi_mem_op *op)
+{
+ struct bcm_qspi *qspi = spi_master_get_devdata(spi->master);
+ u32 addr = 0, len, rdlen, len_words, from = 0;
+ int ret = 0;
+ unsigned long timeo = msecs_to_jiffies(100);
+ struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;
+
+ if (bcm_qspi_bspi_ver_three(qspi))
+ if (op->addr.nbytes == BSPI_ADDRLEN_4BYTES)
+ return -EIO;
+
+ from = op->addr.val;
+ if (!spi->cs_gpiod)
+ bcm_qspi_chip_select(qspi, spi->chip_select);
+ bcm_qspi_write(qspi, MSPI, MSPI_WRITE_LOCK, 0);
+
+ /*
+ * when using flex mode we need to send
+ * the upper address byte to bspi
+ */
+ if (!bcm_qspi_bspi_ver_three(qspi)) {
+ addr = from & 0xff000000;
+ bcm_qspi_write(qspi, BSPI,
+ BSPI_BSPI_FLASH_UPPER_ADDR_BYTE, addr);
+ }
+
+ if (!qspi->xfer_mode.flex_mode)
+ addr = from;
+ else
+ addr = from & 0x00ffffff;
+
+ if (bcm_qspi_bspi_ver_three(qspi) == true)
+ addr = (addr + 0xc00000) & 0xffffff;
+
+ /*
+ * read into the entire buffer by breaking the reads
+ * into RAF buffer read lengths
+ */
+ len = op->data.nbytes;
+ qspi->bspi_rf_op_idx = 0;
+
+ do {
+ if (len > BSPI_READ_LENGTH)
+ rdlen = BSPI_READ_LENGTH;
+ else
+ rdlen = len;
+
+ reinit_completion(&qspi->bspi_done);
+ bcm_qspi_enable_bspi(qspi);
+ len_words = (rdlen + 3) >> 2;
+ qspi->bspi_rf_op = op;
+ qspi->bspi_rf_op_status = 0;
+ qspi->bspi_rf_op_len = rdlen;
+ dev_dbg(&qspi->pdev->dev,
+ "bspi xfr addr 0x%x len 0x%x", addr, rdlen);
+ bcm_qspi_write(qspi, BSPI, BSPI_RAF_START_ADDR, addr);
+ bcm_qspi_write(qspi, BSPI, BSPI_RAF_NUM_WORDS, len_words);
+ bcm_qspi_write(qspi, BSPI, BSPI_RAF_WATERMARK, 0);
+ if (qspi->soc_intc) {
+ /*
+ * clear soc MSPI and BSPI interrupts and enable
+ * BSPI interrupts.
+ */
+ soc_intc->bcm_qspi_int_ack(soc_intc, MSPI_BSPI_DONE);
+ soc_intc->bcm_qspi_int_set(soc_intc, BSPI_DONE, true);
+ }
+
+ /* Must flush previous writes before starting BSPI operation */
+ mb();
+ bcm_qspi_bspi_lr_start(qspi);
+ if (!wait_for_completion_timeout(&qspi->bspi_done, timeo)) {
+ dev_err(&qspi->pdev->dev, "timeout waiting for BSPI\n");
+ ret = -ETIMEDOUT;
+ break;
+ }
+
+ /* set msg return length */
+ addr += rdlen;
+ len -= rdlen;
+ } while (len);
+
+ return ret;
+}
+
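+/*
+ * Feed the transfer through the 16-slot MSPI queue: each iteration fills
+ * up to MSPI_NUM_CDRAM slots via write_to_hw(), waits for the SPIF
+ * interrupt to complete mspi_done, then copies the received bytes back
+ * out of RX RAM with read_from_hw().
+ */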
+static int bcm_qspi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *trans)
+{
+ struct bcm_qspi *qspi = spi_master_get_devdata(master);
+ int slots;
+ unsigned long timeo = msecs_to_jiffies(100);
+
+ if (!spi->cs_gpiod)
+ bcm_qspi_chip_select(qspi, spi->chip_select);
+ qspi->trans_pos.trans = trans;
+ qspi->trans_pos.byte = 0;
+
+ while (qspi->trans_pos.byte < trans->len) {
+ reinit_completion(&qspi->mspi_done);
+
+ slots = write_to_hw(qspi, spi);
+ if (!wait_for_completion_timeout(&qspi->mspi_done, timeo)) {
+ dev_err(&qspi->pdev->dev, "timeout waiting for MSPI\n");
+ return -ETIMEDOUT;
+ }
+
+ read_from_hw(qspi, slots);
+ }
+ bcm_qspi_enable_bspi(qspi);
+
+ return 0;
+}
+
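+/*
+ * Execute a spi-mem op on the MSPI engine (used when BSPI is absent or
+ * cannot service the read): the op is replayed as two transfers, the first
+ * carrying the opcode, address and dummy bytes, the second clocking in the
+ * data.
+ */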
+static int bcm_qspi_mspi_exec_mem_op(struct spi_device *spi,
+ const struct spi_mem_op *op)
+{
+ struct spi_master *master = spi->master;
+ struct bcm_qspi *qspi = spi_master_get_devdata(master);
+ struct spi_transfer t[2];
+ u8 cmd[6] = { };
+ int ret, i;
+
+ memset(cmd, 0, sizeof(cmd));
+ memset(t, 0, sizeof(t));
+
+ /* tx */
+ /* opcode is in cmd[0] */
+ cmd[0] = op->cmd.opcode;
+ for (i = 0; i < op->addr.nbytes; i++)
+ cmd[1 + i] = op->addr.val >> (8 * (op->addr.nbytes - i - 1));
+
+ t[0].tx_buf = cmd;
+ t[0].len = op->addr.nbytes + op->dummy.nbytes + 1;
+ t[0].bits_per_word = spi->bits_per_word;
+ t[0].tx_nbits = op->cmd.buswidth;
+ /* let MSPI know that this is not the last transfer */
+ qspi->trans_pos.mspi_last_trans = false;
+ ret = bcm_qspi_transfer_one(master, spi, &t[0]);
+
+ /* rx */
+ qspi->trans_pos.mspi_last_trans = true;
+ if (!ret) {
+ /* rx */
+ t[1].rx_buf = op->data.buf.in;
+ t[1].len = op->data.nbytes;
+ t[1].rx_nbits = op->data.buswidth;
+ t[1].bits_per_word = spi->bits_per_word;
+ ret = bcm_qspi_transfer_one(master, spi, &t[1]);
+ }
+
+ return ret;
+}
+
+static int bcm_qspi_exec_mem_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ struct spi_device *spi = mem->spi;
+ struct bcm_qspi *qspi = spi_master_get_devdata(spi->master);
+ int ret = 0;
+ bool mspi_read = false;
+ u32 addr = 0, len;
+ u_char *buf;
+
+ if (!op->data.nbytes || !op->addr.nbytes || op->addr.nbytes > 4 ||
+ op->data.dir != SPI_MEM_DATA_IN)
+ return -ENOTSUPP;
+
+ buf = op->data.buf.in;
+ addr = op->addr.val;
+ len = op->data.nbytes;
+
+ if (has_bspi(qspi) && bcm_qspi_bspi_ver_three(qspi) == true) {
+ /*
+ * The address coming into this function is a raw flash offset.
+ * But for BSPI <= V3, we need to convert it to a remapped BSPI
+ * address. If it crosses a 4MB boundary, just revert back to
+ * using MSPI.
+ */
+ addr = (addr + 0xc00000) & 0xffffff;
+
+ if ((~ADDR_4MB_MASK & addr) ^
+ (~ADDR_4MB_MASK & (addr + len - 1)))
+ mspi_read = true;
+ }
+
+ /* non-aligned and very short transfers are handled by MSPI */
+ if (!IS_ALIGNED((uintptr_t)addr, 4) || !IS_ALIGNED((uintptr_t)buf, 4) ||
+ len < 4 || op->cmd.opcode == SPINOR_OP_RDSFDP)
+ mspi_read = true;
+
+ if (!has_bspi(qspi) || mspi_read)
+ return bcm_qspi_mspi_exec_mem_op(spi, op);
+
+ ret = bcm_qspi_bspi_set_mode(qspi, op, 0);
+
+ if (!ret)
+ ret = bcm_qspi_bspi_exec_mem_op(spi, op);
+
+ return ret;
+}
+
+static void bcm_qspi_cleanup(struct spi_device *spi)
+{
+ struct bcm_qspi_parms *xp = spi_get_ctldata(spi);
+
+ kfree(xp);
+}
+
+static irqreturn_t bcm_qspi_mspi_l2_isr(int irq, void *dev_id)
+{
+ struct bcm_qspi_dev_id *qspi_dev_id = dev_id;
+ struct bcm_qspi *qspi = qspi_dev_id->dev;
+ u32 status = bcm_qspi_read(qspi, MSPI, MSPI_MSPI_STATUS);
+
+ if (status & MSPI_MSPI_STATUS_SPIF) {
+ struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;
+ /* clear interrupt */
+ status &= ~MSPI_MSPI_STATUS_SPIF;
+ bcm_qspi_write(qspi, MSPI, MSPI_MSPI_STATUS, status);
+ if (qspi->soc_intc)
+ soc_intc->bcm_qspi_int_ack(soc_intc, MSPI_DONE);
+ complete(&qspi->mspi_done);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static irqreturn_t bcm_qspi_bspi_lr_l2_isr(int irq, void *dev_id)
+{
+ struct bcm_qspi_dev_id *qspi_dev_id = dev_id;
+ struct bcm_qspi *qspi = qspi_dev_id->dev;
+ struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;
+ u32 status = qspi_dev_id->irqp->mask;
+
+ if (qspi->bspi_enabled && qspi->bspi_rf_op) {
+ bcm_qspi_bspi_lr_data_read(qspi);
+ if (qspi->bspi_rf_op_len == 0) {
+ qspi->bspi_rf_op = NULL;
+ if (qspi->soc_intc) {
+ /* disable soc BSPI interrupt */
+ soc_intc->bcm_qspi_int_set(soc_intc, BSPI_DONE,
+ false);
+ /* indicate done */
+ status = INTR_BSPI_LR_SESSION_DONE_MASK;
+ }
+
+ if (qspi->bspi_rf_op_status)
+ bcm_qspi_bspi_lr_clear(qspi);
+ else
+ bcm_qspi_bspi_flush_prefetch_buffers(qspi);
+ }
+
+ if (qspi->soc_intc)
+ /* clear soc BSPI interrupt */
+ soc_intc->bcm_qspi_int_ack(soc_intc, BSPI_DONE);
+ }
+
+ status &= INTR_BSPI_LR_SESSION_DONE_MASK;
+ if (qspi->bspi_enabled && status && qspi->bspi_rf_op_len == 0)
+ complete(&qspi->bspi_done);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t bcm_qspi_bspi_lr_err_l2_isr(int irq, void *dev_id)
+{
+ struct bcm_qspi_dev_id *qspi_dev_id = dev_id;
+ struct bcm_qspi *qspi = qspi_dev_id->dev;
+ struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;
+
+ dev_err(&qspi->pdev->dev, "BSPI INT error\n");
+ qspi->bspi_rf_op_status = -EIO;
+ if (qspi->soc_intc)
+ /* clear soc interrupt */
+ soc_intc->bcm_qspi_int_ack(soc_intc, BSPI_ERR);
+
+ complete(&qspi->bspi_done);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t bcm_qspi_l1_isr(int irq, void *dev_id)
+{
+ struct bcm_qspi_dev_id *qspi_dev_id = dev_id;
+ struct bcm_qspi *qspi = qspi_dev_id->dev;
+ struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;
+ irqreturn_t ret = IRQ_NONE;
+
+ if (soc_intc) {
+ u32 status = soc_intc->bcm_qspi_get_int_status(soc_intc);
+
+ if (status & MSPI_DONE)
+ ret = bcm_qspi_mspi_l2_isr(irq, dev_id);
+ else if (status & BSPI_DONE)
+ ret = bcm_qspi_bspi_lr_l2_isr(irq, dev_id);
+ else if (status & BSPI_ERR)
+ ret = bcm_qspi_bspi_lr_err_l2_isr(irq, dev_id);
+ }
+
+ return ret;
+}
+
+static const struct bcm_qspi_irq qspi_irq_tab[] = {
+ {
+ .irq_name = "spi_lr_fullness_reached",
+ .irq_handler = bcm_qspi_bspi_lr_l2_isr,
+ .mask = INTR_BSPI_LR_FULLNESS_REACHED_MASK,
+ },
+ {
+ .irq_name = "spi_lr_session_aborted",
+ .irq_handler = bcm_qspi_bspi_lr_err_l2_isr,
+ .mask = INTR_BSPI_LR_SESSION_ABORTED_MASK,
+ },
+ {
+ .irq_name = "spi_lr_impatient",
+ .irq_handler = bcm_qspi_bspi_lr_err_l2_isr,
+ .mask = INTR_BSPI_LR_IMPATIENT_MASK,
+ },
+ {
+ .irq_name = "spi_lr_session_done",
+ .irq_handler = bcm_qspi_bspi_lr_l2_isr,
+ .mask = INTR_BSPI_LR_SESSION_DONE_MASK,
+ },
+#ifdef QSPI_INT_DEBUG
+ /* this interrupt is for debug purposes only, don't request the IRQ */
+ {
+ .irq_name = "spi_lr_overread",
+ .irq_handler = bcm_qspi_bspi_lr_err_l2_isr,
+ .mask = INTR_BSPI_LR_OVERREAD_MASK,
+ },
+#endif
+ {
+ .irq_name = "mspi_done",
+ .irq_handler = bcm_qspi_mspi_l2_isr,
+ .mask = INTR_MSPI_DONE_MASK,
+ },
+ {
+ .irq_name = "mspi_halted",
+ .irq_handler = bcm_qspi_mspi_l2_isr,
+ .mask = INTR_MSPI_HALTED_MASK,
+ },
+ {
+ /* single muxed L1 interrupt source */
+ .irq_name = "spi_l1_intr",
+ .irq_handler = bcm_qspi_l1_isr,
+ .irq_source = MUXED_L1,
+ .mask = QSPI_INTERRUPTS_ALL,
+ },
+};
+
+static void bcm_qspi_bspi_init(struct bcm_qspi *qspi)
+{
+ u32 val = 0;
+
+ val = bcm_qspi_read(qspi, BSPI, BSPI_REVISION_ID);
+ qspi->bspi_maj_rev = (val >> 8) & 0xff;
+ qspi->bspi_min_rev = val & 0xff;
+ if (!(bcm_qspi_bspi_ver_three(qspi))) {
+ /* Force mapping of BSPI address -> flash offset */
+ bcm_qspi_write(qspi, BSPI, BSPI_BSPI_XOR_VALUE, 0);
+ bcm_qspi_write(qspi, BSPI, BSPI_BSPI_XOR_ENABLE, 1);
+ }
+ qspi->bspi_enabled = 1;
+ bcm_qspi_disable_bspi(qspi);
+ bcm_qspi_write(qspi, BSPI, BSPI_B0_CTRL, 0);
+ bcm_qspi_write(qspi, BSPI, BSPI_B1_CTRL, 0);
+}
+
+static void bcm_qspi_hw_init(struct bcm_qspi *qspi)
+{
+ struct bcm_qspi_parms parms;
+
+ bcm_qspi_write(qspi, MSPI, MSPI_SPCR1_LSB, 0);
+ bcm_qspi_write(qspi, MSPI, MSPI_SPCR1_MSB, 0);
+ bcm_qspi_write(qspi, MSPI, MSPI_NEWQP, 0);
+ bcm_qspi_write(qspi, MSPI, MSPI_ENDQP, 0);
+ bcm_qspi_write(qspi, MSPI, MSPI_SPCR2, 0x20);
+
+ parms.mode = SPI_MODE_3;
+ parms.bits_per_word = 8;
+ parms.speed_hz = qspi->max_speed_hz;
+ bcm_qspi_hw_set_parms(qspi, &parms);
+
+ if (has_bspi(qspi))
+ bcm_qspi_bspi_init(qspi);
+}
+
+static void bcm_qspi_hw_uninit(struct bcm_qspi *qspi)
+{
+ u32 status = bcm_qspi_read(qspi, MSPI, MSPI_MSPI_STATUS);
+
+ bcm_qspi_write(qspi, MSPI, MSPI_SPCR2, 0);
+ if (has_bspi(qspi))
+ bcm_qspi_write(qspi, MSPI, MSPI_WRITE_LOCK, 0);
+
+ /* clear interrupt */
+ bcm_qspi_write(qspi, MSPI, MSPI_MSPI_STATUS, status & ~1);
+}
+
+static const struct spi_controller_mem_ops bcm_qspi_mem_ops = {
+ .exec_op = bcm_qspi_exec_mem_op,
+};
+
+struct bcm_qspi_data {
+ bool has_mspi_rev;
+ bool has_spcr3_sysclk;
+};
+
+static const struct bcm_qspi_data bcm_qspi_no_rev_data = {
+ .has_mspi_rev = false,
+ .has_spcr3_sysclk = false,
+};
+
+static const struct bcm_qspi_data bcm_qspi_rev_data = {
+ .has_mspi_rev = true,
+ .has_spcr3_sysclk = false,
+};
+
+static const struct bcm_qspi_data bcm_qspi_spcr3_data = {
+ .has_mspi_rev = true,
+ .has_spcr3_sysclk = true,
+};
+
+static const struct of_device_id bcm_qspi_of_match[] = {
+ {
+ .compatible = "brcm,spi-bcm7445-qspi",
+ .data = &bcm_qspi_rev_data,
+
+ },
+ {
+ .compatible = "brcm,spi-bcm-qspi",
+ .data = &bcm_qspi_no_rev_data,
+ },
+ {
+ .compatible = "brcm,spi-bcm7216-qspi",
+ .data = &bcm_qspi_spcr3_data,
+ },
+ {
+ .compatible = "brcm,spi-bcm7278-qspi",
+ .data = &bcm_qspi_spcr3_data,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, bcm_qspi_of_match);
+
+int bcm_qspi_probe(struct platform_device *pdev,
+ struct bcm_qspi_soc_intc *soc_intc)
+{
+ const struct of_device_id *of_id = NULL;
+ const struct bcm_qspi_data *data;
+ struct device *dev = &pdev->dev;
+ struct bcm_qspi *qspi;
+ struct spi_master *master;
+ struct resource *res;
+ int irq, ret = 0, num_ints = 0;
+ u32 val;
+ u32 rev = 0;
+ const char *name = NULL;
+ int num_irqs = ARRAY_SIZE(qspi_irq_tab);
+
+ /* We only support device-tree instantiation */
+ if (!dev->of_node)
+ return -ENODEV;
+
+ of_id = of_match_node(bcm_qspi_of_match, dev->of_node);
+ if (!of_id)
+ return -ENODEV;
+
+ data = of_id->data;
+
+ master = devm_spi_alloc_master(dev, sizeof(struct bcm_qspi));
+ if (!master) {
+ dev_err(dev, "error allocating spi_master\n");
+ return -ENOMEM;
+ }
+
+ qspi = spi_master_get_devdata(master);
+
+ qspi->clk = devm_clk_get_optional(&pdev->dev, NULL);
+ if (IS_ERR(qspi->clk))
+ return PTR_ERR(qspi->clk);
+
+ qspi->pdev = pdev;
+ qspi->trans_pos.trans = NULL;
+ qspi->trans_pos.byte = 0;
+ qspi->trans_pos.mspi_last_trans = true;
+ qspi->master = master;
+
+ master->bus_num = -1;
+ master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_RX_DUAL | SPI_RX_QUAD |
+ SPI_3WIRE;
+ master->setup = bcm_qspi_setup;
+ master->transfer_one = bcm_qspi_transfer_one;
+ master->mem_ops = &bcm_qspi_mem_ops;
+ master->cleanup = bcm_qspi_cleanup;
+ master->dev.of_node = dev->of_node;
+ master->num_chipselect = NUM_CHIPSELECT;
+ master->use_gpio_descriptors = true;
+
+ qspi->big_endian = of_device_is_big_endian(dev->of_node);
+
+ if (!of_property_read_u32(dev->of_node, "num-cs", &val))
+ master->num_chipselect = val;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hif_mspi");
+ if (!res)
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "mspi");
+
+ qspi->base[MSPI] = devm_ioremap_resource(dev, res);
+ if (IS_ERR(qspi->base[MSPI]))
+ return PTR_ERR(qspi->base[MSPI]);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "bspi");
+ if (res) {
+ qspi->base[BSPI] = devm_ioremap_resource(dev, res);
+ if (IS_ERR(qspi->base[BSPI]))
+ return PTR_ERR(qspi->base[BSPI]);
+ qspi->bspi_mode = true;
+ } else {
+ qspi->bspi_mode = false;
+ }
+
+ dev_info(dev, "using %smspi mode\n", qspi->bspi_mode ? "bspi-" : "");
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs_reg");
+ if (res) {
+ qspi->base[CHIP_SELECT] = devm_ioremap_resource(dev, res);
+ if (IS_ERR(qspi->base[CHIP_SELECT]))
+ return PTR_ERR(qspi->base[CHIP_SELECT]);
+ }
+
+ qspi->dev_ids = kcalloc(num_irqs, sizeof(struct bcm_qspi_dev_id),
+ GFP_KERNEL);
+ if (!qspi->dev_ids)
+ return -ENOMEM;
+
+ /*
+ * Some SoCs route the controller's MSPI/BSPI interrupt bits through a
+ * SoC-specific interrupt controller
+ */
+ if (soc_intc) {
+ qspi->soc_intc = soc_intc;
+ soc_intc->bcm_qspi_int_set(soc_intc, MSPI_DONE, true);
+ } else {
+ qspi->soc_intc = NULL;
+ }
+
+ if (qspi->clk) {
+ ret = clk_prepare_enable(qspi->clk);
+ if (ret) {
+ dev_err(dev, "failed to prepare clock\n");
+ goto qspi_probe_err;
+ }
+ qspi->base_clk = clk_get_rate(qspi->clk);
+ } else {
+ qspi->base_clk = MSPI_BASE_FREQ;
+ }
+
+ if (data->has_mspi_rev) {
+ rev = bcm_qspi_read(qspi, MSPI, MSPI_REV);
+ /* some older revs do not have a MSPI_REV register */
+ if ((rev & 0xff) == 0xff)
+ rev = 0;
+ }
+
+ qspi->mspi_maj_rev = (rev >> 4) & 0xf;
+ qspi->mspi_min_rev = rev & 0xf;
+ qspi->mspi_spcr3_sysclk = data->has_spcr3_sysclk;
+
+ qspi->max_speed_hz = qspi->base_clk / (bcm_qspi_spbr_min(qspi) * 2);
+
+ /*
+ * On SW resets it is possible to have the mask still enabled
+ * Need to disable the mask and clear the status while we init
+ */
+ bcm_qspi_hw_uninit(qspi);
+
+ for (val = 0; val < num_irqs; val++) {
+ irq = -1;
+ name = qspi_irq_tab[val].irq_name;
+ if (qspi_irq_tab[val].irq_source == SINGLE_L2) {
+ /* get the l2 interrupts */
+ irq = platform_get_irq_byname_optional(pdev, name);
+ } else if (!num_ints && soc_intc) {
+ /* all mspi, bspi intrs muxed to one L1 intr */
+ irq = platform_get_irq(pdev, 0);
+ }
+
+ if (irq >= 0) {
+ ret = devm_request_irq(&pdev->dev, irq,
+ qspi_irq_tab[val].irq_handler, 0,
+ name,
+ &qspi->dev_ids[val]);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "IRQ %s not found\n", name);
+ goto qspi_unprepare_err;
+ }
+
+ qspi->dev_ids[val].dev = qspi;
+ qspi->dev_ids[val].irqp = &qspi_irq_tab[val];
+ num_ints++;
+ dev_dbg(&pdev->dev, "registered IRQ %s %d\n",
+ qspi_irq_tab[val].irq_name,
+ irq);
+ }
+ }
+
+ if (!num_ints) {
+ dev_err(&pdev->dev, "no IRQs registered, cannot init driver\n");
+ ret = -EINVAL;
+ goto qspi_unprepare_err;
+ }
+
+ bcm_qspi_hw_init(qspi);
+ init_completion(&qspi->mspi_done);
+ init_completion(&qspi->bspi_done);
+ qspi->curr_cs = -1;
+
+ platform_set_drvdata(pdev, qspi);
+
+ qspi->xfer_mode.width = -1;
+ qspi->xfer_mode.addrlen = -1;
+ qspi->xfer_mode.hp = -1;
+
+ ret = spi_register_master(master);
+ if (ret < 0) {
+ dev_err(dev, "can't register master\n");
+ goto qspi_reg_err;
+ }
+
+ return 0;
+
+qspi_reg_err:
+ bcm_qspi_hw_uninit(qspi);
+qspi_unprepare_err:
+ clk_disable_unprepare(qspi->clk);
+qspi_probe_err:
+ kfree(qspi->dev_ids);
+ return ret;
+}
+/* probe function to be called by SoC specific platform driver probe */
+EXPORT_SYMBOL_GPL(bcm_qspi_probe);
+
+int bcm_qspi_remove(struct platform_device *pdev)
+{
+ struct bcm_qspi *qspi = platform_get_drvdata(pdev);
+
+ spi_unregister_master(qspi->master);
+ bcm_qspi_hw_uninit(qspi);
+ clk_disable_unprepare(qspi->clk);
+ kfree(qspi->dev_ids);
+
+ return 0;
+}
+/* function to be called by SoC specific platform driver remove() */
+EXPORT_SYMBOL_GPL(bcm_qspi_remove);
+
+static int __maybe_unused bcm_qspi_suspend(struct device *dev)
+{
+ struct bcm_qspi *qspi = dev_get_drvdata(dev);
+
+ /* store the override strap value */
+ if (!bcm_qspi_bspi_ver_three(qspi))
+ qspi->s3_strap_override_ctrl =
+ bcm_qspi_read(qspi, BSPI, BSPI_STRAP_OVERRIDE_CTRL);
+
+ spi_master_suspend(qspi->master);
+ clk_disable_unprepare(qspi->clk);
+ bcm_qspi_hw_uninit(qspi);
+
+ return 0;
+};
+
+static int __maybe_unused bcm_qspi_resume(struct device *dev)
+{
+ struct bcm_qspi *qspi = dev_get_drvdata(dev);
+ int ret = 0;
+
+ bcm_qspi_hw_init(qspi);
+ bcm_qspi_chip_select(qspi, qspi->curr_cs);
+ if (qspi->soc_intc)
+ /* enable MSPI interrupt */
+ qspi->soc_intc->bcm_qspi_int_set(qspi->soc_intc, MSPI_DONE,
+ true);
+
+ ret = clk_prepare_enable(qspi->clk);
+ if (!ret)
+ spi_master_resume(qspi->master);
+
+ return ret;
+}
+
+SIMPLE_DEV_PM_OPS(bcm_qspi_pm_ops, bcm_qspi_suspend, bcm_qspi_resume);
+
+/* pm_ops to be called by SoC specific platform driver */
+EXPORT_SYMBOL_GPL(bcm_qspi_pm_ops);
+
+MODULE_AUTHOR("Kamal Dasu");
+MODULE_DESCRIPTION("Broadcom QSPI driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/spi/spi-bcm-qspi.h b/drivers/spi/spi-bcm-qspi.h
new file mode 100644
index 000000000..01aec6460
--- /dev/null
+++ b/drivers/spi/spi-bcm-qspi.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2016 Broadcom
+ */
+
+#ifndef __SPI_BCM_QSPI_H__
+#define __SPI_BCM_QSPI_H__
+
+#include <linux/types.h>
+#include <linux/io.h>
+
+/* BSPI interrupt masks */
+#define INTR_BSPI_LR_OVERREAD_MASK BIT(4)
+#define INTR_BSPI_LR_SESSION_DONE_MASK BIT(3)
+#define INTR_BSPI_LR_IMPATIENT_MASK BIT(2)
+#define INTR_BSPI_LR_SESSION_ABORTED_MASK BIT(1)
+#define INTR_BSPI_LR_FULLNESS_REACHED_MASK BIT(0)
+
+#define BSPI_LR_INTERRUPTS_DATA \
+ (INTR_BSPI_LR_SESSION_DONE_MASK | \
+ INTR_BSPI_LR_FULLNESS_REACHED_MASK)
+
+#define BSPI_LR_INTERRUPTS_ERROR \
+ (INTR_BSPI_LR_OVERREAD_MASK | \
+ INTR_BSPI_LR_IMPATIENT_MASK | \
+ INTR_BSPI_LR_SESSION_ABORTED_MASK)
+
+#define BSPI_LR_INTERRUPTS_ALL \
+ (BSPI_LR_INTERRUPTS_ERROR | \
+ BSPI_LR_INTERRUPTS_DATA)
+
+/* MSPI Interrupt masks */
+#define INTR_MSPI_HALTED_MASK BIT(6)
+#define INTR_MSPI_DONE_MASK BIT(5)
+
+#define MSPI_INTERRUPTS_ALL \
+ (INTR_MSPI_DONE_MASK | \
+ INTR_MSPI_HALTED_MASK)
+
+#define QSPI_INTERRUPTS_ALL \
+ (MSPI_INTERRUPTS_ALL | \
+ BSPI_LR_INTERRUPTS_ALL)
+
+struct platform_device;
+struct dev_pm_ops;
+
+enum {
+ MSPI_DONE = 0x1,
+ BSPI_DONE = 0x2,
+ BSPI_ERR = 0x4,
+ MSPI_BSPI_DONE = 0x7
+};
+
+struct bcm_qspi_soc_intc {
+ void (*bcm_qspi_int_ack)(struct bcm_qspi_soc_intc *soc_intc, int type);
+ void (*bcm_qspi_int_set)(struct bcm_qspi_soc_intc *soc_intc, int type,
+ bool en);
+ u32 (*bcm_qspi_get_int_status)(struct bcm_qspi_soc_intc *soc_intc);
+};
+
+/* Read controller register */
+static inline u32 bcm_qspi_readl(bool be, void __iomem *addr)
+{
+ if (be)
+ return ioread32be(addr);
+ else
+ return readl_relaxed(addr);
+}
+
+/* Write controller register */
+static inline void bcm_qspi_writel(bool be,
+ unsigned int data, void __iomem *addr)
+{
+ if (be)
+ iowrite32be(data, addr);
+ else
+ writel_relaxed(data, addr);
+}
+
+static inline u32 get_qspi_mask(int type)
+{
+ switch (type) {
+ case MSPI_DONE:
+ return INTR_MSPI_DONE_MASK;
+ case BSPI_DONE:
+ return BSPI_LR_INTERRUPTS_ALL;
+ case MSPI_BSPI_DONE:
+ return QSPI_INTERRUPTS_ALL;
+ case BSPI_ERR:
+ return BSPI_LR_INTERRUPTS_ERROR;
+ }
+
+ return 0;
+}
+
+/* The common driver functions to be called by the SoC platform driver */
+int bcm_qspi_probe(struct platform_device *pdev,
+ struct bcm_qspi_soc_intc *soc_intc);
+int bcm_qspi_remove(struct platform_device *pdev);
+
+/* pm_ops used by the SoC platform driver called on PM suspend/resume */
+extern const struct dev_pm_ops bcm_qspi_pm_ops;
+
+#endif /* __SPI_BCM_QSPI_H__ */
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
new file mode 100644
index 000000000..747e03228
--- /dev/null
+++ b/drivers/spi/spi-bcm2835.c
@@ -0,0 +1,1449 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for Broadcom BCM2835 SPI Controllers
+ *
+ * Copyright (C) 2012 Chris Boot
+ * Copyright (C) 2013 Stephen Warren
+ * Copyright (C) 2015 Martin Sperl
+ *
+ * This driver is inspired by:
+ * spi-ath79.c, Copyright (C) 2009-2011 Gabor Juhos <juhosg@openwrt.org>
+ * spi-atmel.c, Copyright (C) 2006 Atmel Corporation
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/machine.h> /* FIXME: using chip internals */
+#include <linux/gpio/driver.h> /* FIXME: using chip internals */
+#include <linux/of_irq.h>
+#include <linux/spi/spi.h>
+
+/* SPI register offsets */
+#define BCM2835_SPI_CS 0x00
+#define BCM2835_SPI_FIFO 0x04
+#define BCM2835_SPI_CLK 0x08
+#define BCM2835_SPI_DLEN 0x0c
+#define BCM2835_SPI_LTOH 0x10
+#define BCM2835_SPI_DC 0x14
+
+/* Bitfields in CS */
+#define BCM2835_SPI_CS_LEN_LONG 0x02000000
+#define BCM2835_SPI_CS_DMA_LEN 0x01000000
+#define BCM2835_SPI_CS_CSPOL2 0x00800000
+#define BCM2835_SPI_CS_CSPOL1 0x00400000
+#define BCM2835_SPI_CS_CSPOL0 0x00200000
+#define BCM2835_SPI_CS_RXF 0x00100000
+#define BCM2835_SPI_CS_RXR 0x00080000
+#define BCM2835_SPI_CS_TXD 0x00040000
+#define BCM2835_SPI_CS_RXD 0x00020000
+#define BCM2835_SPI_CS_DONE 0x00010000
+#define BCM2835_SPI_CS_LEN 0x00002000
+#define BCM2835_SPI_CS_REN 0x00001000
+#define BCM2835_SPI_CS_ADCS 0x00000800
+#define BCM2835_SPI_CS_INTR 0x00000400
+#define BCM2835_SPI_CS_INTD 0x00000200
+#define BCM2835_SPI_CS_DMAEN 0x00000100
+#define BCM2835_SPI_CS_TA 0x00000080
+#define BCM2835_SPI_CS_CSPOL 0x00000040
+#define BCM2835_SPI_CS_CLEAR_RX 0x00000020
+#define BCM2835_SPI_CS_CLEAR_TX 0x00000010
+#define BCM2835_SPI_CS_CPOL 0x00000008
+#define BCM2835_SPI_CS_CPHA 0x00000004
+#define BCM2835_SPI_CS_CS_10 0x00000002
+#define BCM2835_SPI_CS_CS_01 0x00000001
+
+#define BCM2835_SPI_FIFO_SIZE 64
+#define BCM2835_SPI_FIFO_SIZE_3_4 48
+#define BCM2835_SPI_DMA_MIN_LENGTH 96
+#define BCM2835_SPI_MODE_BITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
+ | SPI_NO_CS | SPI_3WIRE)
+
+#define DRV_NAME "spi-bcm2835"
+
+/* define polling limits */
+static unsigned int polling_limit_us = 30;
+module_param(polling_limit_us, uint, 0664);
+MODULE_PARM_DESC(polling_limit_us,
+ "time in us to run a transfer in polling mode\n");
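+/*
+ * polling_limit_us bounds how long a transfer may take while still being
+ * run in polling mode; longer transfers are expected to fall back to
+ * interrupt (or DMA) handling, and the count_transfer_* statistics below
+ * track how often each path is taken.
+ */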
+
+/**
+ * struct bcm2835_spi - BCM2835 SPI controller
+ * @regs: base address of register map
+ * @clk: core clock, divided to calculate serial clock
+ * @clk_hz: core clock cached speed
+ * @irq: interrupt, signals TX FIFO empty or RX FIFO ¾ full
+ * @tfr: SPI transfer currently processed
+ * @ctlr: SPI controller reverse lookup
+ * @tx_buf: pointer whence next transmitted byte is read
+ * @rx_buf: pointer where next received byte is written
+ * @tx_len: remaining bytes to transmit
+ * @rx_len: remaining bytes to receive
+ * @tx_prologue: bytes transmitted without DMA if first TX sglist entry's
+ * length is not a multiple of 4 (to overcome hardware limitation)
+ * @rx_prologue: bytes received without DMA if first RX sglist entry's
+ * length is not a multiple of 4 (to overcome hardware limitation)
+ * @tx_spillover: whether @tx_prologue spills over to second TX sglist entry
+ * @debugfs_dir: the debugfs directory - needed to remove debugfs when
+ * unloading the module
+ * @count_transfer_polling: count of how often polling mode is used
+ * @count_transfer_irq: count of how often interrupt mode is used
+ * @count_transfer_irq_after_polling: count of how often we fall back to
+ * interrupt mode after starting in polling mode.
+ * These are counted as well in @count_transfer_polling and
+ * @count_transfer_irq
+ * @count_transfer_dma: count how often dma mode is used
+ * @slv: SPI slave currently selected
+ * (used by bcm2835_spi_dma_tx_done() to write @clear_rx_cs)
+ * @tx_dma_active: whether a TX DMA descriptor is in progress
+ * @rx_dma_active: whether a RX DMA descriptor is in progress
+ * (used by bcm2835_spi_dma_tx_done() to handle a race)
+ * @fill_tx_desc: preallocated TX DMA descriptor used for RX-only transfers
+ * (cyclically copies from zero page to TX FIFO)
+ * @fill_tx_addr: bus address of zero page
+ */
+struct bcm2835_spi {
+ void __iomem *regs;
+ struct clk *clk;
+ unsigned long clk_hz;
+ int irq;
+ struct spi_transfer *tfr;
+ struct spi_controller *ctlr;
+ const u8 *tx_buf;
+ u8 *rx_buf;
+ int tx_len;
+ int rx_len;
+ int tx_prologue;
+ int rx_prologue;
+ unsigned int tx_spillover;
+
+ struct dentry *debugfs_dir;
+ u64 count_transfer_polling;
+ u64 count_transfer_irq;
+ u64 count_transfer_irq_after_polling;
+ u64 count_transfer_dma;
+
+ struct bcm2835_spidev *slv;
+ unsigned int tx_dma_active;
+ unsigned int rx_dma_active;
+ struct dma_async_tx_descriptor *fill_tx_desc;
+ dma_addr_t fill_tx_addr;
+};
+
+/**
+ * struct bcm2835_spidev - BCM2835 SPI slave
+ * @prepare_cs: precalculated CS register value for ->prepare_message()
+ * (uses slave-specific clock polarity and phase settings)
+ * @clear_rx_desc: preallocated RX DMA descriptor used for TX-only transfers
+ * (cyclically clears RX FIFO by writing @clear_rx_cs to CS register)
+ * @clear_rx_addr: bus address of @clear_rx_cs
+ * @clear_rx_cs: precalculated CS register value to clear RX FIFO
+ * (uses slave-specific clock polarity and phase settings)
+ */
+struct bcm2835_spidev {
+ u32 prepare_cs;
+ struct dma_async_tx_descriptor *clear_rx_desc;
+ dma_addr_t clear_rx_addr;
+ u32 clear_rx_cs ____cacheline_aligned;
+};
+
+#if defined(CONFIG_DEBUG_FS)
+static void bcm2835_debugfs_create(struct bcm2835_spi *bs,
+ const char *dname)
+{
+ char name[64];
+ struct dentry *dir;
+
+ /* get full name */
+ snprintf(name, sizeof(name), "spi-bcm2835-%s", dname);
+
+ /* the base directory */
+ dir = debugfs_create_dir(name, NULL);
+ bs->debugfs_dir = dir;
+
+ /* the counters */
+ debugfs_create_u64("count_transfer_polling", 0444, dir,
+ &bs->count_transfer_polling);
+ debugfs_create_u64("count_transfer_irq", 0444, dir,
+ &bs->count_transfer_irq);
+ debugfs_create_u64("count_transfer_irq_after_polling", 0444, dir,
+ &bs->count_transfer_irq_after_polling);
+ debugfs_create_u64("count_transfer_dma", 0444, dir,
+ &bs->count_transfer_dma);
+}
+
+static void bcm2835_debugfs_remove(struct bcm2835_spi *bs)
+{
+ debugfs_remove_recursive(bs->debugfs_dir);
+ bs->debugfs_dir = NULL;
+}
+#else
+static void bcm2835_debugfs_create(struct bcm2835_spi *bs,
+ const char *dname)
+{
+}
+
+static void bcm2835_debugfs_remove(struct bcm2835_spi *bs)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
+static inline u32 bcm2835_rd(struct bcm2835_spi *bs, unsigned int reg)
+{
+ return readl(bs->regs + reg);
+}
+
+static inline void bcm2835_wr(struct bcm2835_spi *bs, unsigned int reg, u32 val)
+{
+ writel(val, bs->regs + reg);
+}
+
+static inline void bcm2835_rd_fifo(struct bcm2835_spi *bs)
+{
+ u8 byte;
+
+ while ((bs->rx_len) &&
+ (bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_RXD)) {
+ byte = bcm2835_rd(bs, BCM2835_SPI_FIFO);
+ if (bs->rx_buf)
+ *bs->rx_buf++ = byte;
+ bs->rx_len--;
+ }
+}
+
+static inline void bcm2835_wr_fifo(struct bcm2835_spi *bs)
+{
+ u8 byte;
+
+ while ((bs->tx_len) &&
+ (bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_TXD)) {
+ byte = bs->tx_buf ? *bs->tx_buf++ : 0;
+ bcm2835_wr(bs, BCM2835_SPI_FIFO, byte);
+ bs->tx_len--;
+ }
+}
+
+/**
+ * bcm2835_rd_fifo_count() - blindly read exactly @count bytes from RX FIFO
+ * @bs: BCM2835 SPI controller
+ * @count: bytes to read from RX FIFO
+ *
+ * The caller must ensure that @bs->rx_len is greater than or equal to @count,
+ * that the RX FIFO contains at least @count bytes and that the DMA Enable flag
+ * in the CS register is set (such that a read from the FIFO register receives
+ * 32-bit instead of just 8-bit). Moreover @bs->rx_buf must not be %NULL.
+ */
+static inline void bcm2835_rd_fifo_count(struct bcm2835_spi *bs, int count)
+{
+ u32 val;
+ int len;
+
+ bs->rx_len -= count;
+
+ do {
+ val = bcm2835_rd(bs, BCM2835_SPI_FIFO);
+ len = min(count, 4);
+ memcpy(bs->rx_buf, &val, len);
+ bs->rx_buf += len;
+ count -= 4;
+ } while (count > 0);
+}
+
+/**
+ * bcm2835_wr_fifo_count() - blindly write exactly @count bytes to TX FIFO
+ * @bs: BCM2835 SPI controller
+ * @count: bytes to write to TX FIFO
+ *
+ * The caller must ensure that @bs->tx_len is greater than or equal to @count,
+ * that the TX FIFO can accommodate @count bytes and that the DMA Enable flag
+ * in the CS register is set (such that a write to the FIFO register transmits
+ * 32-bit instead of just 8-bit).
+ */
+static inline void bcm2835_wr_fifo_count(struct bcm2835_spi *bs, int count)
+{
+ u32 val;
+ int len;
+
+ bs->tx_len -= count;
+
+ do {
+ if (bs->tx_buf) {
+ len = min(count, 4);
+ memcpy(&val, bs->tx_buf, len);
+ bs->tx_buf += len;
+ } else {
+ val = 0;
+ }
+ bcm2835_wr(bs, BCM2835_SPI_FIFO, val);
+ count -= 4;
+ } while (count > 0);
+}
+
+/**
+ * bcm2835_wait_tx_fifo_empty() - busy-wait for TX FIFO to empty
+ * @bs: BCM2835 SPI controller
+ *
+ * The caller must ensure that the RX FIFO can accommodate as many bytes
+ * as have been written to the TX FIFO: Transmission is halted once the
+ * RX FIFO is full, causing this function to spin forever.
+ */
+static inline void bcm2835_wait_tx_fifo_empty(struct bcm2835_spi *bs)
+{
+ while (!(bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_DONE))
+ cpu_relax();
+}
+
+/**
+ * bcm2835_rd_fifo_blind() - blindly read up to @count bytes from RX FIFO
+ * @bs: BCM2835 SPI controller
+ * @count: bytes available for reading in RX FIFO
+ */
+static inline void bcm2835_rd_fifo_blind(struct bcm2835_spi *bs, int count)
+{
+ u8 val;
+
+ count = min(count, bs->rx_len);
+ bs->rx_len -= count;
+
+ do {
+ val = bcm2835_rd(bs, BCM2835_SPI_FIFO);
+ if (bs->rx_buf)
+ *bs->rx_buf++ = val;
+ } while (--count);
+}
+
+/**
+ * bcm2835_wr_fifo_blind() - blindly write up to @count bytes to TX FIFO
+ * @bs: BCM2835 SPI controller
+ * @count: bytes available for writing in TX FIFO
+ */
+static inline void bcm2835_wr_fifo_blind(struct bcm2835_spi *bs, int count)
+{
+ u8 val;
+
+ count = min(count, bs->tx_len);
+ bs->tx_len -= count;
+
+ do {
+ val = bs->tx_buf ? *bs->tx_buf++ : 0;
+ bcm2835_wr(bs, BCM2835_SPI_FIFO, val);
+ } while (--count);
+}
+
+static void bcm2835_spi_reset_hw(struct bcm2835_spi *bs)
+{
+ u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
+
+ /* Disable SPI interrupts and transfer */
+ cs &= ~(BCM2835_SPI_CS_INTR |
+ BCM2835_SPI_CS_INTD |
+ BCM2835_SPI_CS_DMAEN |
+ BCM2835_SPI_CS_TA);
+ /*
+ * Transmission sometimes breaks unless the DONE bit is written at the
+ * end of every transfer. The spec says it's a RO bit. Either the
+ * spec is wrong and the bit is actually of type RW1C, or it's a
+ * hardware erratum.
+ */
+ cs |= BCM2835_SPI_CS_DONE;
+ /* and reset RX/TX FIFOS */
+ cs |= BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX;
+
+ /* and reset the SPI_HW */
+ bcm2835_wr(bs, BCM2835_SPI_CS, cs);
+ /* as well as DLEN */
+ bcm2835_wr(bs, BCM2835_SPI_DLEN, 0);
+}
+
+static irqreturn_t bcm2835_spi_interrupt(int irq, void *dev_id)
+{
+ struct bcm2835_spi *bs = dev_id;
+ u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
+
+ /* Bail out early if interrupts are not enabled */
+ if (!(cs & BCM2835_SPI_CS_INTR))
+ return IRQ_NONE;
+
+ /*
+ * An interrupt is signaled either if DONE is set (TX FIFO empty)
+ * or if RXR is set (RX FIFO >= ¾ full).
+ */
+ if (cs & BCM2835_SPI_CS_RXF)
+ bcm2835_rd_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE);
+ else if (cs & BCM2835_SPI_CS_RXR)
+ bcm2835_rd_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE_3_4);
+
+ if (bs->tx_len && cs & BCM2835_SPI_CS_DONE)
+ bcm2835_wr_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE);
+
+ /* Read as many bytes as possible from FIFO */
+ bcm2835_rd_fifo(bs);
+ /* Write as many bytes as possible to FIFO */
+ bcm2835_wr_fifo(bs);
+
+ if (!bs->rx_len) {
+ /* Transfer complete - reset SPI HW */
+ bcm2835_spi_reset_hw(bs);
+ /* wake up the framework */
+ spi_finalize_current_transfer(bs->ctlr);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int bcm2835_spi_transfer_one_irq(struct spi_controller *ctlr,
+ struct spi_device *spi,
+ struct spi_transfer *tfr,
+ u32 cs, bool fifo_empty)
+{
+ struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
+
+ /* update usage statistics */
+ bs->count_transfer_irq++;
+
+ /*
+ * Enable HW block, but with interrupts still disabled.
+ * Otherwise the empty TX FIFO would immediately trigger an interrupt.
+ */
+ bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA);
+
+ /* fill TX FIFO as much as possible */
+ if (fifo_empty)
+ bcm2835_wr_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE);
+ bcm2835_wr_fifo(bs);
+
+ /* enable interrupts */
+ cs |= BCM2835_SPI_CS_INTR | BCM2835_SPI_CS_INTD | BCM2835_SPI_CS_TA;
+ bcm2835_wr(bs, BCM2835_SPI_CS, cs);
+
+ /* signal that we need to wait for completion */
+ return 1;
+}
+
+/**
+ * bcm2835_spi_transfer_prologue() - transfer first few bytes without DMA
+ * @ctlr: SPI master controller
+ * @tfr: SPI transfer
+ * @bs: BCM2835 SPI controller
+ * @cs: CS register
+ *
+ * A limitation in DMA mode is that the FIFO must be accessed in 4 byte chunks.
+ * Only the final write access is permitted to transmit less than 4 bytes; the
+ * SPI controller deduces its intended size from the DLEN register.
+ *
+ * If a TX or RX sglist contains multiple entries, one per page, and the first
+ * entry starts in the middle of a page, that first entry's length may not be
+ * a multiple of 4. Subsequent entries are fine because they span an entire
+ * page, hence do have a length that's a multiple of 4.
+ *
+ * This cannot happen with kmalloc'ed buffers (which is what most clients use)
+ * because they are contiguous in physical memory and therefore not split on
+ * page boundaries by spi_map_buf(). But it *can* happen with vmalloc'ed
+ * buffers.
+ *
+ * The DMA engine is incapable of combining sglist entries into a continuous
+ * stream of 4 byte chunks, it treats every entry separately: A TX entry is
+ * rounded up to a multiple of 4 bytes by transmitting surplus bytes, an RX
+ * entry is rounded up by throwing away received bytes.
+ *
+ * Overcome this limitation by transferring the first few bytes without DMA:
+ * E.g. if the first TX sglist entry's length is 23 and the first RX's is 42,
+ * write 3 bytes to the TX FIFO but read only 2 bytes from the RX FIFO.
+ * The residue of 1 byte in the RX FIFO is picked up by DMA. Together with
+ * the rest of the first RX sglist entry it makes up a multiple of 4 bytes.
+ *
+ * Should the RX prologue be larger, say, 3 vis-à-vis a TX prologue of 1,
+ * write 1 + 4 = 5 bytes to the TX FIFO and read 3 bytes from the RX FIFO.
+ * Caution, the additional 4 bytes spill over to the second TX sglist entry
+ * if the length of the first is *exactly* 1.
+ *
+ * At most 6 bytes are written and at most 3 bytes read. Do we know the
+ * transfer has this many bytes? Yes, see BCM2835_SPI_DMA_MIN_LENGTH.
+ *
+ * The FIFO is normally accessed with 8-bit width by the CPU and 32-bit width
+ * by the DMA engine. Toggling the DMA Enable flag in the CS register switches
+ * the width but also garbles the FIFO's contents. The prologue must therefore
+ * be transmitted in 32-bit width to ensure that the following DMA transfer can
+ * pick up the residue in the RX FIFO in ungarbled form.
+ */
+static void bcm2835_spi_transfer_prologue(struct spi_controller *ctlr,
+ struct spi_transfer *tfr,
+ struct bcm2835_spi *bs,
+ u32 cs)
+{
+ int tx_remaining;
+
+ bs->tfr = tfr;
+ bs->tx_prologue = 0;
+ bs->rx_prologue = 0;
+ bs->tx_spillover = false;
+
+ if (bs->tx_buf && !sg_is_last(&tfr->tx_sg.sgl[0]))
+ bs->tx_prologue = sg_dma_len(&tfr->tx_sg.sgl[0]) & 3;
+
+ if (bs->rx_buf && !sg_is_last(&tfr->rx_sg.sgl[0])) {
+ bs->rx_prologue = sg_dma_len(&tfr->rx_sg.sgl[0]) & 3;
+
+ if (bs->rx_prologue > bs->tx_prologue) {
+ if (!bs->tx_buf || sg_is_last(&tfr->tx_sg.sgl[0])) {
+ bs->tx_prologue = bs->rx_prologue;
+ } else {
+ bs->tx_prologue += 4;
+ bs->tx_spillover =
+ !(sg_dma_len(&tfr->tx_sg.sgl[0]) & ~3);
+ }
+ }
+ }
+
+ /* rx_prologue > 0 implies tx_prologue > 0, so check only the latter */
+ if (!bs->tx_prologue)
+ return;
+
+ /* Write and read RX prologue. Adjust first entry in RX sglist. */
+ if (bs->rx_prologue) {
+ bcm2835_wr(bs, BCM2835_SPI_DLEN, bs->rx_prologue);
+ bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA
+ | BCM2835_SPI_CS_DMAEN);
+ bcm2835_wr_fifo_count(bs, bs->rx_prologue);
+ bcm2835_wait_tx_fifo_empty(bs);
+ bcm2835_rd_fifo_count(bs, bs->rx_prologue);
+ bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_CLEAR_RX
+ | BCM2835_SPI_CS_CLEAR_TX
+ | BCM2835_SPI_CS_DONE);
+
+ dma_sync_single_for_device(ctlr->dma_rx->device->dev,
+ sg_dma_address(&tfr->rx_sg.sgl[0]),
+ bs->rx_prologue, DMA_FROM_DEVICE);
+
+ sg_dma_address(&tfr->rx_sg.sgl[0]) += bs->rx_prologue;
+ sg_dma_len(&tfr->rx_sg.sgl[0]) -= bs->rx_prologue;
+ }
+
+ if (!bs->tx_buf)
+ return;
+
+ /*
+ * Write remaining TX prologue. Adjust first entry in TX sglist.
+ * Also adjust second entry if prologue spills over to it.
+ */
+ tx_remaining = bs->tx_prologue - bs->rx_prologue;
+ if (tx_remaining) {
+ bcm2835_wr(bs, BCM2835_SPI_DLEN, tx_remaining);
+ bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA
+ | BCM2835_SPI_CS_DMAEN);
+ bcm2835_wr_fifo_count(bs, tx_remaining);
+ bcm2835_wait_tx_fifo_empty(bs);
+ bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_CLEAR_TX
+ | BCM2835_SPI_CS_DONE);
+ }
+
+ if (likely(!bs->tx_spillover)) {
+ sg_dma_address(&tfr->tx_sg.sgl[0]) += bs->tx_prologue;
+ sg_dma_len(&tfr->tx_sg.sgl[0]) -= bs->tx_prologue;
+ } else {
+ sg_dma_len(&tfr->tx_sg.sgl[0]) = 0;
+ sg_dma_address(&tfr->tx_sg.sgl[1]) += 4;
+ sg_dma_len(&tfr->tx_sg.sgl[1]) -= 4;
+ }
+}
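+
+/*
+ * Illustrative summary of the bookkeeping above (not exhaustive): with a
+ * first TX sglist entry of length 23 and a first RX entry of length 42,
+ * tx_prologue = 3, rx_prologue = 2 and tx_spillover = false. With a first
+ * TX entry of length exactly 1 and a first RX entry of length 3,
+ * rx_prologue = 3 forces tx_prologue = 1 + 4 = 5, and since 1 & ~3 == 0 the
+ * surplus 4 bytes spill over into the second TX entry (tx_spillover = true).
+ */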
+
+/**
+ * bcm2835_spi_undo_prologue() - reconstruct original sglist state
+ * @bs: BCM2835 SPI controller
+ *
+ * Undo changes which were made to an SPI transfer's sglist when transmitting
+ * the prologue. This is necessary to ensure the same memory ranges are
+ * unmapped that were originally mapped.
+ */
+static void bcm2835_spi_undo_prologue(struct bcm2835_spi *bs)
+{
+ struct spi_transfer *tfr = bs->tfr;
+
+ if (!bs->tx_prologue)
+ return;
+
+ if (bs->rx_prologue) {
+ sg_dma_address(&tfr->rx_sg.sgl[0]) -= bs->rx_prologue;
+ sg_dma_len(&tfr->rx_sg.sgl[0]) += bs->rx_prologue;
+ }
+
+ if (!bs->tx_buf)
+ goto out;
+
+ if (likely(!bs->tx_spillover)) {
+ sg_dma_address(&tfr->tx_sg.sgl[0]) -= bs->tx_prologue;
+ sg_dma_len(&tfr->tx_sg.sgl[0]) += bs->tx_prologue;
+ } else {
+ sg_dma_len(&tfr->tx_sg.sgl[0]) = bs->tx_prologue - 4;
+ sg_dma_address(&tfr->tx_sg.sgl[1]) -= 4;
+ sg_dma_len(&tfr->tx_sg.sgl[1]) += 4;
+ }
+out:
+ bs->tx_prologue = 0;
+}
+
+/**
+ * bcm2835_spi_dma_rx_done() - callback for DMA RX channel
+ * @data: SPI master controller
+ *
+ * Used for bidirectional and RX-only transfers.
+ */
+static void bcm2835_spi_dma_rx_done(void *data)
+{
+ struct spi_controller *ctlr = data;
+ struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
+
+ /*
+ * Terminate TX DMA even though we have no IRQ for it: by the time
+ * RX DMA terminates and this callback runs, TX DMA must already
+ * have finished - we cannot get here otherwise.
+ */
+ dmaengine_terminate_async(ctlr->dma_tx);
+ bs->tx_dma_active = false;
+ bs->rx_dma_active = false;
+ bcm2835_spi_undo_prologue(bs);
+
+ /* reset fifo and HW */
+ bcm2835_spi_reset_hw(bs);
+
+ /* and mark as completed */
+ spi_finalize_current_transfer(ctlr);
+}
+
+/**
+ * bcm2835_spi_dma_tx_done() - callback for DMA TX channel
+ * @data: SPI master controller
+ *
+ * Used for TX-only transfers.
+ */
+static void bcm2835_spi_dma_tx_done(void *data)
+{
+ struct spi_controller *ctlr = data;
+ struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
+
+ /*
+ * Busy-wait for the TX FIFO to empty, clearing the RX FIFO on each
+ * iteration so that a full RX FIFO cannot stall transmission.
+ */
+ while (!(bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_DONE))
+ bcm2835_wr(bs, BCM2835_SPI_CS, bs->slv->clear_rx_cs);
+
+ bs->tx_dma_active = false;
+ smp_wmb();
+
+ /*
+ * In case of a very short transfer, RX DMA may not have been
+ * issued yet. The onus is then on bcm2835_spi_transfer_one_dma()
+ * to terminate it immediately after issuing.
+ */
+ if (cmpxchg(&bs->rx_dma_active, true, false))
+ dmaengine_terminate_async(ctlr->dma_rx);
+
+ bcm2835_spi_undo_prologue(bs);
+ bcm2835_spi_reset_hw(bs);
+ spi_finalize_current_transfer(ctlr);
+}
+
+/**
+ * bcm2835_spi_prepare_sg() - prepare and submit DMA descriptor for sglist
+ * @ctlr: SPI master controller
+ * @tfr: SPI transfer
+ * @bs: BCM2835 SPI controller
+ * @slv: BCM2835 SPI slave
+ * @is_tx: whether to submit DMA descriptor for TX or RX sglist
+ *
+ * Prepare and submit a DMA descriptor for the TX or RX sglist of @tfr.
+ * Return 0 on success or a negative error number.
+ */
+static int bcm2835_spi_prepare_sg(struct spi_controller *ctlr,
+ struct spi_transfer *tfr,
+ struct bcm2835_spi *bs,
+ struct bcm2835_spidev *slv,
+ bool is_tx)
+{
+ struct dma_chan *chan;
+ struct scatterlist *sgl;
+ unsigned int nents;
+ enum dma_transfer_direction dir;
+ unsigned long flags;
+
+ struct dma_async_tx_descriptor *desc;
+ dma_cookie_t cookie;
+
+ if (is_tx) {
+ dir = DMA_MEM_TO_DEV;
+ chan = ctlr->dma_tx;
+ nents = tfr->tx_sg.nents;
+ sgl = tfr->tx_sg.sgl;
+ flags = tfr->rx_buf ? 0 : DMA_PREP_INTERRUPT;
+ } else {
+ dir = DMA_DEV_TO_MEM;
+ chan = ctlr->dma_rx;
+ nents = tfr->rx_sg.nents;
+ sgl = tfr->rx_sg.sgl;
+ flags = DMA_PREP_INTERRUPT;
+ }
+ /* prepare the channel */
+ desc = dmaengine_prep_slave_sg(chan, sgl, nents, dir, flags);
+ if (!desc)
+ return -EINVAL;
+
+ /*
+ * Completion is signaled by the RX channel for bidirectional and
+ * RX-only transfers; else by the TX channel for TX-only transfers.
+ */
+ if (!is_tx) {
+ desc->callback = bcm2835_spi_dma_rx_done;
+ desc->callback_param = ctlr;
+ } else if (!tfr->rx_buf) {
+ desc->callback = bcm2835_spi_dma_tx_done;
+ desc->callback_param = ctlr;
+ bs->slv = slv;
+ }
+
+ /* submit it to DMA-engine */
+ cookie = dmaengine_submit(desc);
+
+ return dma_submit_error(cookie);
+}
+
+/**
+ * bcm2835_spi_transfer_one_dma() - perform SPI transfer using DMA engine
+ * @ctlr: SPI master controller
+ * @tfr: SPI transfer
+ * @slv: BCM2835 SPI slave
+ * @cs: CS register
+ *
+ * For *bidirectional* transfers (both tx_buf and rx_buf are non-%NULL), set up
+ * the TX and RX DMA channel to copy between memory and FIFO register.
+ *
+ * For *TX-only* transfers (rx_buf is %NULL), copying the RX FIFO's contents to
+ * memory is pointless. However not reading the RX FIFO isn't an option either
+ * because transmission is halted once it's full. As a workaround, cyclically
+ * clear the RX FIFO by setting the CLEAR_RX bit in the CS register.
+ *
+ * The CS register value is precalculated in bcm2835_spi_setup(). Normally
+ * this is called only once, on slave registration. A DMA descriptor to write
+ * this value is preallocated in bcm2835_dma_init(). All that's left to do
+ * when performing a TX-only transfer is to submit this descriptor to the RX
+ * DMA channel. Latency is thereby minimized. The descriptor does not
+ * generate any interrupts while running. It must be terminated once the
+ * TX DMA channel is done.
+ *
+ * Clearing the RX FIFO is paced by the DREQ signal. The signal is asserted
+ * when the RX FIFO becomes half full, i.e. 32 bytes. (Tuneable with the DC
+ * register.) Reading 32 bytes from the RX FIFO would normally require 8 bus
+ * accesses, whereas clearing it requires only 1 bus access. So an 8-fold
+ * reduction in bus traffic and thus energy consumption is achieved.
+ *
+ * For *RX-only* transfers (tx_buf is %NULL), fill the TX FIFO by cyclically
+ * copying from the zero page. The DMA descriptor to do this is preallocated
+ * in bcm2835_dma_init(). It must be terminated once the RX DMA channel is
+ * done and can then be reused.
+ *
+ * The BCM2835 DMA driver autodetects when a transaction copies from the zero
+ * page and utilizes the DMA controller's ability to synthesize zeroes instead
+ * of copying them from memory. This reduces traffic on the memory bus. The
+ * feature is not available on so-called "lite" channels, but normally TX DMA
+ * is backed by a full-featured channel.
+ *
+ * Zero-filling the TX FIFO is paced by the DREQ signal. Unfortunately the
+ * BCM2835 SPI controller continues to assert DREQ even after the DLEN register
+ * has been counted down to zero (hardware erratum). Thus, when the transfer
+ * has finished, the DMA engine zero-fills the TX FIFO until it is half full.
+ * (Tuneable with the DC register.) So up to 9 gratuitous bus accesses are
+ * performed at the end of an RX-only transfer.
+ */
+static int bcm2835_spi_transfer_one_dma(struct spi_controller *ctlr,
+ struct spi_transfer *tfr,
+ struct bcm2835_spidev *slv,
+ u32 cs)
+{
+ struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
+ dma_cookie_t cookie;
+ int ret;
+
+ /* update usage statistics */
+ bs->count_transfer_dma++;
+
+ /*
+ * Transfer first few bytes without DMA if length of first TX or RX
+ * sglist entry is not a multiple of 4 bytes (hardware limitation).
+ */
+ bcm2835_spi_transfer_prologue(ctlr, tfr, bs, cs);
+
+ /* setup tx-DMA */
+ if (bs->tx_buf) {
+ ret = bcm2835_spi_prepare_sg(ctlr, tfr, bs, slv, true);
+ } else {
+ cookie = dmaengine_submit(bs->fill_tx_desc);
+ ret = dma_submit_error(cookie);
+ }
+ if (ret)
+ goto err_reset_hw;
+
+ /* set the DMA length */
+ bcm2835_wr(bs, BCM2835_SPI_DLEN, bs->tx_len);
+
+ /* start the HW */
+ bcm2835_wr(bs, BCM2835_SPI_CS,
+ cs | BCM2835_SPI_CS_TA | BCM2835_SPI_CS_DMAEN);
+
+ bs->tx_dma_active = true;
+ smp_wmb();
+
+ /* start TX early */
+ dma_async_issue_pending(ctlr->dma_tx);
+
+ /*
+ * Set up RX DMA late so that the transfer already runs while the
+ * RX buffers are still being mapped; this saves 10us or more.
+ */
+ if (bs->rx_buf) {
+ ret = bcm2835_spi_prepare_sg(ctlr, tfr, bs, slv, false);
+ } else {
+ cookie = dmaengine_submit(slv->clear_rx_desc);
+ ret = dma_submit_error(cookie);
+ }
+ if (ret) {
+ /* need to reset on errors */
+ dmaengine_terminate_sync(ctlr->dma_tx);
+ bs->tx_dma_active = false;
+ goto err_reset_hw;
+ }
+
+ /* start rx dma late */
+ dma_async_issue_pending(ctlr->dma_rx);
+ bs->rx_dma_active = true;
+ smp_mb();
+
+ /*
+ * In case of a very short TX-only transfer, bcm2835_spi_dma_tx_done()
+ * may run before RX DMA is issued. Terminate RX DMA if so.
+ */
+ if (!bs->rx_buf && !bs->tx_dma_active &&
+ cmpxchg(&bs->rx_dma_active, true, false)) {
+ dmaengine_terminate_async(ctlr->dma_rx);
+ bcm2835_spi_reset_hw(bs);
+ }
+
+ /* wait for wakeup in framework */
+ return 1;
+
+err_reset_hw:
+ bcm2835_spi_reset_hw(bs);
+ bcm2835_spi_undo_prologue(bs);
+ return ret;
+}
+
+static bool bcm2835_spi_can_dma(struct spi_controller *ctlr,
+ struct spi_device *spi,
+ struct spi_transfer *tfr)
+{
+ /* we start DMA efforts only on bigger transfers */
+ if (tfr->len < BCM2835_SPI_DMA_MIN_LENGTH)
+ return false;
+
+ /* the transfer is large enough for DMA */
+ return true;
+}
+
+static void bcm2835_dma_release(struct spi_controller *ctlr,
+ struct bcm2835_spi *bs)
+{
+ if (ctlr->dma_tx) {
+ dmaengine_terminate_sync(ctlr->dma_tx);
+
+ if (bs->fill_tx_desc)
+ dmaengine_desc_free(bs->fill_tx_desc);
+
+ if (bs->fill_tx_addr)
+ dma_unmap_page_attrs(ctlr->dma_tx->device->dev,
+ bs->fill_tx_addr, sizeof(u32),
+ DMA_TO_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+
+ dma_release_channel(ctlr->dma_tx);
+ ctlr->dma_tx = NULL;
+ }
+
+ if (ctlr->dma_rx) {
+ dmaengine_terminate_sync(ctlr->dma_rx);
+ dma_release_channel(ctlr->dma_rx);
+ ctlr->dma_rx = NULL;
+ }
+}
+
+static int bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev,
+ struct bcm2835_spi *bs)
+{
+ struct dma_slave_config slave_config;
+ const __be32 *addr;
+ dma_addr_t dma_reg_base;
+ int ret;
+
+ /* base address in dma-space */
+ addr = of_get_address(ctlr->dev.of_node, 0, NULL, NULL);
+ if (!addr) {
+ dev_err(dev, "could not get DMA-register address - not using dma mode\n");
+ /* Fall back to interrupt mode */
+ return 0;
+ }
+ dma_reg_base = be32_to_cpup(addr);
+
+ /* get tx/rx dma */
+ ctlr->dma_tx = dma_request_chan(dev, "tx");
+ if (IS_ERR(ctlr->dma_tx)) {
+ dev_err(dev, "no tx-dma configuration found - not using dma mode\n");
+ ret = PTR_ERR(ctlr->dma_tx);
+ ctlr->dma_tx = NULL;
+ goto err;
+ }
+ ctlr->dma_rx = dma_request_chan(dev, "rx");
+ if (IS_ERR(ctlr->dma_rx)) {
+ dev_err(dev, "no rx-dma configuration found - not using dma mode\n");
+ ret = PTR_ERR(ctlr->dma_rx);
+ ctlr->dma_rx = NULL;
+ goto err_release;
+ }
+
+ /*
+ * The TX DMA channel either copies a transfer's TX buffer to the FIFO
+ * or, in case of an RX-only transfer, cyclically copies from the zero
+ * page to the FIFO using a preallocated, reusable descriptor.
+ */
+ slave_config.dst_addr = (u32)(dma_reg_base + BCM2835_SPI_FIFO);
+ slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+
+ ret = dmaengine_slave_config(ctlr->dma_tx, &slave_config);
+ if (ret)
+ goto err_config;
+
+ bs->fill_tx_addr = dma_map_page_attrs(ctlr->dma_tx->device->dev,
+ ZERO_PAGE(0), 0, sizeof(u32),
+ DMA_TO_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ if (dma_mapping_error(ctlr->dma_tx->device->dev, bs->fill_tx_addr)) {
+ dev_err(dev, "cannot map zero page - not using DMA mode\n");
+ bs->fill_tx_addr = 0;
+ ret = -ENOMEM;
+ goto err_release;
+ }
+
+ bs->fill_tx_desc = dmaengine_prep_dma_cyclic(ctlr->dma_tx,
+ bs->fill_tx_addr,
+ sizeof(u32), 0,
+ DMA_MEM_TO_DEV, 0);
+ if (!bs->fill_tx_desc) {
+ dev_err(dev, "cannot prepare fill_tx_desc - not using DMA mode\n");
+ ret = -ENOMEM;
+ goto err_release;
+ }
+
+ ret = dmaengine_desc_set_reuse(bs->fill_tx_desc);
+ if (ret) {
+ dev_err(dev, "cannot reuse fill_tx_desc - not using DMA mode\n");
+ goto err_release;
+ }
+
+ /*
+ * The RX DMA channel is used bidirectionally: It either reads the
+ * RX FIFO or, in case of a TX-only transfer, cyclically writes a
+ * precalculated value to the CS register to clear the RX FIFO.
+ */
+ slave_config.src_addr = (u32)(dma_reg_base + BCM2835_SPI_FIFO);
+ slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ slave_config.dst_addr = (u32)(dma_reg_base + BCM2835_SPI_CS);
+ slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+
+ ret = dmaengine_slave_config(ctlr->dma_rx, &slave_config);
+ if (ret)
+ goto err_config;
+
+ /* all went well, so set can_dma */
+ ctlr->can_dma = bcm2835_spi_can_dma;
+
+ return 0;
+
+err_config:
+ dev_err(dev, "issue configuring dma: %d - not using DMA mode\n",
+ ret);
+err_release:
+ bcm2835_dma_release(ctlr, bs);
+err:
+ /*
+ * Only report error for deferred probing, otherwise fall back to
+ * interrupt mode
+ */
+ if (ret != -EPROBE_DEFER)
+ ret = 0;
+
+ return ret;
+}
+
+static int bcm2835_spi_transfer_one_poll(struct spi_controller *ctlr,
+ struct spi_device *spi,
+ struct spi_transfer *tfr,
+ u32 cs)
+{
+ struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
+ unsigned long timeout;
+
+ /* update usage statistics */
+ bs->count_transfer_polling++;
+
+ /* enable HW block without interrupts */
+ bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA);
+
+ /*
+ * Fill the FIFO before calculating the timeout: if we are preempted
+ * here, the hardware keeps shifting out the data already in the FIFO.
+ */
+ bcm2835_wr_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE);
+
+ /* set the timeout to at least 2 jiffies */
+ timeout = jiffies + 2 + HZ * polling_limit_us / 1000000;
+
+ /* loop until the transfer is finished */
+ while (bs->rx_len) {
+ /* fill in tx fifo with remaining data */
+ bcm2835_wr_fifo(bs);
+
+ /* read from fifo as much as possible */
+ bcm2835_rd_fifo(bs);
+
+ /* if there is still data pending to read, check the timeout */
+ if (bs->rx_len && time_after(jiffies, timeout)) {
+ dev_dbg_ratelimited(&spi->dev,
+ "timeout period reached: jiffies: %lu remaining tx/rx: %d/%d - falling back to interrupt mode\n",
+ jiffies - timeout,
+ bs->tx_len, bs->rx_len);
+ /* fall back to interrupt mode */
+
+ /* update usage statistics */
+ bs->count_transfer_irq_after_polling++;
+
+ return bcm2835_spi_transfer_one_irq(ctlr, spi,
+ tfr, cs, false);
+ }
+ }
+
+ /* Transfer complete - reset SPI HW */
+ bcm2835_spi_reset_hw(bs);
+ /* and return without waiting for completion */
+ return 0;
+}
+
+static int bcm2835_spi_transfer_one(struct spi_controller *ctlr,
+ struct spi_device *spi,
+ struct spi_transfer *tfr)
+{
+ struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
+ struct bcm2835_spidev *slv = spi_get_ctldata(spi);
+ unsigned long spi_hz, cdiv;
+ unsigned long hz_per_byte, byte_limit;
+ u32 cs = slv->prepare_cs;
+
+ /* set clock */
+ spi_hz = tfr->speed_hz;
+
+ if (spi_hz >= bs->clk_hz / 2) {
+ cdiv = 2; /* clk_hz/2 is the fastest we can go */
+ } else if (spi_hz) {
+ /* CDIV must be a multiple of two */
+ cdiv = DIV_ROUND_UP(bs->clk_hz, spi_hz);
+ cdiv += (cdiv % 2);
+
+ if (cdiv >= 65536)
+ cdiv = 0; /* 0 is the slowest we can go */
+ } else {
+ cdiv = 0; /* 0 is the slowest we can go */
+ }
+ tfr->effective_speed_hz = cdiv ? (bs->clk_hz / cdiv) : (bs->clk_hz / 65536);
+ bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv);
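+ /*
+ * Worked example (assuming a core clock of 250 MHz, which is typical
+ * but board-dependent): a requested 10 MHz yields
+ * cdiv = DIV_ROUND_UP(250000000, 10000000) = 25, rounded up to the
+ * even value 26, i.e. an effective speed of 250 MHz / 26 ~= 9.6 MHz.
+ */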
+
+ /* handle all the 3-wire mode */
+ if (spi->mode & SPI_3WIRE && tfr->rx_buf)
+ cs |= BCM2835_SPI_CS_REN;
+
+ /* set transmit buffers and length */
+ bs->tx_buf = tfr->tx_buf;
+ bs->rx_buf = tfr->rx_buf;
+ bs->tx_len = tfr->len;
+ bs->rx_len = tfr->len;
+
+ /* Calculate the estimated time in us the transfer runs. Note that
+ * there is one idle clock cycle after each byte getting transferred,
+ * so we have 9 cycles/byte. This is used to find the number of Hz
+ * per byte per polling limit. E.g., we can transfer 1 byte in 30 us
+ * per 300,000 Hz of bus clock.
+ */
+ hz_per_byte = polling_limit_us ? (9 * 1000000) / polling_limit_us : 0;
+ byte_limit = hz_per_byte ? tfr->effective_speed_hz / hz_per_byte : 1;
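+ /*
+ * Illustration (with the default polling_limit_us of 30): at an
+ * effective speed of 5 MHz, hz_per_byte = 9000000 / 30 = 300000 and
+ * byte_limit = 5000000 / 300000 = 16, so transfers shorter than
+ * 16 bytes are polled and everything else uses interrupts or DMA.
+ */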
+
+ /* run in polling mode for short transfers */
+ if (tfr->len < byte_limit)
+ return bcm2835_spi_transfer_one_poll(ctlr, spi, tfr, cs);
+
+ /* Run in DMA mode if the conditions are right.
+ * Note that unlike poll or interrupt mode, DMA mode does not have
+ * this one-idle-clock-cycle pattern but runs the SPI clock without gaps.
+ */
+ if (ctlr->can_dma && bcm2835_spi_can_dma(ctlr, spi, tfr))
+ return bcm2835_spi_transfer_one_dma(ctlr, tfr, slv, cs);
+
+ /* run in interrupt-mode */
+ return bcm2835_spi_transfer_one_irq(ctlr, spi, tfr, cs, true);
+}
+
+static int bcm2835_spi_prepare_message(struct spi_controller *ctlr,
+ struct spi_message *msg)
+{
+ struct spi_device *spi = msg->spi;
+ struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
+ struct bcm2835_spidev *slv = spi_get_ctldata(spi);
+ int ret;
+
+ if (ctlr->can_dma) {
+ /*
+ * DMA transfers are limited to 16 bits (0 to 65535 bytes) by
+ * the SPI HW due to DLEN. Split up transfers (32-bit FIFO
+ * aligned) if the limit is exceeded.
+ */
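+ /*
+ * E.g. a hypothetical 100000-byte message would be split into
+ * chunks of 65532 + 34468 bytes before being handed to
+ * transfer_one().
+ */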
+ ret = spi_split_transfers_maxsize(ctlr, msg, 65532,
+ GFP_KERNEL | GFP_DMA);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Set up clock polarity before spi_transfer_one_message() asserts
+ * chip select to avoid a gratuitous clock signal edge.
+ */
+ bcm2835_wr(bs, BCM2835_SPI_CS, slv->prepare_cs);
+
+ return 0;
+}
+
+static void bcm2835_spi_handle_err(struct spi_controller *ctlr,
+ struct spi_message *msg)
+{
+ struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
+
+ /* if an error occurred and we have an active dma, then terminate */
+ if (ctlr->dma_tx) {
+ dmaengine_terminate_sync(ctlr->dma_tx);
+ bs->tx_dma_active = false;
+ }
+ if (ctlr->dma_rx) {
+ dmaengine_terminate_sync(ctlr->dma_rx);
+ bs->rx_dma_active = false;
+ }
+ bcm2835_spi_undo_prologue(bs);
+
+ /* and reset */
+ bcm2835_spi_reset_hw(bs);
+}
+
+static int chip_match_name(struct gpio_chip *chip, void *data)
+{
+ return !strcmp(chip->label, data);
+}
+
+static void bcm2835_spi_cleanup(struct spi_device *spi)
+{
+ struct bcm2835_spidev *slv = spi_get_ctldata(spi);
+ struct spi_controller *ctlr = spi->controller;
+
+ if (slv->clear_rx_desc)
+ dmaengine_desc_free(slv->clear_rx_desc);
+
+ if (slv->clear_rx_addr)
+ dma_unmap_single(ctlr->dma_rx->device->dev,
+ slv->clear_rx_addr,
+ sizeof(u32),
+ DMA_TO_DEVICE);
+
+ kfree(slv);
+}
+
+static int bcm2835_spi_setup_dma(struct spi_controller *ctlr,
+ struct spi_device *spi,
+ struct bcm2835_spi *bs,
+ struct bcm2835_spidev *slv)
+{
+ int ret;
+
+ if (!ctlr->dma_rx)
+ return 0;
+
+ slv->clear_rx_addr = dma_map_single(ctlr->dma_rx->device->dev,
+ &slv->clear_rx_cs,
+ sizeof(u32),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ctlr->dma_rx->device->dev, slv->clear_rx_addr)) {
+ dev_err(&spi->dev, "cannot map clear_rx_cs\n");
+ slv->clear_rx_addr = 0;
+ return -ENOMEM;
+ }
+
+ slv->clear_rx_desc = dmaengine_prep_dma_cyclic(ctlr->dma_rx,
+ slv->clear_rx_addr,
+ sizeof(u32), 0,
+ DMA_MEM_TO_DEV, 0);
+ if (!slv->clear_rx_desc) {
+ dev_err(&spi->dev, "cannot prepare clear_rx_desc\n");
+ return -ENOMEM;
+ }
+
+ ret = dmaengine_desc_set_reuse(slv->clear_rx_desc);
+ if (ret) {
+ dev_err(&spi->dev, "cannot reuse clear_rx_desc\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int bcm2835_spi_setup(struct spi_device *spi)
+{
+ struct spi_controller *ctlr = spi->controller;
+ struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
+ struct bcm2835_spidev *slv = spi_get_ctldata(spi);
+ struct gpio_chip *chip;
+ int ret;
+ u32 cs;
+
+ if (!slv) {
+ slv = kzalloc(ALIGN(sizeof(*slv), dma_get_cache_alignment()),
+ GFP_KERNEL);
+ if (!slv)
+ return -ENOMEM;
+
+ spi_set_ctldata(spi, slv);
+
+ ret = bcm2835_spi_setup_dma(ctlr, spi, bs, slv);
+ if (ret)
+ goto err_cleanup;
+ }
+
+ /*
+ * Precalculate SPI slave's CS register value for ->prepare_message():
+ * The driver always uses software-controlled GPIO chip select, hence
+ * set the hardware-controlled native chip select to an invalid value
+ * to prevent it from interfering.
+ */
+ cs = BCM2835_SPI_CS_CS_10 | BCM2835_SPI_CS_CS_01;
+ if (spi->mode & SPI_CPOL)
+ cs |= BCM2835_SPI_CS_CPOL;
+ if (spi->mode & SPI_CPHA)
+ cs |= BCM2835_SPI_CS_CPHA;
+ slv->prepare_cs = cs;
+
+ /*
+ * Precalculate SPI slave's CS register value to clear RX FIFO
+ * in case of a TX-only DMA transfer.
+ */
+ if (ctlr->dma_rx) {
+ slv->clear_rx_cs = cs | BCM2835_SPI_CS_TA |
+ BCM2835_SPI_CS_DMAEN |
+ BCM2835_SPI_CS_CLEAR_RX;
+ dma_sync_single_for_device(ctlr->dma_rx->device->dev,
+ slv->clear_rx_addr,
+ sizeof(u32),
+ DMA_TO_DEVICE);
+ }
+
+ /*
+ * Sanity-check the native chip selects.
+ */
+ if (spi->mode & SPI_NO_CS)
+ return 0;
+ /*
+ * The SPI core has successfully requested the CS GPIO line from the
+ * device tree, so we are done.
+ */
+ if (spi->cs_gpiod)
+ return 0;
+ if (spi->chip_select > 1) {
+ /* Error out if a native CS > 1 is requested: officially
+ * there is a CS2, but it is not documented which GPIO it
+ * is connected to...
+ */
+ dev_err(&spi->dev,
+ "setup: only two native chip-selects are supported\n");
+ ret = -EINVAL;
+ goto err_cleanup;
+ }
+
+ /*
+ * Translate native CS to GPIO
+ *
+ * FIXME: poking around in the gpiolib internals like this is
+ * not very good practice. Find a way to locate the real problem
+ * and fix it. Why is the GPIO descriptor in spi->cs_gpiod
+ * sometimes not assigned correctly? Erroneous device trees?
+ */
+
+ /* get the gpio chip for the base */
+ chip = gpiochip_find("pinctrl-bcm2835", chip_match_name);
+ if (!chip)
+ return 0;
+
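+ /*
+ * Note: on the usual Raspberry Pi pinmuxing, native CE0 sits on
+ * GPIO 8 and CE1 on GPIO 7, which is why the offset below is
+ * computed as 8 - chip_select (an assumption about the common board
+ * wiring, not something this driver verifies).
+ */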
+ spi->cs_gpiod = gpiochip_request_own_desc(chip, 8 - spi->chip_select,
+ DRV_NAME,
+ GPIO_LOOKUP_FLAGS_DEFAULT,
+ GPIOD_OUT_LOW);
+ if (IS_ERR(spi->cs_gpiod)) {
+ ret = PTR_ERR(spi->cs_gpiod);
+ goto err_cleanup;
+ }
+
+ /* and set up the "mode" and level */
+ dev_info(&spi->dev, "setting up native-CS%i to use GPIO\n",
+ spi->chip_select);
+
+ return 0;
+
+err_cleanup:
+ bcm2835_spi_cleanup(spi);
+ return ret;
+}
+
+static int bcm2835_spi_probe(struct platform_device *pdev)
+{
+ struct spi_controller *ctlr;
+ struct bcm2835_spi *bs;
+ int err;
+
+ ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*bs));
+ if (!ctlr)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ctlr);
+
+ ctlr->use_gpio_descriptors = true;
+ ctlr->mode_bits = BCM2835_SPI_MODE_BITS;
+ ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
+ ctlr->num_chipselect = 3;
+ ctlr->setup = bcm2835_spi_setup;
+ ctlr->cleanup = bcm2835_spi_cleanup;
+ ctlr->transfer_one = bcm2835_spi_transfer_one;
+ ctlr->handle_err = bcm2835_spi_handle_err;
+ ctlr->prepare_message = bcm2835_spi_prepare_message;
+ ctlr->dev.of_node = pdev->dev.of_node;
+
+ bs = spi_controller_get_devdata(ctlr);
+ bs->ctlr = ctlr;
+
+ bs->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(bs->regs))
+ return PTR_ERR(bs->regs);
+
+ bs->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(bs->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(bs->clk),
+ "could not get clk\n");
+
+ ctlr->max_speed_hz = clk_get_rate(bs->clk) / 2;
+
+ bs->irq = platform_get_irq(pdev, 0);
+ if (bs->irq <= 0)
+ return bs->irq ? bs->irq : -ENODEV;
+
+ clk_prepare_enable(bs->clk);
+ bs->clk_hz = clk_get_rate(bs->clk);
+
+ err = bcm2835_dma_init(ctlr, &pdev->dev, bs);
+ if (err)
+ goto out_clk_disable;
+
+ /* initialise the hardware with the default polarities */
+ bcm2835_wr(bs, BCM2835_SPI_CS,
+ BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX);
+
+ err = devm_request_irq(&pdev->dev, bs->irq, bcm2835_spi_interrupt,
+ IRQF_SHARED, dev_name(&pdev->dev), bs);
+ if (err) {
+ dev_err(&pdev->dev, "could not request IRQ: %d\n", err);
+ goto out_dma_release;
+ }
+
+ err = spi_register_controller(ctlr);
+ if (err) {
+ dev_err(&pdev->dev, "could not register SPI controller: %d\n",
+ err);
+ goto out_dma_release;
+ }
+
+ bcm2835_debugfs_create(bs, dev_name(&pdev->dev));
+
+ return 0;
+
+out_dma_release:
+ bcm2835_dma_release(ctlr, bs);
+out_clk_disable:
+ clk_disable_unprepare(bs->clk);
+ return err;
+}
+
+static int bcm2835_spi_remove(struct platform_device *pdev)
+{
+ struct spi_controller *ctlr = platform_get_drvdata(pdev);
+ struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
+
+ bcm2835_debugfs_remove(bs);
+
+ spi_unregister_controller(ctlr);
+
+ bcm2835_dma_release(ctlr, bs);
+
+ /* Clear FIFOs, and disable the HW block */
+ bcm2835_wr(bs, BCM2835_SPI_CS,
+ BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX);
+
+ clk_disable_unprepare(bs->clk);
+
+ return 0;
+}
+
+static void bcm2835_spi_shutdown(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = bcm2835_spi_remove(pdev);
+ if (ret)
+ dev_err(&pdev->dev, "failed to shutdown\n");
+}
+
+static const struct of_device_id bcm2835_spi_match[] = {
+ { .compatible = "brcm,bcm2835-spi", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, bcm2835_spi_match);
+
+static struct platform_driver bcm2835_spi_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = bcm2835_spi_match,
+ },
+ .probe = bcm2835_spi_probe,
+ .remove = bcm2835_spi_remove,
+ .shutdown = bcm2835_spi_shutdown,
+};
+module_platform_driver(bcm2835_spi_driver);
+
+MODULE_DESCRIPTION("SPI controller driver for Broadcom BCM2835");
+MODULE_AUTHOR("Chris Boot <bootc@bootc.net>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c
new file mode 100644
index 000000000..e28521922
--- /dev/null
+++ b/drivers/spi/spi-bcm2835aux.c
@@ -0,0 +1,605 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for Broadcom BCM2835 auxiliary SPI Controllers
+ *
+ * the driver does not rely on the native chipselects at all
+ * but only uses the gpio type chipselects
+ *
+ * Based on: spi-bcm2835.c
+ *
+ * Copyright (C) 2015 Martin Sperl
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+#include <linux/spinlock.h>
+
+/* define polling limits */
+static unsigned int polling_limit_us = 30;
+module_param(polling_limit_us, uint, 0664);
+MODULE_PARM_DESC(polling_limit_us,
+ "time in us to run a transfer in polling mode - if zero no polling is used\n");
+
+/*
+ * spi register defines
+ *
+ * note there is garbage in the "official" documentation,
+ * so some data is taken from the file:
+ * brcm_usrlib/dag/vmcsx/vcinclude/bcm2708_chip/aux_io.h
+ * inside of:
+ * http://www.broadcom.com/docs/support/videocore/Brcm_Android_ICS_Graphics_Stack.tar.gz
+ */
+
+/* SPI register offsets */
+#define BCM2835_AUX_SPI_CNTL0 0x00
+#define BCM2835_AUX_SPI_CNTL1 0x04
+#define BCM2835_AUX_SPI_STAT 0x08
+#define BCM2835_AUX_SPI_PEEK 0x0C
+#define BCM2835_AUX_SPI_IO 0x20
+#define BCM2835_AUX_SPI_TXHOLD 0x30
+
+/* Bitfields in CNTL0 */
+#define BCM2835_AUX_SPI_CNTL0_SPEED 0xFFF00000
+#define BCM2835_AUX_SPI_CNTL0_SPEED_MAX 0xFFF
+#define BCM2835_AUX_SPI_CNTL0_SPEED_SHIFT 20
+#define BCM2835_AUX_SPI_CNTL0_CS 0x000E0000
+#define BCM2835_AUX_SPI_CNTL0_POSTINPUT 0x00010000
+#define BCM2835_AUX_SPI_CNTL0_VAR_CS 0x00008000
+#define BCM2835_AUX_SPI_CNTL0_VAR_WIDTH 0x00004000
+#define BCM2835_AUX_SPI_CNTL0_DOUTHOLD 0x00003000
+#define BCM2835_AUX_SPI_CNTL0_ENABLE 0x00000800
+#define BCM2835_AUX_SPI_CNTL0_IN_RISING 0x00000400
+#define BCM2835_AUX_SPI_CNTL0_CLEARFIFO 0x00000200
+#define BCM2835_AUX_SPI_CNTL0_OUT_RISING 0x00000100
+#define BCM2835_AUX_SPI_CNTL0_CPOL 0x00000080
+#define BCM2835_AUX_SPI_CNTL0_MSBF_OUT 0x00000040
+#define BCM2835_AUX_SPI_CNTL0_SHIFTLEN 0x0000003F
+
+/* Bitfields in CNTL1 */
+#define BCM2835_AUX_SPI_CNTL1_CSHIGH 0x00000700
+#define BCM2835_AUX_SPI_CNTL1_TXEMPTY 0x00000080
+#define BCM2835_AUX_SPI_CNTL1_IDLE 0x00000040
+#define BCM2835_AUX_SPI_CNTL1_MSBF_IN 0x00000002
+#define BCM2835_AUX_SPI_CNTL1_KEEP_IN 0x00000001
+
+/* Bitfields in STAT */
+#define BCM2835_AUX_SPI_STAT_TX_LVL 0xFF000000
+#define BCM2835_AUX_SPI_STAT_RX_LVL 0x00FF0000
+#define BCM2835_AUX_SPI_STAT_TX_FULL 0x00000400
+#define BCM2835_AUX_SPI_STAT_TX_EMPTY 0x00000200
+#define BCM2835_AUX_SPI_STAT_RX_FULL 0x00000100
+#define BCM2835_AUX_SPI_STAT_RX_EMPTY 0x00000080
+#define BCM2835_AUX_SPI_STAT_BUSY 0x00000040
+#define BCM2835_AUX_SPI_STAT_BITCOUNT 0x0000003F
+
+struct bcm2835aux_spi {
+ void __iomem *regs;
+ struct clk *clk;
+ int irq;
+ u32 cntl[2];
+ const u8 *tx_buf;
+ u8 *rx_buf;
+ int tx_len;
+ int rx_len;
+ int pending;
+
+ u64 count_transfer_polling;
+ u64 count_transfer_irq;
+ u64 count_transfer_irq_after_poll;
+
+ struct dentry *debugfs_dir;
+};
+
+#if defined(CONFIG_DEBUG_FS)
+static void bcm2835aux_debugfs_create(struct bcm2835aux_spi *bs,
+ const char *dname)
+{
+ char name[64];
+ struct dentry *dir;
+
+ /* get full name */
+ snprintf(name, sizeof(name), "spi-bcm2835aux-%s", dname);
+
+ /* the base directory */
+ dir = debugfs_create_dir(name, NULL);
+ bs->debugfs_dir = dir;
+
+ /* the counters */
+ debugfs_create_u64("count_transfer_polling", 0444, dir,
+ &bs->count_transfer_polling);
+ debugfs_create_u64("count_transfer_irq", 0444, dir,
+ &bs->count_transfer_irq);
+ debugfs_create_u64("count_transfer_irq_after_poll", 0444, dir,
+ &bs->count_transfer_irq_after_poll);
+}
+
+static void bcm2835aux_debugfs_remove(struct bcm2835aux_spi *bs)
+{
+ debugfs_remove_recursive(bs->debugfs_dir);
+ bs->debugfs_dir = NULL;
+}
+#else
+static void bcm2835aux_debugfs_create(struct bcm2835aux_spi *bs,
+ const char *dname)
+{
+}
+
+static void bcm2835aux_debugfs_remove(struct bcm2835aux_spi *bs)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
+static inline u32 bcm2835aux_rd(struct bcm2835aux_spi *bs, unsigned int reg)
+{
+ return readl(bs->regs + reg);
+}
+
+static inline void bcm2835aux_wr(struct bcm2835aux_spi *bs, unsigned int reg,
+ u32 val)
+{
+ writel(val, bs->regs + reg);
+}
+
+static inline void bcm2835aux_rd_fifo(struct bcm2835aux_spi *bs)
+{
+ u32 data;
+ int count = min(bs->rx_len, 3);
+
+ data = bcm2835aux_rd(bs, BCM2835_AUX_SPI_IO);
+ if (bs->rx_buf) {
+ switch (count) {
+ case 3:
+ *bs->rx_buf++ = (data >> 16) & 0xff;
+ fallthrough;
+ case 2:
+ *bs->rx_buf++ = (data >> 8) & 0xff;
+ fallthrough;
+ case 1:
+ *bs->rx_buf++ = (data >> 0) & 0xff;
+ /* fallthrough - no default */
+ }
+ }
+ bs->rx_len -= count;
+ bs->pending -= count;
+}
+
+static inline void bcm2835aux_wr_fifo(struct bcm2835aux_spi *bs)
+{
+ u32 data;
+ u8 byte;
+ int count;
+ int i;
+
+ /* gather up to 3 bytes to write to the FIFO */
+ count = min(bs->tx_len, 3);
+ data = 0;
+ for (i = 0; i < count; i++) {
+ byte = bs->tx_buf ? *bs->tx_buf++ : 0;
+ data |= byte << (8 * (2 - i));
+ }
+
+ /* and set the variable bit-length */
+ data |= (count * 8) << 24;
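+ /*
+ * E.g. packing the (hypothetical) bytes 0xAA 0xBB 0xCC yields
+ * data = 0xAABBCC with a shift length of 24 bits, i.e. the word
+ * 0x18AABBCC is written to the FIFO.
+ */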
+
+ /* and decrement length */
+ bs->tx_len -= count;
+ bs->pending += count;
+
+ /* write to the correct TX-register */
+ if (bs->tx_len)
+ bcm2835aux_wr(bs, BCM2835_AUX_SPI_TXHOLD, data);
+ else
+ bcm2835aux_wr(bs, BCM2835_AUX_SPI_IO, data);
+}
+
+static void bcm2835aux_spi_reset_hw(struct bcm2835aux_spi *bs)
+{
+ /* disable spi clearing fifo and interrupts */
+ bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL1, 0);
+ bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL0,
+ BCM2835_AUX_SPI_CNTL0_CLEARFIFO);
+}
+
+static void bcm2835aux_spi_transfer_helper(struct bcm2835aux_spi *bs)
+{
+ u32 stat = bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT);
+
+ /* check if we have data to read */
+ for (; bs->rx_len && (stat & BCM2835_AUX_SPI_STAT_RX_LVL);
+ stat = bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT))
+ bcm2835aux_rd_fifo(bs);
+
+ /* check if we have data to write */
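+ /*
+ * The "pending < 12" bound below assumes each variable-width FIFO
+ * entry carries at most 3 data bytes and the FIFO is 4 entries deep
+ * (12 bytes in flight at most) - an inference, since the official
+ * documentation is unreliable as noted at the top of this file.
+ */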
+ while (bs->tx_len &&
+ (bs->pending < 12) &&
+ (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) &
+ BCM2835_AUX_SPI_STAT_TX_FULL))) {
+ bcm2835aux_wr_fifo(bs);
+ }
+}
+
+static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id)
+{
+ struct spi_master *master = dev_id;
+ struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
+
+ /* IRQ may be shared, so return if our interrupts are disabled */
+ if (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_CNTL1) &
+ (BCM2835_AUX_SPI_CNTL1_TXEMPTY | BCM2835_AUX_SPI_CNTL1_IDLE)))
+ return IRQ_NONE;
+
+ /* do common fifo handling */
+ bcm2835aux_spi_transfer_helper(bs);
+
+ if (!bs->tx_len) {
+ /* disable tx fifo empty interrupt */
+ bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL1, bs->cntl[1] |
+ BCM2835_AUX_SPI_CNTL1_IDLE);
+ }
+
+ /* and if rx_len is 0 then disable interrupts and wake up completion */
+ if (!bs->rx_len) {
+ bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL1, bs->cntl[1]);
+ spi_finalize_current_transfer(master);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int __bcm2835aux_spi_transfer_one_irq(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *tfr)
+{
+ struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
+
+ /* enable interrupts */
+ bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL1, bs->cntl[1] |
+ BCM2835_AUX_SPI_CNTL1_TXEMPTY |
+ BCM2835_AUX_SPI_CNTL1_IDLE);
+
+ /* and wait for finish... */
+ return 1;
+}
+
+static int bcm2835aux_spi_transfer_one_irq(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *tfr)
+{
+ struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
+
+ /* update statistics */
+ bs->count_transfer_irq++;
+
+ /* fill in registers and fifos before enabling interrupts */
+ bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL1, bs->cntl[1]);
+ bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL0, bs->cntl[0]);
+
+ /* fill in tx fifo with data before enabling interrupts */
+ while ((bs->tx_len) &&
+ (bs->pending < 12) &&
+ (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) &
+ BCM2835_AUX_SPI_STAT_TX_FULL))) {
+ bcm2835aux_wr_fifo(bs);
+ }
+
+ /* now run the interrupt mode */
+ return __bcm2835aux_spi_transfer_one_irq(master, spi, tfr);
+}
+
+static int bcm2835aux_spi_transfer_one_poll(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *tfr)
+{
+ struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
+ unsigned long timeout;
+
+ /* update statistics */
+ bs->count_transfer_polling++;
+
+ /* configure spi */
+ bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL1, bs->cntl[1]);
+ bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL0, bs->cntl[0]);
+
+ /* set the timeout to at least 2 jiffies */
+ timeout = jiffies + 2 + HZ * polling_limit_us / 1000000;
+
+ /* loop until the transfer is finished */
+ while (bs->rx_len) {
+
+ /* do common fifo handling */
+ bcm2835aux_spi_transfer_helper(bs);
+
+ /* if there is still data pending to read, check the timeout */
+ if (bs->rx_len && time_after(jiffies, timeout)) {
+ dev_dbg_ratelimited(&spi->dev,
+ "timeout period reached: jiffies: %lu remaining tx/rx: %d/%d - falling back to interrupt mode\n",
+ jiffies - timeout,
+ bs->tx_len, bs->rx_len);
+ /* forward to interrupt handler */
+ bs->count_transfer_irq_after_poll++;
+ return __bcm2835aux_spi_transfer_one_irq(master,
+ spi, tfr);
+ }
+ }
+
+ /* and return without waiting for completion */
+ return 0;
+}
+
+static int bcm2835aux_spi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *tfr)
+{
+ struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
+ unsigned long spi_hz, clk_hz, speed;
+ unsigned long hz_per_byte, byte_limit;
+
+ /* calculate the registers to handle
+ *
+ * Note that we use the variable-width data mode, which is not
+ * optimal for longer transfers as it wastes FIFO capacity and
+ * (potentially) results in more interrupts when transferring
+ * more than 12 bytes.
+ */
+
+ /* set clock */
+ spi_hz = tfr->speed_hz;
+ clk_hz = clk_get_rate(bs->clk);
+
+ if (spi_hz >= clk_hz / 2) {
+ speed = 0;
+ } else if (spi_hz) {
+ speed = DIV_ROUND_UP(clk_hz, 2 * spi_hz) - 1;
+ if (speed > BCM2835_AUX_SPI_CNTL0_SPEED_MAX)
+ speed = BCM2835_AUX_SPI_CNTL0_SPEED_MAX;
+ } else { /* the slowest we can go */
+ speed = BCM2835_AUX_SPI_CNTL0_SPEED_MAX;
+ }
+ /* mask out old speed from previous spi_transfer */
+ bs->cntl[0] &= ~(BCM2835_AUX_SPI_CNTL0_SPEED);
+ /* set the new speed */
+ bs->cntl[0] |= speed << BCM2835_AUX_SPI_CNTL0_SPEED_SHIFT;
+
+ tfr->effective_speed_hz = clk_hz / (2 * (speed + 1));
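+ /*
+ * Example (assuming a 250 MHz system clock feeding the block, which
+ * varies by board): a requested 10 MHz gives
+ * speed = DIV_ROUND_UP(250000000, 20000000) - 1 = 12, and thus an
+ * effective speed of 250 MHz / (2 * 13) ~= 9.6 MHz.
+ */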
+
+ /* set transmit buffers and length */
+ bs->tx_buf = tfr->tx_buf;
+ bs->rx_buf = tfr->rx_buf;
+ bs->tx_len = tfr->len;
+ bs->rx_len = tfr->len;
+ bs->pending = 0;
+
+ /* Calculate the estimated time in us the transfer runs. Note that
+ * there are two idle clock cycles after each chunk getting
+ * transferred - in our case the chunk size is 3 bytes, so we
+ * approximate this by 9 cycles/byte. This is used to find the number
+ * of Hz per byte per polling limit. E.g., we can transfer 1 byte in
+ * 30 µs per 300,000 Hz of bus clock.
+ */
+ hz_per_byte = polling_limit_us ? (9 * 1000000) / polling_limit_us : 0;
+ byte_limit = hz_per_byte ? tfr->effective_speed_hz / hz_per_byte : 1;
+
+ /* run in polling mode for short transfers */
+ if (tfr->len < byte_limit)
+ return bcm2835aux_spi_transfer_one_poll(master, spi, tfr);
+
+ /* run in interrupt mode for all others */
+ return bcm2835aux_spi_transfer_one_irq(master, spi, tfr);
+}
+
+static int bcm2835aux_spi_prepare_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct spi_device *spi = msg->spi;
+ struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
+
+ bs->cntl[0] = BCM2835_AUX_SPI_CNTL0_ENABLE |
+ BCM2835_AUX_SPI_CNTL0_VAR_WIDTH |
+ BCM2835_AUX_SPI_CNTL0_MSBF_OUT;
+ bs->cntl[1] = BCM2835_AUX_SPI_CNTL1_MSBF_IN;
+
+ /* handle all the modes */
+ if (spi->mode & SPI_CPOL) {
+ bs->cntl[0] |= BCM2835_AUX_SPI_CNTL0_CPOL;
+ bs->cntl[0] |= BCM2835_AUX_SPI_CNTL0_OUT_RISING;
+ } else {
+ bs->cntl[0] |= BCM2835_AUX_SPI_CNTL0_IN_RISING;
+ }
+ bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL1, bs->cntl[1]);
+ bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL0, bs->cntl[0]);
+
+ return 0;
+}
+
+static int bcm2835aux_spi_unprepare_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
+
+ bcm2835aux_spi_reset_hw(bs);
+
+ return 0;
+}
+
+static void bcm2835aux_spi_handle_err(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
+
+ bcm2835aux_spi_reset_hw(bs);
+}
+
+static int bcm2835aux_spi_setup(struct spi_device *spi)
+{
+ /* sanity check for native cs */
+ if (spi->mode & SPI_NO_CS)
+ return 0;
+
+ if (spi->cs_gpiod)
+ return 0;
+
+ /* For DT backwards compatibility: only support native CS on CS0.
+ * Known things not supported with the broken native CS:
+ * * multiple chip-selects: cs0-cs2 are all
+ * simultaneously asserted whenever there is a transfer -
+ * this even includes SPI_NO_CS
+ * * SPI_CS_HIGH: cs are always asserted low
+ * * cs_change: cs is deasserted after each spi_transfer
+ * * cs_delay_usec: cs is always deasserted one SCK cycle
+ * after the last transfer
+ * probably more...
+ */
+ dev_warn(&spi->dev,
+ "Native CS is not supported - please configure cs-gpio in device-tree\n");
+
+ if (spi->chip_select == 0)
+ return 0;
+
+ dev_warn(&spi->dev, "Native CS is not working for cs > 0\n");
+
+ return -EINVAL;
+}
+
+static int bcm2835aux_spi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct bcm2835aux_spi *bs;
+ unsigned long clk_hz;
+ int err;
+
+ master = devm_spi_alloc_master(&pdev->dev, sizeof(*bs));
+ if (!master)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, master);
+ master->mode_bits = (SPI_CPOL | SPI_CS_HIGH | SPI_NO_CS);
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ /* Even though the driver never officially supported native CS,
+ * allow a single native CS for legacy DT support purposes when
+ * no cs-gpio is configured.
+ * Known limitations for native CS are:
+ * * multiple chip-selects: cs0-cs2 are all simultaneously asserted
+ * whenever there is a transfer - this even includes SPI_NO_CS
+ * * SPI_CS_HIGH: is ignored - cs are always asserted low
+ * * cs_change: cs is deasserted after each spi_transfer
+ * * cs_delay_usec: cs is always deasserted one SCK cycle after
+ * a spi_transfer
+ */
+ master->num_chipselect = 1;
+ master->setup = bcm2835aux_spi_setup;
+ master->transfer_one = bcm2835aux_spi_transfer_one;
+ master->handle_err = bcm2835aux_spi_handle_err;
+ master->prepare_message = bcm2835aux_spi_prepare_message;
+ master->unprepare_message = bcm2835aux_spi_unprepare_message;
+ master->dev.of_node = pdev->dev.of_node;
+ master->use_gpio_descriptors = true;
+
+ bs = spi_master_get_devdata(master);
+
+ /* the main area */
+ bs->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(bs->regs))
+ return PTR_ERR(bs->regs);
+
+ bs->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(bs->clk)) {
+ err = PTR_ERR(bs->clk);
+ dev_err(&pdev->dev, "could not get clk: %d\n", err);
+ return err;
+ }
+
+ bs->irq = platform_get_irq(pdev, 0);
+ if (bs->irq <= 0)
+ return bs->irq ? bs->irq : -ENODEV;
+
+ /* this also enables the HW block */
+ err = clk_prepare_enable(bs->clk);
+ if (err) {
+ dev_err(&pdev->dev, "could not prepare clock: %d\n", err);
+ return err;
+ }
+
+ /* just checking if the clock returns a sane value */
+ clk_hz = clk_get_rate(bs->clk);
+ if (!clk_hz) {
+ dev_err(&pdev->dev, "clock returns 0 Hz\n");
+ err = -ENODEV;
+ goto out_clk_disable;
+ }
+
+ /* reset SPI-HW block */
+ bcm2835aux_spi_reset_hw(bs);
+
+ err = devm_request_irq(&pdev->dev, bs->irq,
+ bcm2835aux_spi_interrupt,
+ IRQF_SHARED,
+ dev_name(&pdev->dev), master);
+ if (err) {
+ dev_err(&pdev->dev, "could not request IRQ: %d\n", err);
+ goto out_clk_disable;
+ }
+
+ err = spi_register_master(master);
+ if (err) {
+ dev_err(&pdev->dev, "could not register SPI master: %d\n", err);
+ goto out_clk_disable;
+ }
+
+ bcm2835aux_debugfs_create(bs, dev_name(&pdev->dev));
+
+ return 0;
+
+out_clk_disable:
+ clk_disable_unprepare(bs->clk);
+ return err;
+}
+
+static int bcm2835aux_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
+
+ bcm2835aux_debugfs_remove(bs);
+
+ spi_unregister_master(master);
+
+ bcm2835aux_spi_reset_hw(bs);
+
+ /* disable the HW block by releasing the clock */
+ clk_disable_unprepare(bs->clk);
+
+ return 0;
+}
+
+static const struct of_device_id bcm2835aux_spi_match[] = {
+ { .compatible = "brcm,bcm2835-aux-spi", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, bcm2835aux_spi_match);
+
+static struct platform_driver bcm2835aux_spi_driver = {
+ .driver = {
+ .name = "spi-bcm2835aux",
+ .of_match_table = bcm2835aux_spi_match,
+ },
+ .probe = bcm2835aux_spi_probe,
+ .remove = bcm2835aux_spi_remove,
+};
+module_platform_driver(bcm2835aux_spi_driver);
+
+MODULE_DESCRIPTION("SPI controller driver for Broadcom BCM2835 aux");
+MODULE_AUTHOR("Martin Sperl <kernel@martin.sperl.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-bcm63xx-hsspi.c b/drivers/spi/spi-bcm63xx-hsspi.c
new file mode 100644
index 000000000..02f56fc00
--- /dev/null
+++ b/drivers/spi/spi-bcm63xx-hsspi.c
@@ -0,0 +1,544 @@
+/*
+ * Broadcom BCM63XX High Speed SPI Controller driver
+ *
+ * Copyright 2000-2010 Broadcom Corporation
+ * Copyright 2012-2013 Jonas Gorski <jogo@openwrt.org>
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/spi/spi.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/reset.h>
+#include <linux/pm_runtime.h>
+
+#define HSSPI_GLOBAL_CTRL_REG 0x0
+#define GLOBAL_CTRL_CS_POLARITY_SHIFT 0
+#define GLOBAL_CTRL_CS_POLARITY_MASK 0x000000ff
+#define GLOBAL_CTRL_PLL_CLK_CTRL_SHIFT 8
+#define GLOBAL_CTRL_PLL_CLK_CTRL_MASK 0x0000ff00
+#define GLOBAL_CTRL_CLK_GATE_SSOFF BIT(16)
+#define GLOBAL_CTRL_CLK_POLARITY BIT(17)
+#define GLOBAL_CTRL_MOSI_IDLE BIT(18)
+
+#define HSSPI_GLOBAL_EXT_TRIGGER_REG 0x4
+
+#define HSSPI_INT_STATUS_REG 0x8
+#define HSSPI_INT_STATUS_MASKED_REG 0xc
+#define HSSPI_INT_MASK_REG 0x10
+
+#define HSSPI_PINGx_CMD_DONE(i) BIT((i * 8) + 0)
+#define HSSPI_PINGx_RX_OVER(i) BIT((i * 8) + 1)
+#define HSSPI_PINGx_TX_UNDER(i) BIT((i * 8) + 2)
+#define HSSPI_PINGx_POLL_TIMEOUT(i) BIT((i * 8) + 3)
+#define HSSPI_PINGx_CTRL_INVAL(i) BIT((i * 8) + 4)
+
+#define HSSPI_INT_CLEAR_ALL 0xff001f1f
+
+#define HSSPI_PINGPONG_COMMAND_REG(x) (0x80 + (x) * 0x40)
+#define PINGPONG_CMD_COMMAND_MASK 0xf
+#define PINGPONG_COMMAND_NOOP 0
+#define PINGPONG_COMMAND_START_NOW 1
+#define PINGPONG_COMMAND_START_TRIGGER 2
+#define PINGPONG_COMMAND_HALT 3
+#define PINGPONG_COMMAND_FLUSH 4
+#define PINGPONG_CMD_PROFILE_SHIFT 8
+#define PINGPONG_CMD_SS_SHIFT 12
+
+#define HSSPI_PINGPONG_STATUS_REG(x) (0x84 + (x) * 0x40)
+
+#define HSSPI_PROFILE_CLK_CTRL_REG(x) (0x100 + (x) * 0x20)
+#define CLK_CTRL_FREQ_CTRL_MASK 0x0000ffff
+#define CLK_CTRL_SPI_CLK_2X_SEL BIT(14)
+#define CLK_CTRL_ACCUM_RST_ON_LOOP BIT(15)
+
+#define HSSPI_PROFILE_SIGNAL_CTRL_REG(x) (0x104 + (x) * 0x20)
+#define SIGNAL_CTRL_LATCH_RISING BIT(12)
+#define SIGNAL_CTRL_LAUNCH_RISING BIT(13)
+#define SIGNAL_CTRL_ASYNC_INPUT_PATH BIT(16)
+
+#define HSSPI_PROFILE_MODE_CTRL_REG(x) (0x108 + (x) * 0x20)
+#define MODE_CTRL_MULTIDATA_RD_STRT_SHIFT 8
+#define MODE_CTRL_MULTIDATA_WR_STRT_SHIFT 12
+#define MODE_CTRL_MULTIDATA_RD_SIZE_SHIFT 16
+#define MODE_CTRL_MULTIDATA_WR_SIZE_SHIFT 18
+#define MODE_CTRL_MODE_3WIRE BIT(20)
+#define MODE_CTRL_PREPENDBYTE_CNT_SHIFT 24
+
+#define HSSPI_FIFO_REG(x) (0x200 + (x) * 0x200)
+
+
+#define HSSPI_OP_MULTIBIT BIT(11)
+#define HSSPI_OP_CODE_SHIFT 13
+#define HSSPI_OP_SLEEP (0 << HSSPI_OP_CODE_SHIFT)
+#define HSSPI_OP_READ_WRITE (1 << HSSPI_OP_CODE_SHIFT)
+#define HSSPI_OP_WRITE (2 << HSSPI_OP_CODE_SHIFT)
+#define HSSPI_OP_READ (3 << HSSPI_OP_CODE_SHIFT)
+#define HSSPI_OP_SETIRQ (4 << HSSPI_OP_CODE_SHIFT)
+
+#define HSSPI_BUFFER_LEN 512
+#define HSSPI_OPCODE_LEN 2
+
+#define HSSPI_MAX_PREPEND_LEN 15
+
+#define HSSPI_MAX_SYNC_CLOCK 30000000
+
+#define HSSPI_SPI_MAX_CS 8
+#define HSSPI_BUS_NUM 1 /* 0 is legacy SPI */
+
+struct bcm63xx_hsspi {
+ struct completion done;
+ struct mutex bus_mutex;
+
+ struct platform_device *pdev;
+ struct clk *clk;
+ struct clk *pll_clk;
+ void __iomem *regs;
+ u8 __iomem *fifo;
+
+ u32 speed_hz;
+ u8 cs_polarity;
+};
+
+static void bcm63xx_hsspi_set_cs(struct bcm63xx_hsspi *bs, unsigned int cs,
+ bool active)
+{
+ u32 reg;
+
+ mutex_lock(&bs->bus_mutex);
+ reg = __raw_readl(bs->regs + HSSPI_GLOBAL_CTRL_REG);
+
+ reg &= ~BIT(cs);
+ if (active == !(bs->cs_polarity & BIT(cs)))
+ reg |= BIT(cs);
+
+ __raw_writel(reg, bs->regs + HSSPI_GLOBAL_CTRL_REG);
+ mutex_unlock(&bs->bus_mutex);
+}
+
+static void bcm63xx_hsspi_set_clk(struct bcm63xx_hsspi *bs,
+ struct spi_device *spi, int hz)
+{
+ unsigned int profile = spi->chip_select;
+ u32 reg;
+
+ reg = DIV_ROUND_UP(2048, DIV_ROUND_UP(bs->speed_hz, hz));
+ __raw_writel(CLK_CTRL_ACCUM_RST_ON_LOOP | reg,
+ bs->regs + HSSPI_PROFILE_CLK_CTRL_REG(profile));
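+ /*
+ * Example of the divider math above (with a hypothetical 400 MHz
+ * HSSPI clock): for hz = 20 MHz, DIV_ROUND_UP(400 MHz, 20 MHz) = 20
+ * and the FREQ_CTRL word becomes DIV_ROUND_UP(2048, 20) = 103.
+ */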
+
+ reg = __raw_readl(bs->regs + HSSPI_PROFILE_SIGNAL_CTRL_REG(profile));
+ if (hz > HSSPI_MAX_SYNC_CLOCK)
+ reg |= SIGNAL_CTRL_ASYNC_INPUT_PATH;
+ else
+ reg &= ~SIGNAL_CTRL_ASYNC_INPUT_PATH;
+ __raw_writel(reg, bs->regs + HSSPI_PROFILE_SIGNAL_CTRL_REG(profile));
+
+ mutex_lock(&bs->bus_mutex);
+ /* setup clock polarity */
+ reg = __raw_readl(bs->regs + HSSPI_GLOBAL_CTRL_REG);
+ reg &= ~GLOBAL_CTRL_CLK_POLARITY;
+ if (spi->mode & SPI_CPOL)
+ reg |= GLOBAL_CTRL_CLK_POLARITY;
+ __raw_writel(reg, bs->regs + HSSPI_GLOBAL_CTRL_REG);
+ mutex_unlock(&bs->bus_mutex);
+}
+
+static int bcm63xx_hsspi_do_txrx(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct bcm63xx_hsspi *bs = spi_master_get_devdata(spi->master);
+ unsigned int chip_select = spi->chip_select;
+ u16 opcode = 0;
+ int pending = t->len;
+ int step_size = HSSPI_BUFFER_LEN;
+ const u8 *tx = t->tx_buf;
+ u8 *rx = t->rx_buf;
+ u32 val = 0;
+
+ bcm63xx_hsspi_set_clk(bs, spi, t->speed_hz);
+ bcm63xx_hsspi_set_cs(bs, spi->chip_select, true);
+
+ if (tx && rx)
+ opcode = HSSPI_OP_READ_WRITE;
+ else if (tx)
+ opcode = HSSPI_OP_WRITE;
+ else if (rx)
+ opcode = HSSPI_OP_READ;
+
+ if (opcode != HSSPI_OP_READ)
+ step_size -= HSSPI_OPCODE_LEN;
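+ /*
+ * The transfer is chopped into FIFO-sized chunks below: e.g. a
+ * hypothetical 1500-byte write is issued as 510 + 510 + 480 bytes,
+ * since 2 of the 512 FIFO bytes are reserved for the opcode.
+ */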
+
+ if ((opcode == HSSPI_OP_READ && t->rx_nbits == SPI_NBITS_DUAL) ||
+ (opcode == HSSPI_OP_WRITE && t->tx_nbits == SPI_NBITS_DUAL)) {
+ opcode |= HSSPI_OP_MULTIBIT;
+
+ if (t->rx_nbits == SPI_NBITS_DUAL)
+ val |= 1 << MODE_CTRL_MULTIDATA_RD_SIZE_SHIFT;
+ if (t->tx_nbits == SPI_NBITS_DUAL)
+ val |= 1 << MODE_CTRL_MULTIDATA_WR_SIZE_SHIFT;
+ }
+
+ __raw_writel(val | 0xff,
+ bs->regs + HSSPI_PROFILE_MODE_CTRL_REG(chip_select));
+
+ while (pending > 0) {
+ int curr_step = min_t(int, step_size, pending);
+
+ reinit_completion(&bs->done);
+ if (tx) {
+ memcpy_toio(bs->fifo + HSSPI_OPCODE_LEN, tx, curr_step);
+ tx += curr_step;
+ }
+
+ __raw_writew(opcode | curr_step, bs->fifo);
+
+ /* enable interrupt */
+ __raw_writel(HSSPI_PINGx_CMD_DONE(0),
+ bs->regs + HSSPI_INT_MASK_REG);
+
+ /* start the transfer */
+ __raw_writel(!chip_select << PINGPONG_CMD_SS_SHIFT |
+ chip_select << PINGPONG_CMD_PROFILE_SHIFT |
+ PINGPONG_COMMAND_START_NOW,
+ bs->regs + HSSPI_PINGPONG_COMMAND_REG(0));
+
+ if (wait_for_completion_timeout(&bs->done, HZ) == 0) {
+ dev_err(&bs->pdev->dev, "transfer timed out!\n");
+ return -ETIMEDOUT;
+ }
+
+ if (rx) {
+ memcpy_fromio(rx, bs->fifo, curr_step);
+ rx += curr_step;
+ }
+
+ pending -= curr_step;
+ }
+
+ return 0;
+}
+
+static int bcm63xx_hsspi_setup(struct spi_device *spi)
+{
+ struct bcm63xx_hsspi *bs = spi_master_get_devdata(spi->master);
+ u32 reg;
+
+ reg = __raw_readl(bs->regs +
+ HSSPI_PROFILE_SIGNAL_CTRL_REG(spi->chip_select));
+ reg &= ~(SIGNAL_CTRL_LAUNCH_RISING | SIGNAL_CTRL_LATCH_RISING);
+ if (spi->mode & SPI_CPHA)
+ reg |= SIGNAL_CTRL_LAUNCH_RISING;
+ else
+ reg |= SIGNAL_CTRL_LATCH_RISING;
+ __raw_writel(reg, bs->regs +
+ HSSPI_PROFILE_SIGNAL_CTRL_REG(spi->chip_select));
+
+ mutex_lock(&bs->bus_mutex);
+ reg = __raw_readl(bs->regs + HSSPI_GLOBAL_CTRL_REG);
+
+ /* only change actual polarities if there is no transfer */
+ if ((reg & GLOBAL_CTRL_CS_POLARITY_MASK) == bs->cs_polarity) {
+ if (spi->mode & SPI_CS_HIGH)
+ reg |= BIT(spi->chip_select);
+ else
+ reg &= ~BIT(spi->chip_select);
+ __raw_writel(reg, bs->regs + HSSPI_GLOBAL_CTRL_REG);
+ }
+
+ if (spi->mode & SPI_CS_HIGH)
+ bs->cs_polarity |= BIT(spi->chip_select);
+ else
+ bs->cs_polarity &= ~BIT(spi->chip_select);
+
+ mutex_unlock(&bs->bus_mutex);
+
+ return 0;
+}
+
+static int bcm63xx_hsspi_transfer_one(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct bcm63xx_hsspi *bs = spi_master_get_devdata(master);
+ struct spi_transfer *t;
+ struct spi_device *spi = msg->spi;
+ int status = -EINVAL;
+ int dummy_cs;
+ u32 reg;
+
+ /* This controller does not support keeping CS active during idle.
+ * To work around this, we use the following ugly hack:
+ *
+ * a. Invert the target chip select's polarity so it will be active.
+ * b. Select a "dummy" chip select to use as the hardware target.
+ * c. Invert the dummy chip select's polarity so it will be inactive
+ * during the actual transfers.
+ * d. Tell the hardware to send to the dummy chip select. Thanks to
+ * the multiplexed nature of SPI the actual target will receive
+ * the transfer and we see its response.
+ *
+ * e. At the end restore the polarities again to their default values.
+ */
+
+ dummy_cs = !spi->chip_select;
+ bcm63xx_hsspi_set_cs(bs, dummy_cs, true);
+
+ list_for_each_entry(t, &msg->transfers, transfer_list) {
+ status = bcm63xx_hsspi_do_txrx(spi, t);
+ if (status)
+ break;
+
+ msg->actual_length += t->len;
+
+ spi_transfer_delay_exec(t);
+
+ if (t->cs_change)
+ bcm63xx_hsspi_set_cs(bs, spi->chip_select, false);
+ }
+
+ mutex_lock(&bs->bus_mutex);
+ reg = __raw_readl(bs->regs + HSSPI_GLOBAL_CTRL_REG);
+ reg &= ~GLOBAL_CTRL_CS_POLARITY_MASK;
+ reg |= bs->cs_polarity;
+ __raw_writel(reg, bs->regs + HSSPI_GLOBAL_CTRL_REG);
+ mutex_unlock(&bs->bus_mutex);
+
+ msg->status = status;
+ spi_finalize_current_message(master);
+
+ return 0;
+}
+
+static irqreturn_t bcm63xx_hsspi_interrupt(int irq, void *dev_id)
+{
+ struct bcm63xx_hsspi *bs = (struct bcm63xx_hsspi *)dev_id;
+
+ if (__raw_readl(bs->regs + HSSPI_INT_STATUS_MASKED_REG) == 0)
+ return IRQ_NONE;
+
+ __raw_writel(HSSPI_INT_CLEAR_ALL, bs->regs + HSSPI_INT_STATUS_REG);
+ __raw_writel(0, bs->regs + HSSPI_INT_MASK_REG);
+
+ complete(&bs->done);
+
+ return IRQ_HANDLED;
+}
+
+static int bcm63xx_hsspi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct bcm63xx_hsspi *bs;
+ void __iomem *regs;
+ struct device *dev = &pdev->dev;
+ struct clk *clk, *pll_clk = NULL;
+ int irq, ret;
+ u32 reg, rate, num_cs = HSSPI_SPI_MAX_CS;
+ struct reset_control *reset;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ clk = devm_clk_get(dev, "hsspi");
+
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ reset = devm_reset_control_get_optional_exclusive(dev, NULL);
+ if (IS_ERR(reset))
+ return PTR_ERR(reset);
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
+
+ ret = reset_control_reset(reset);
+ if (ret) {
+ dev_err(dev, "unable to reset device: %d\n", ret);
+ goto out_disable_clk;
+ }
+
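+	/* if the "hsspi" clock reports no rate, fall back to the rate of
+	 * the "pll" clock
+	 */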
+ rate = clk_get_rate(clk);
+ if (!rate) {
+ pll_clk = devm_clk_get(dev, "pll");
+
+ if (IS_ERR(pll_clk)) {
+ ret = PTR_ERR(pll_clk);
+ goto out_disable_clk;
+ }
+
+ ret = clk_prepare_enable(pll_clk);
+ if (ret)
+ goto out_disable_clk;
+
+ rate = clk_get_rate(pll_clk);
+ if (!rate) {
+ ret = -EINVAL;
+ goto out_disable_pll_clk;
+ }
+ }
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*bs));
+ if (!master) {
+ ret = -ENOMEM;
+ goto out_disable_pll_clk;
+ }
+
+ bs = spi_master_get_devdata(master);
+ bs->pdev = pdev;
+ bs->clk = clk;
+ bs->pll_clk = pll_clk;
+ bs->regs = regs;
+ bs->speed_hz = rate;
+ bs->fifo = (u8 __iomem *)(bs->regs + HSSPI_FIFO_REG(0));
+
+ mutex_init(&bs->bus_mutex);
+ init_completion(&bs->done);
+
+ master->dev.of_node = dev->of_node;
+ if (!dev->of_node)
+ master->bus_num = HSSPI_BUS_NUM;
+
+ of_property_read_u32(dev->of_node, "num-cs", &num_cs);
+ if (num_cs > 8) {
+ dev_warn(dev, "unsupported number of cs (%i), reducing to 8\n",
+ num_cs);
+ num_cs = HSSPI_SPI_MAX_CS;
+ }
+ master->num_chipselect = num_cs;
+ master->setup = bcm63xx_hsspi_setup;
+ master->transfer_one_message = bcm63xx_hsspi_transfer_one;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH |
+ SPI_RX_DUAL | SPI_TX_DUAL;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->auto_runtime_pm = true;
+
+ platform_set_drvdata(pdev, master);
+
+ /* Initialize the hardware */
+ __raw_writel(0, bs->regs + HSSPI_INT_MASK_REG);
+
+ /* clean up any pending interrupts */
+ __raw_writel(HSSPI_INT_CLEAR_ALL, bs->regs + HSSPI_INT_STATUS_REG);
+
+ /* read out default CS polarities */
+ reg = __raw_readl(bs->regs + HSSPI_GLOBAL_CTRL_REG);
+ bs->cs_polarity = reg & GLOBAL_CTRL_CS_POLARITY_MASK;
+ __raw_writel(reg | GLOBAL_CTRL_CLK_GATE_SSOFF,
+ bs->regs + HSSPI_GLOBAL_CTRL_REG);
+
+ ret = devm_request_irq(dev, irq, bcm63xx_hsspi_interrupt, IRQF_SHARED,
+ pdev->name, bs);
+
+ if (ret)
+ goto out_put_master;
+
+ pm_runtime_enable(&pdev->dev);
+
+ /* register and we are done */
+ ret = devm_spi_register_master(dev, master);
+ if (ret)
+ goto out_pm_disable;
+
+ return 0;
+
+out_pm_disable:
+ pm_runtime_disable(&pdev->dev);
+out_put_master:
+ spi_master_put(master);
+out_disable_pll_clk:
+ clk_disable_unprepare(pll_clk);
+out_disable_clk:
+ clk_disable_unprepare(clk);
+ return ret;
+}
+
+
+static int bcm63xx_hsspi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct bcm63xx_hsspi *bs = spi_master_get_devdata(master);
+
+ /* reset the hardware and block queue progress */
+ __raw_writel(0, bs->regs + HSSPI_INT_MASK_REG);
+ clk_disable_unprepare(bs->pll_clk);
+ clk_disable_unprepare(bs->clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int bcm63xx_hsspi_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct bcm63xx_hsspi *bs = spi_master_get_devdata(master);
+
+ spi_master_suspend(master);
+ clk_disable_unprepare(bs->pll_clk);
+ clk_disable_unprepare(bs->clk);
+
+ return 0;
+}
+
+static int bcm63xx_hsspi_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct bcm63xx_hsspi *bs = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_prepare_enable(bs->clk);
+ if (ret)
+ return ret;
+
+ if (bs->pll_clk) {
+ ret = clk_prepare_enable(bs->pll_clk);
+ if (ret) {
+ clk_disable_unprepare(bs->clk);
+ return ret;
+ }
+ }
+
+ spi_master_resume(master);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(bcm63xx_hsspi_pm_ops, bcm63xx_hsspi_suspend,
+ bcm63xx_hsspi_resume);
+
+static const struct of_device_id bcm63xx_hsspi_of_match[] = {
+ { .compatible = "brcm,bcm6328-hsspi", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, bcm63xx_hsspi_of_match);
+
+static struct platform_driver bcm63xx_hsspi_driver = {
+ .driver = {
+ .name = "bcm63xx-hsspi",
+ .pm = &bcm63xx_hsspi_pm_ops,
+ .of_match_table = bcm63xx_hsspi_of_match,
+ },
+ .probe = bcm63xx_hsspi_probe,
+ .remove = bcm63xx_hsspi_remove,
+};
+
+module_platform_driver(bcm63xx_hsspi_driver);
+
+MODULE_ALIAS("platform:bcm63xx_hsspi");
+MODULE_DESCRIPTION("Broadcom BCM63xx High Speed SPI Controller driver");
+MODULE_AUTHOR("Jonas Gorski <jogo@openwrt.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c
new file mode 100644
index 000000000..147199002
--- /dev/null
+++ b/drivers/spi/spi-bcm63xx.c
@@ -0,0 +1,683 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Broadcom BCM63xx SPI controller support
+ *
+ * Copyright (C) 2009-2012 Florian Fainelli <florian@openwrt.org>
+ * Copyright (C) 2010 Tanguy Bouzeloc <tanguy.bouzeloc@efixo.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/spi/spi.h>
+#include <linux/completion.h>
+#include <linux/err.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/reset.h>
+
+/* BCM 6338/6348 SPI core */
+#define SPI_6348_RSET_SIZE 64
+#define SPI_6348_CMD			0x00	/* 16-bit register */
+#define SPI_6348_INT_STATUS 0x02
+#define SPI_6348_INT_MASK_ST 0x03
+#define SPI_6348_INT_MASK 0x04
+#define SPI_6348_ST 0x05
+#define SPI_6348_CLK_CFG 0x06
+#define SPI_6348_FILL_BYTE 0x07
+#define SPI_6348_MSG_TAIL 0x09
+#define SPI_6348_RX_TAIL 0x0b
+#define SPI_6348_MSG_CTL		0x40	/* 8-bit register */
+#define SPI_6348_MSG_CTL_WIDTH 8
+#define SPI_6348_MSG_DATA 0x41
+#define SPI_6348_MSG_DATA_SIZE 0x3f
+#define SPI_6348_RX_DATA 0x80
+#define SPI_6348_RX_DATA_SIZE 0x3f
+
+/* BCM 3368/6358/6262/6368 SPI core */
+#define SPI_6358_RSET_SIZE 1804
+#define SPI_6358_MSG_CTL		0x00	/* 16-bit register */
+#define SPI_6358_MSG_CTL_WIDTH 16
+#define SPI_6358_MSG_DATA 0x02
+#define SPI_6358_MSG_DATA_SIZE 0x21e
+#define SPI_6358_RX_DATA 0x400
+#define SPI_6358_RX_DATA_SIZE 0x220
+#define SPI_6358_CMD			0x700	/* 16-bit register */
+#define SPI_6358_INT_STATUS 0x702
+#define SPI_6358_INT_MASK_ST 0x703
+#define SPI_6358_INT_MASK 0x704
+#define SPI_6358_ST 0x705
+#define SPI_6358_CLK_CFG 0x706
+#define SPI_6358_FILL_BYTE 0x707
+#define SPI_6358_MSG_TAIL 0x709
+#define SPI_6358_RX_TAIL 0x70B
+
+/* Shared SPI definitions */
+
+/* Message configuration */
+#define SPI_FD_RW 0x00
+#define SPI_HD_W 0x01
+#define SPI_HD_R 0x02
+#define SPI_BYTE_CNT_SHIFT 0
+#define SPI_6348_MSG_TYPE_SHIFT 6
+#define SPI_6358_MSG_TYPE_SHIFT 14
+
+/* Command */
+#define SPI_CMD_NOOP 0x00
+#define SPI_CMD_SOFT_RESET 0x01
+#define SPI_CMD_HARD_RESET 0x02
+#define SPI_CMD_START_IMMEDIATE 0x03
+#define SPI_CMD_COMMAND_SHIFT 0
+#define SPI_CMD_COMMAND_MASK 0x000f
+#define SPI_CMD_DEVICE_ID_SHIFT 4
+#define SPI_CMD_PREPEND_BYTE_CNT_SHIFT 8
+#define SPI_CMD_ONE_BYTE_SHIFT 11
+#define SPI_CMD_ONE_WIRE_SHIFT 12
+#define SPI_DEV_ID_0 0
+#define SPI_DEV_ID_1 1
+#define SPI_DEV_ID_2 2
+#define SPI_DEV_ID_3 3
+
+/* Interrupt mask */
+#define SPI_INTR_CMD_DONE 0x01
+#define SPI_INTR_RX_OVERFLOW 0x02
+#define SPI_INTR_TX_UNDERFLOW 0x04
+#define SPI_INTR_TX_OVERFLOW 0x08
+#define SPI_INTR_RX_UNDERFLOW 0x10
+#define SPI_INTR_CLEAR_ALL 0x1f
+
+/* Status */
+#define SPI_RX_EMPTY 0x02
+#define SPI_CMD_BUSY 0x04
+#define SPI_SERIAL_BUSY 0x08
+
+/* Clock configuration */
+#define SPI_CLK_20MHZ 0x00
+#define SPI_CLK_0_391MHZ 0x01
+#define SPI_CLK_0_781MHZ 0x02 /* default */
+#define SPI_CLK_1_563MHZ 0x03
+#define SPI_CLK_3_125MHZ 0x04
+#define SPI_CLK_6_250MHZ 0x05
+#define SPI_CLK_12_50MHZ 0x06
+#define SPI_CLK_MASK 0x07
+#define SPI_SSOFFTIME_MASK 0x38
+#define SPI_SSOFFTIME_SHIFT 3
+#define SPI_BYTE_SWAP 0x80
+
+enum bcm63xx_regs_spi {
+ SPI_CMD,
+ SPI_INT_STATUS,
+ SPI_INT_MASK_ST,
+ SPI_INT_MASK,
+ SPI_ST,
+ SPI_CLK_CFG,
+ SPI_FILL_BYTE,
+ SPI_MSG_TAIL,
+ SPI_RX_TAIL,
+ SPI_MSG_CTL,
+ SPI_MSG_DATA,
+ SPI_RX_DATA,
+ SPI_MSG_TYPE_SHIFT,
+ SPI_MSG_CTL_WIDTH,
+ SPI_MSG_DATA_SIZE,
+};
+
+#define BCM63XX_SPI_MAX_PREPEND 7
+
+#define BCM63XX_SPI_MAX_CS 8
+#define BCM63XX_SPI_BUS_NUM 0
+
+struct bcm63xx_spi {
+ struct completion done;
+
+ void __iomem *regs;
+ int irq;
+
+ /* Platform data */
+ const unsigned long *reg_offsets;
+ unsigned int fifo_size;
+ unsigned int msg_type_shift;
+ unsigned int msg_ctl_width;
+
+ /* data iomem */
+ u8 __iomem *tx_io;
+ const u8 __iomem *rx_io;
+
+ struct clk *clk;
+ struct platform_device *pdev;
+};
+
+static inline u8 bcm_spi_readb(struct bcm63xx_spi *bs,
+ unsigned int offset)
+{
+ return readb(bs->regs + bs->reg_offsets[offset]);
+}
+
+static inline u16 bcm_spi_readw(struct bcm63xx_spi *bs,
+ unsigned int offset)
+{
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ return ioread16be(bs->regs + bs->reg_offsets[offset]);
+#else
+ return readw(bs->regs + bs->reg_offsets[offset]);
+#endif
+}
+
+static inline void bcm_spi_writeb(struct bcm63xx_spi *bs,
+ u8 value, unsigned int offset)
+{
+ writeb(value, bs->regs + bs->reg_offsets[offset]);
+}
+
+static inline void bcm_spi_writew(struct bcm63xx_spi *bs,
+ u16 value, unsigned int offset)
+{
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ iowrite16be(value, bs->regs + bs->reg_offsets[offset]);
+#else
+ writew(value, bs->regs + bs->reg_offsets[offset]);
+#endif
+}
+
+static const unsigned int bcm63xx_spi_freq_table[SPI_CLK_MASK][2] = {
+ { 20000000, SPI_CLK_20MHZ },
+ { 12500000, SPI_CLK_12_50MHZ },
+ { 6250000, SPI_CLK_6_250MHZ },
+ { 3125000, SPI_CLK_3_125MHZ },
+ { 1563000, SPI_CLK_1_563MHZ },
+ { 781000, SPI_CLK_0_781MHZ },
+ { 391000, SPI_CLK_0_391MHZ }
+};
+
+static void bcm63xx_spi_setup_transfer(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct bcm63xx_spi *bs = spi_master_get_devdata(spi->master);
+ u8 clk_cfg, reg;
+ int i;
+
+ /* Default to lowest clock configuration */
+ clk_cfg = SPI_CLK_0_391MHZ;
+
+ /* Find the closest clock configuration */
+ for (i = 0; i < SPI_CLK_MASK; i++) {
+ if (t->speed_hz >= bcm63xx_spi_freq_table[i][0]) {
+ clk_cfg = bcm63xx_spi_freq_table[i][1];
+ break;
+ }
+ }
+
+ /* clear existing clock configuration bits of the register */
+ reg = bcm_spi_readb(bs, SPI_CLK_CFG);
+ reg &= ~SPI_CLK_MASK;
+ reg |= clk_cfg;
+
+ bcm_spi_writeb(bs, reg, SPI_CLK_CFG);
+ dev_dbg(&spi->dev, "Setting clock register to %02x (hz %d)\n",
+ clk_cfg, t->speed_hz);
+}
+
+/* the spi->mode bits understood by this driver: */
+#define MODEBITS (SPI_CPOL | SPI_CPHA)
+
+static int bcm63xx_txrx_bufs(struct spi_device *spi, struct spi_transfer *first,
+ unsigned int num_transfers)
+{
+ struct bcm63xx_spi *bs = spi_master_get_devdata(spi->master);
+ u16 msg_ctl;
+ u16 cmd;
+ unsigned int i, timeout = 0, prepend_len = 0, len = 0;
+ struct spi_transfer *t = first;
+ bool do_rx = false;
+ bool do_tx = false;
+
+ /* Disable the CMD_DONE interrupt */
+ bcm_spi_writeb(bs, 0, SPI_INT_MASK);
+
+ dev_dbg(&spi->dev, "txrx: tx %p, rx %p, len %d\n",
+ t->tx_buf, t->rx_buf, t->len);
+
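+	/*
+	 * A short leading tx-only transfer (at most BCM63XX_SPI_MAX_PREPEND
+	 * bytes, typically a command or address) can be sent as a hardware
+	 * "prepend" in front of the remaining data.
+	 */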
+ if (num_transfers > 1 && t->tx_buf && t->len <= BCM63XX_SPI_MAX_PREPEND)
+ prepend_len = t->len;
+
+ /* prepare the buffer */
+ for (i = 0; i < num_transfers; i++) {
+ if (t->tx_buf) {
+ do_tx = true;
+ memcpy_toio(bs->tx_io + len, t->tx_buf, t->len);
+
+ /* don't prepend more than one tx */
+ if (t != first)
+ prepend_len = 0;
+ }
+
+ if (t->rx_buf) {
+ do_rx = true;
+ /* prepend is half-duplex write only */
+ if (t == first)
+ prepend_len = 0;
+ }
+
+ len += t->len;
+
+ t = list_entry(t->transfer_list.next, struct spi_transfer,
+ transfer_list);
+ }
+
+ reinit_completion(&bs->done);
+
+ /* Fill in the Message control register */
+ msg_ctl = (len << SPI_BYTE_CNT_SHIFT);
+
+ if (do_rx && do_tx && prepend_len == 0)
+ msg_ctl |= (SPI_FD_RW << bs->msg_type_shift);
+ else if (do_rx)
+ msg_ctl |= (SPI_HD_R << bs->msg_type_shift);
+ else if (do_tx)
+ msg_ctl |= (SPI_HD_W << bs->msg_type_shift);
+
+ switch (bs->msg_ctl_width) {
+ case 8:
+ bcm_spi_writeb(bs, msg_ctl, SPI_MSG_CTL);
+ break;
+ case 16:
+ bcm_spi_writew(bs, msg_ctl, SPI_MSG_CTL);
+ break;
+ }
+
+ /* Issue the transfer */
+ cmd = SPI_CMD_START_IMMEDIATE;
+ cmd |= (prepend_len << SPI_CMD_PREPEND_BYTE_CNT_SHIFT);
+ cmd |= (spi->chip_select << SPI_CMD_DEVICE_ID_SHIFT);
+ bcm_spi_writew(bs, cmd, SPI_CMD);
+
+ /* Enable the CMD_DONE interrupt */
+ bcm_spi_writeb(bs, SPI_INTR_CMD_DONE, SPI_INT_MASK);
+
+ timeout = wait_for_completion_timeout(&bs->done, HZ);
+ if (!timeout)
+ return -ETIMEDOUT;
+
+ if (!do_rx)
+ return 0;
+
+ len = 0;
+ t = first;
+ /* Read out all the data */
+ for (i = 0; i < num_transfers; i++) {
+ if (t->rx_buf)
+ memcpy_fromio(t->rx_buf, bs->rx_io + len, t->len);
+
+ if (t != first || prepend_len == 0)
+ len += t->len;
+
+ t = list_entry(t->transfer_list.next, struct spi_transfer,
+ transfer_list);
+ }
+
+ return 0;
+}
+
+static int bcm63xx_spi_transfer_one(struct spi_master *master,
+ struct spi_message *m)
+{
+ struct bcm63xx_spi *bs = spi_master_get_devdata(master);
+ struct spi_transfer *t, *first = NULL;
+ struct spi_device *spi = m->spi;
+ int status = 0;
+ unsigned int n_transfers = 0, total_len = 0;
+ bool can_use_prepend = false;
+
+ /*
+ * This SPI controller does not support keeping CS active after a
+ * transfer.
+	 * Work around this by merging as many transfers as we can into one
+	 * big full-duplex transfer.
+ */
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ if (!first)
+ first = t;
+
+ n_transfers++;
+ total_len += t->len;
+
+ if (n_transfers == 2 && !first->rx_buf && !t->tx_buf &&
+ first->len <= BCM63XX_SPI_MAX_PREPEND)
+ can_use_prepend = true;
+ else if (can_use_prepend && t->tx_buf)
+ can_use_prepend = false;
+
+ /* we can only transfer one fifo worth of data */
+ if ((can_use_prepend &&
+ total_len > (bs->fifo_size + BCM63XX_SPI_MAX_PREPEND)) ||
+ (!can_use_prepend && total_len > bs->fifo_size)) {
+ dev_err(&spi->dev, "unable to do transfers larger than FIFO size (%i > %i)\n",
+ total_len, bs->fifo_size);
+ status = -EINVAL;
+ goto exit;
+ }
+
+ /* all combined transfers have to have the same speed */
+ if (t->speed_hz != first->speed_hz) {
+ dev_err(&spi->dev, "unable to change speed between transfers\n");
+ status = -EINVAL;
+ goto exit;
+ }
+
+ /* CS will be deasserted directly after transfer */
+ if (t->delay.value) {
+ dev_err(&spi->dev, "unable to keep CS asserted after transfer\n");
+ status = -EINVAL;
+ goto exit;
+ }
+
+ if (t->cs_change ||
+ list_is_last(&t->transfer_list, &m->transfers)) {
+ /* configure adapter for a new transfer */
+ bcm63xx_spi_setup_transfer(spi, first);
+
+ /* send the data */
+ status = bcm63xx_txrx_bufs(spi, first, n_transfers);
+ if (status)
+ goto exit;
+
+ m->actual_length += total_len;
+
+ first = NULL;
+ n_transfers = 0;
+ total_len = 0;
+ can_use_prepend = false;
+ }
+ }
+exit:
+ m->status = status;
+ spi_finalize_current_message(master);
+
+ return 0;
+}
+
+/* This driver supports single master mode only. Hence
+ * CMD_DONE is the only interrupt we care about
+ */
+static irqreturn_t bcm63xx_spi_interrupt(int irq, void *dev_id)
+{
+ struct spi_master *master = (struct spi_master *)dev_id;
+ struct bcm63xx_spi *bs = spi_master_get_devdata(master);
+ u8 intr;
+
+	/* Read interrupts and clear them immediately */
+ intr = bcm_spi_readb(bs, SPI_INT_STATUS);
+ bcm_spi_writeb(bs, SPI_INTR_CLEAR_ALL, SPI_INT_STATUS);
+ bcm_spi_writeb(bs, 0, SPI_INT_MASK);
+
+ /* A transfer completed */
+ if (intr & SPI_INTR_CMD_DONE)
+ complete(&bs->done);
+
+ return IRQ_HANDLED;
+}
+
+static size_t bcm63xx_spi_max_length(struct spi_device *spi)
+{
+ struct bcm63xx_spi *bs = spi_master_get_devdata(spi->master);
+
+ return bs->fifo_size;
+}
+
+static const unsigned long bcm6348_spi_reg_offsets[] = {
+ [SPI_CMD] = SPI_6348_CMD,
+ [SPI_INT_STATUS] = SPI_6348_INT_STATUS,
+ [SPI_INT_MASK_ST] = SPI_6348_INT_MASK_ST,
+ [SPI_INT_MASK] = SPI_6348_INT_MASK,
+ [SPI_ST] = SPI_6348_ST,
+ [SPI_CLK_CFG] = SPI_6348_CLK_CFG,
+ [SPI_FILL_BYTE] = SPI_6348_FILL_BYTE,
+ [SPI_MSG_TAIL] = SPI_6348_MSG_TAIL,
+ [SPI_RX_TAIL] = SPI_6348_RX_TAIL,
+ [SPI_MSG_CTL] = SPI_6348_MSG_CTL,
+ [SPI_MSG_DATA] = SPI_6348_MSG_DATA,
+ [SPI_RX_DATA] = SPI_6348_RX_DATA,
+ [SPI_MSG_TYPE_SHIFT] = SPI_6348_MSG_TYPE_SHIFT,
+ [SPI_MSG_CTL_WIDTH] = SPI_6348_MSG_CTL_WIDTH,
+ [SPI_MSG_DATA_SIZE] = SPI_6348_MSG_DATA_SIZE,
+};
+
+static const unsigned long bcm6358_spi_reg_offsets[] = {
+ [SPI_CMD] = SPI_6358_CMD,
+ [SPI_INT_STATUS] = SPI_6358_INT_STATUS,
+ [SPI_INT_MASK_ST] = SPI_6358_INT_MASK_ST,
+ [SPI_INT_MASK] = SPI_6358_INT_MASK,
+ [SPI_ST] = SPI_6358_ST,
+ [SPI_CLK_CFG] = SPI_6358_CLK_CFG,
+ [SPI_FILL_BYTE] = SPI_6358_FILL_BYTE,
+ [SPI_MSG_TAIL] = SPI_6358_MSG_TAIL,
+ [SPI_RX_TAIL] = SPI_6358_RX_TAIL,
+ [SPI_MSG_CTL] = SPI_6358_MSG_CTL,
+ [SPI_MSG_DATA] = SPI_6358_MSG_DATA,
+ [SPI_RX_DATA] = SPI_6358_RX_DATA,
+ [SPI_MSG_TYPE_SHIFT] = SPI_6358_MSG_TYPE_SHIFT,
+ [SPI_MSG_CTL_WIDTH] = SPI_6358_MSG_CTL_WIDTH,
+ [SPI_MSG_DATA_SIZE] = SPI_6358_MSG_DATA_SIZE,
+};
+
+static const struct platform_device_id bcm63xx_spi_dev_match[] = {
+ {
+ .name = "bcm6348-spi",
+ .driver_data = (unsigned long)bcm6348_spi_reg_offsets,
+ },
+ {
+ .name = "bcm6358-spi",
+ .driver_data = (unsigned long)bcm6358_spi_reg_offsets,
+ },
+ {
+ },
+};
+
+static const struct of_device_id bcm63xx_spi_of_match[] = {
+ { .compatible = "brcm,bcm6348-spi", .data = &bcm6348_spi_reg_offsets },
+ { .compatible = "brcm,bcm6358-spi", .data = &bcm6358_spi_reg_offsets },
+ { },
+};
+
+static int bcm63xx_spi_probe(struct platform_device *pdev)
+{
+ struct resource *r;
+ const unsigned long *bcm63xx_spireg;
+ struct device *dev = &pdev->dev;
+ int irq, bus_num;
+ struct spi_master *master;
+ struct clk *clk;
+ struct bcm63xx_spi *bs;
+ int ret;
+ u32 num_cs = BCM63XX_SPI_MAX_CS;
+ struct reset_control *reset;
+
+ if (dev->of_node) {
+ const struct of_device_id *match;
+
+ match = of_match_node(bcm63xx_spi_of_match, dev->of_node);
+ if (!match)
+ return -EINVAL;
+ bcm63xx_spireg = match->data;
+
+ of_property_read_u32(dev->of_node, "num-cs", &num_cs);
+ if (num_cs > BCM63XX_SPI_MAX_CS) {
+ dev_warn(dev, "unsupported number of cs (%i), reducing to 8\n",
+ num_cs);
+ num_cs = BCM63XX_SPI_MAX_CS;
+ }
+
+ bus_num = -1;
+ } else if (pdev->id_entry->driver_data) {
+ const struct platform_device_id *match = pdev->id_entry;
+
+ bcm63xx_spireg = (const unsigned long *)match->driver_data;
+ bus_num = BCM63XX_SPI_BUS_NUM;
+ } else {
+ return -EINVAL;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ clk = devm_clk_get(dev, "spi");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "no clock for device\n");
+ return PTR_ERR(clk);
+ }
+
+ reset = devm_reset_control_get_optional_exclusive(dev, NULL);
+ if (IS_ERR(reset))
+ return PTR_ERR(reset);
+
+ master = spi_alloc_master(dev, sizeof(*bs));
+ if (!master) {
+ dev_err(dev, "out of memory\n");
+ return -ENOMEM;
+ }
+
+ bs = spi_master_get_devdata(master);
+ init_completion(&bs->done);
+
+ platform_set_drvdata(pdev, master);
+ bs->pdev = pdev;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ bs->regs = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(bs->regs)) {
+ ret = PTR_ERR(bs->regs);
+ goto out_err;
+ }
+
+ bs->irq = irq;
+ bs->clk = clk;
+ bs->reg_offsets = bcm63xx_spireg;
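+	/* the MSG_DATA_SIZE entry of the offsets table doubles as the FIFO size */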
+ bs->fifo_size = bs->reg_offsets[SPI_MSG_DATA_SIZE];
+
+ ret = devm_request_irq(&pdev->dev, irq, bcm63xx_spi_interrupt, 0,
+ pdev->name, master);
+ if (ret) {
+ dev_err(dev, "unable to request irq\n");
+ goto out_err;
+ }
+
+ master->dev.of_node = dev->of_node;
+ master->bus_num = bus_num;
+ master->num_chipselect = num_cs;
+ master->transfer_one_message = bcm63xx_spi_transfer_one;
+ master->mode_bits = MODEBITS;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->max_transfer_size = bcm63xx_spi_max_length;
+ master->max_message_size = bcm63xx_spi_max_length;
+ master->auto_runtime_pm = true;
+ bs->msg_type_shift = bs->reg_offsets[SPI_MSG_TYPE_SHIFT];
+ bs->msg_ctl_width = bs->reg_offsets[SPI_MSG_CTL_WIDTH];
+ bs->tx_io = (u8 *)(bs->regs + bs->reg_offsets[SPI_MSG_DATA]);
+ bs->rx_io = (const u8 *)(bs->regs + bs->reg_offsets[SPI_RX_DATA]);
+
+ /* Initialize hardware */
+ ret = clk_prepare_enable(bs->clk);
+ if (ret)
+ goto out_err;
+
+ ret = reset_control_reset(reset);
+ if (ret) {
+ dev_err(dev, "unable to reset device: %d\n", ret);
+ goto out_clk_disable;
+ }
+
+ bcm_spi_writeb(bs, SPI_INTR_CLEAR_ALL, SPI_INT_STATUS);
+
+ pm_runtime_enable(&pdev->dev);
+
+ /* register and we are done */
+ ret = devm_spi_register_master(dev, master);
+ if (ret) {
+ dev_err(dev, "spi register failed\n");
+ goto out_pm_disable;
+ }
+
+ dev_info(dev, "at %pr (irq %d, FIFOs size %d)\n",
+ r, irq, bs->fifo_size);
+
+ return 0;
+
+out_pm_disable:
+ pm_runtime_disable(&pdev->dev);
+out_clk_disable:
+ clk_disable_unprepare(clk);
+out_err:
+ spi_master_put(master);
+ return ret;
+}
+
+static int bcm63xx_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct bcm63xx_spi *bs = spi_master_get_devdata(master);
+
+ /* reset spi block */
+ bcm_spi_writeb(bs, 0, SPI_INT_MASK);
+
+ /* HW shutdown */
+ clk_disable_unprepare(bs->clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int bcm63xx_spi_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct bcm63xx_spi *bs = spi_master_get_devdata(master);
+
+ spi_master_suspend(master);
+
+ clk_disable_unprepare(bs->clk);
+
+ return 0;
+}
+
+static int bcm63xx_spi_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct bcm63xx_spi *bs = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_prepare_enable(bs->clk);
+ if (ret)
+ return ret;
+
+ spi_master_resume(master);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops bcm63xx_spi_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(bcm63xx_spi_suspend, bcm63xx_spi_resume)
+};
+
+static struct platform_driver bcm63xx_spi_driver = {
+ .driver = {
+ .name = "bcm63xx-spi",
+ .pm = &bcm63xx_spi_pm_ops,
+ .of_match_table = bcm63xx_spi_of_match,
+ },
+ .id_table = bcm63xx_spi_dev_match,
+ .probe = bcm63xx_spi_probe,
+ .remove = bcm63xx_spi_remove,
+};
+
+module_platform_driver(bcm63xx_spi_driver);
+
+MODULE_ALIAS("platform:bcm63xx_spi");
+MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
+MODULE_AUTHOR("Tanguy Bouzeloc <tanguy.bouzeloc@efixo.com>");
+MODULE_DESCRIPTION("Broadcom BCM63xx SPI Controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-bitbang-txrx.h b/drivers/spi/spi-bitbang-txrx.h
new file mode 100644
index 000000000..2dcbe166d
--- /dev/null
+++ b/drivers/spi/spi-bitbang-txrx.h
@@ -0,0 +1,176 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Mix this utility code with some glue code to get one of several types of
+ * simple SPI master driver. Two do polled word-at-a-time I/O:
+ *
+ * - GPIO/parport bitbangers. Provide chipselect() and txrx_word[](),
+ * expanding the per-word routines from the inline templates below.
+ *
+ * - Drivers for controllers resembling bare shift registers. Provide
+ * chipselect() and txrx_word[](), with custom setup()/cleanup() methods
+ * that use your controller's clock and chipselect registers.
+ *
+ * Some hardware works well with requests at spi_transfer scope:
+ *
+ * - Drivers leveraging smarter hardware, with fifos or DMA; or for half
+ * duplex (MicroWire) controllers. Provide chipselect() and txrx_bufs(),
+ * and custom setup()/cleanup() methods.
+ */
+
+/*
+ * The code that knows what GPIO pins do what should have declared four
+ * functions, ideally as inlines, before including this header:
+ *
+ * void setsck(struct spi_device *, int is_on);
+ * void setmosi(struct spi_device *, int is_on);
+ * int getmiso(struct spi_device *);
+ * void spidelay(unsigned);
+ *
+ * setsck()'s is_on parameter is a zero/nonzero boolean.
+ *
+ * setmosi()'s is_on parameter is a zero/nonzero boolean.
+ *
+ * getmiso() is required to return 0 or 1 only. Any other value is invalid
+ * and will result in improper operation.
+ *
+ * A non-inlined routine would call bitbang_txrx_*() routines. The
+ * main loop could easily compile down to a handful of instructions,
+ * especially if the delay is a NOP (to run at peak speed).
+ *
+ * Since this is software, the timings may not be exactly what your board's
+ * chips need ... there may be several reasons you'd need to tweak timings
+ * in these routines, not just to make it faster or slower to match a
+ * particular CPU clock rate.
+ *
+ * ToDo: Maybe the bitrev macros can be used to improve the code?
+ */
+
+static inline u32
+bitbang_txrx_be_cpha0(struct spi_device *spi,
+ unsigned nsecs, unsigned cpol, unsigned flags,
+ u32 word, u8 bits)
+{
+ /* if (cpol == 0) this is SPI_MODE_0; else this is SPI_MODE_2 */
+
+ u32 oldbit = (!(word & (1<<(bits-1)))) << 31;
+ /* clock starts at inactive polarity */
+ for (word <<= (32 - bits); likely(bits); bits--) {
+
+ /* setup MSB (to slave) on trailing edge */
+ if ((flags & SPI_MASTER_NO_TX) == 0) {
+ if ((word & (1 << 31)) != oldbit) {
+ setmosi(spi, word & (1 << 31));
+ oldbit = word & (1 << 31);
+ }
+ }
+ spidelay(nsecs); /* T(setup) */
+
+ setsck(spi, !cpol);
+ spidelay(nsecs);
+
+ /* sample MSB (from slave) on leading edge */
+ word <<= 1;
+ if ((flags & SPI_MASTER_NO_RX) == 0)
+ word |= getmiso(spi);
+ setsck(spi, cpol);
+ }
+ return word;
+}
+
+static inline u32
+bitbang_txrx_be_cpha1(struct spi_device *spi,
+ unsigned nsecs, unsigned cpol, unsigned flags,
+ u32 word, u8 bits)
+{
+ /* if (cpol == 0) this is SPI_MODE_1; else this is SPI_MODE_3 */
+
+ u32 oldbit = (!(word & (1<<(bits-1)))) << 31;
+ /* clock starts at inactive polarity */
+ for (word <<= (32 - bits); likely(bits); bits--) {
+
+ /* setup MSB (to slave) on leading edge */
+ setsck(spi, !cpol);
+ if ((flags & SPI_MASTER_NO_TX) == 0) {
+ if ((word & (1 << 31)) != oldbit) {
+ setmosi(spi, word & (1 << 31));
+ oldbit = word & (1 << 31);
+ }
+ }
+ spidelay(nsecs); /* T(setup) */
+
+ setsck(spi, cpol);
+ spidelay(nsecs);
+
+ /* sample MSB (from slave) on trailing edge */
+ word <<= 1;
+ if ((flags & SPI_MASTER_NO_RX) == 0)
+ word |= getmiso(spi);
+ }
+ return word;
+}
+
+static inline u32
+bitbang_txrx_le_cpha0(struct spi_device *spi,
+ unsigned int nsecs, unsigned int cpol, unsigned int flags,
+ u32 word, u8 bits)
+{
+ /* if (cpol == 0) this is SPI_MODE_0; else this is SPI_MODE_2 */
+
+ u8 rxbit = bits - 1;
+ u32 oldbit = !(word & 1);
+ /* clock starts at inactive polarity */
+ for (; likely(bits); bits--) {
+
+ /* setup LSB (to slave) on trailing edge */
+ if ((flags & SPI_MASTER_NO_TX) == 0) {
+ if ((word & 1) != oldbit) {
+ setmosi(spi, word & 1);
+ oldbit = word & 1;
+ }
+ }
+ spidelay(nsecs); /* T(setup) */
+
+ setsck(spi, !cpol);
+ spidelay(nsecs);
+
+ /* sample LSB (from slave) on leading edge */
+ word >>= 1;
+ if ((flags & SPI_MASTER_NO_RX) == 0)
+ word |= getmiso(spi) << rxbit;
+ setsck(spi, cpol);
+ }
+ return word;
+}
+
+static inline u32
+bitbang_txrx_le_cpha1(struct spi_device *spi,
+ unsigned int nsecs, unsigned int cpol, unsigned int flags,
+ u32 word, u8 bits)
+{
+ /* if (cpol == 0) this is SPI_MODE_1; else this is SPI_MODE_3 */
+
+ u8 rxbit = bits - 1;
+ u32 oldbit = !(word & 1);
+ /* clock starts at inactive polarity */
+ for (; likely(bits); bits--) {
+
+ /* setup LSB (to slave) on leading edge */
+ setsck(spi, !cpol);
+ if ((flags & SPI_MASTER_NO_TX) == 0) {
+ if ((word & 1) != oldbit) {
+ setmosi(spi, word & 1);
+ oldbit = word & 1;
+ }
+ }
+ spidelay(nsecs); /* T(setup) */
+
+ setsck(spi, cpol);
+ spidelay(nsecs);
+
+ /* sample LSB (from slave) on trailing edge */
+ word >>= 1;
+ if ((flags & SPI_MASTER_NO_RX) == 0)
+ word |= getmiso(spi) << rxbit;
+ }
+ return word;
+}
diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c
new file mode 100644
index 000000000..27d0087f8
--- /dev/null
+++ b/drivers/spi/spi-bitbang.c
@@ -0,0 +1,453 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * polling/bitbanging SPI master controller driver utilities
+ */
+
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+
+#define SPI_BITBANG_CS_DELAY 100
+
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * FIRST PART (OPTIONAL): word-at-a-time spi_transfer support.
+ * Use this for GPIO or shift-register level hardware APIs.
+ *
+ * spi_bitbang_cs is in spi_device->controller_state, which is unavailable
+ * to glue code. These bitbang setup() and cleanup() routines are always
+ * used, though maybe they're called from controller-aware code.
+ *
+ * chipselect() and friends may use spi_device->controller_data and
+ * controller registers as appropriate.
+ *
+ *
+ * NOTE: SPI controller pins can often be used as GPIO pins instead,
+ * which means you could use a bitbang driver either to get hardware
+ * working quickly, or testing for differences that aren't speed related.
+ */
+
+struct spi_bitbang_cs {
+ unsigned nsecs; /* (clock cycle time)/2 */
+ u32 (*txrx_word)(struct spi_device *spi, unsigned nsecs,
+ u32 word, u8 bits, unsigned flags);
+ unsigned (*txrx_bufs)(struct spi_device *,
+ u32 (*txrx_word)(
+ struct spi_device *spi,
+ unsigned nsecs,
+ u32 word, u8 bits,
+ unsigned flags),
+ unsigned, struct spi_transfer *,
+ unsigned);
+};
+
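+/*
+ * Per-word helpers: push the transfer buffer through txrx_word() one
+ * 8/16/32-bit word at a time and return the number of bytes handled.
+ */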
+static unsigned bitbang_txrx_8(
+ struct spi_device *spi,
+ u32 (*txrx_word)(struct spi_device *spi,
+ unsigned nsecs,
+ u32 word, u8 bits,
+ unsigned flags),
+ unsigned ns,
+ struct spi_transfer *t,
+ unsigned flags
+)
+{
+ unsigned bits = t->bits_per_word;
+ unsigned count = t->len;
+ const u8 *tx = t->tx_buf;
+ u8 *rx = t->rx_buf;
+
+ while (likely(count > 0)) {
+ u8 word = 0;
+
+ if (tx)
+ word = *tx++;
+ word = txrx_word(spi, ns, word, bits, flags);
+ if (rx)
+ *rx++ = word;
+ count -= 1;
+ }
+ return t->len - count;
+}
+
+static unsigned bitbang_txrx_16(
+ struct spi_device *spi,
+ u32 (*txrx_word)(struct spi_device *spi,
+ unsigned nsecs,
+ u32 word, u8 bits,
+ unsigned flags),
+ unsigned ns,
+ struct spi_transfer *t,
+ unsigned flags
+)
+{
+ unsigned bits = t->bits_per_word;
+ unsigned count = t->len;
+ const u16 *tx = t->tx_buf;
+ u16 *rx = t->rx_buf;
+
+ while (likely(count > 1)) {
+ u16 word = 0;
+
+ if (tx)
+ word = *tx++;
+ word = txrx_word(spi, ns, word, bits, flags);
+ if (rx)
+ *rx++ = word;
+ count -= 2;
+ }
+ return t->len - count;
+}
+
+static unsigned bitbang_txrx_32(
+ struct spi_device *spi,
+ u32 (*txrx_word)(struct spi_device *spi,
+ unsigned nsecs,
+ u32 word, u8 bits,
+ unsigned flags),
+ unsigned ns,
+ struct spi_transfer *t,
+ unsigned flags
+)
+{
+ unsigned bits = t->bits_per_word;
+ unsigned count = t->len;
+ const u32 *tx = t->tx_buf;
+ u32 *rx = t->rx_buf;
+
+ while (likely(count > 3)) {
+ u32 word = 0;
+
+ if (tx)
+ word = *tx++;
+ word = txrx_word(spi, ns, word, bits, flags);
+ if (rx)
+ *rx++ = word;
+ count -= 4;
+ }
+ return t->len - count;
+}
+
+int spi_bitbang_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct spi_bitbang_cs *cs = spi->controller_state;
+ u8 bits_per_word;
+ u32 hz;
+
+ if (t) {
+ bits_per_word = t->bits_per_word;
+ hz = t->speed_hz;
+ } else {
+ bits_per_word = 0;
+ hz = 0;
+ }
+
+ /* spi_transfer level calls that work per-word */
+ if (!bits_per_word)
+ bits_per_word = spi->bits_per_word;
+ if (bits_per_word <= 8)
+ cs->txrx_bufs = bitbang_txrx_8;
+ else if (bits_per_word <= 16)
+ cs->txrx_bufs = bitbang_txrx_16;
+ else if (bits_per_word <= 32)
+ cs->txrx_bufs = bitbang_txrx_32;
+ else
+ return -EINVAL;
+
+ /* nsecs = (clock period)/2 */
+ if (!hz)
+ hz = spi->max_speed_hz;
+ if (hz) {
+ cs->nsecs = (1000000000/2) / hz;
+ if (cs->nsecs > (MAX_UDELAY_MS * 1000 * 1000))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(spi_bitbang_setup_transfer);
+
+/*
+ * spi_bitbang_setup - default setup for per-word I/O loops
+ */
+int spi_bitbang_setup(struct spi_device *spi)
+{
+ struct spi_bitbang_cs *cs = spi->controller_state;
+ struct spi_bitbang *bitbang;
+ bool initial_setup = false;
+ int retval;
+
+ bitbang = spi_master_get_devdata(spi->master);
+
+ if (!cs) {
+ cs = kzalloc(sizeof(*cs), GFP_KERNEL);
+ if (!cs)
+ return -ENOMEM;
+ spi->controller_state = cs;
+ initial_setup = true;
+ }
+
+ /* per-word shift register access, in hardware or bitbanging */
+ cs->txrx_word = bitbang->txrx_word[spi->mode & (SPI_CPOL|SPI_CPHA)];
+ if (!cs->txrx_word) {
+ retval = -EINVAL;
+ goto err_free;
+ }
+
+ if (bitbang->setup_transfer) {
+ retval = bitbang->setup_transfer(spi, NULL);
+ if (retval < 0)
+ goto err_free;
+ }
+
+ dev_dbg(&spi->dev, "%s, %u nsec/bit\n", __func__, 2 * cs->nsecs);
+
+ return 0;
+
+err_free:
+ if (initial_setup)
+ kfree(cs);
+ return retval;
+}
+EXPORT_SYMBOL_GPL(spi_bitbang_setup);
+
+/*
+ * spi_bitbang_cleanup - default cleanup for per-word I/O loops
+ */
+void spi_bitbang_cleanup(struct spi_device *spi)
+{
+ kfree(spi->controller_state);
+}
+EXPORT_SYMBOL_GPL(spi_bitbang_cleanup);
+
+static int spi_bitbang_bufs(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct spi_bitbang_cs *cs = spi->controller_state;
+ unsigned nsecs = cs->nsecs;
+ struct spi_bitbang *bitbang;
+
+ bitbang = spi_master_get_devdata(spi->master);
+ if (bitbang->set_line_direction) {
+ int err;
+
+ err = bitbang->set_line_direction(spi, !!(t->tx_buf));
+ if (err < 0)
+ return err;
+ }
+
+ if (spi->mode & SPI_3WIRE) {
+ unsigned flags;
+
+ flags = t->tx_buf ? SPI_MASTER_NO_RX : SPI_MASTER_NO_TX;
+ return cs->txrx_bufs(spi, cs->txrx_word, nsecs, t, flags);
+ }
+ return cs->txrx_bufs(spi, cs->txrx_word, nsecs, t, 0);
+}
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * SECOND PART ... simple transfer queue runner.
+ *
+ * This costs a task context per controller, running the queue by
+ * performing each transfer in sequence. Smarter hardware can queue
+ * several DMA transfers at once, and process several controller queues
+ * in parallel; this driver doesn't match such hardware very well.
+ *
+ * Drivers can provide word-at-a-time i/o primitives, or provide
+ * transfer-at-a-time ones to leverage dma or fifo hardware.
+ */
+
+static int spi_bitbang_prepare_hardware(struct spi_master *spi)
+{
+ struct spi_bitbang *bitbang;
+
+ bitbang = spi_master_get_devdata(spi);
+
+ mutex_lock(&bitbang->lock);
+ bitbang->busy = 1;
+ mutex_unlock(&bitbang->lock);
+
+ return 0;
+}
+
+static int spi_bitbang_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *transfer)
+{
+ struct spi_bitbang *bitbang = spi_master_get_devdata(master);
+ int status = 0;
+
+ if (bitbang->setup_transfer) {
+ status = bitbang->setup_transfer(spi, transfer);
+ if (status < 0)
+ goto out;
+ }
+
+ if (transfer->len)
+ status = bitbang->txrx_bufs(spi, transfer);
+
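+	/* txrx_bufs() returns the number of bytes transferred; anything
+	 * short of the full transfer is treated as a remote I/O error
+	 */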
+ if (status == transfer->len)
+ status = 0;
+ else if (status >= 0)
+ status = -EREMOTEIO;
+
+out:
+ spi_finalize_current_transfer(master);
+
+ return status;
+}
+
+static int spi_bitbang_unprepare_hardware(struct spi_master *spi)
+{
+ struct spi_bitbang *bitbang;
+
+ bitbang = spi_master_get_devdata(spi);
+
+ mutex_lock(&bitbang->lock);
+ bitbang->busy = 0;
+ mutex_unlock(&bitbang->lock);
+
+ return 0;
+}
+
+static void spi_bitbang_set_cs(struct spi_device *spi, bool enable)
+{
+ struct spi_bitbang *bitbang = spi_master_get_devdata(spi->master);
+
+	/* The SPI core hands us the CS line level (high/low), but the
+	 * bitbang chipselect() callback expects an active/inactive state,
+	 * so convert here, taking SPI_CS_HIGH into account.
+	 */
+ enable = (!!(spi->mode & SPI_CS_HIGH) == enable);
+
+ ndelay(SPI_BITBANG_CS_DELAY);
+ bitbang->chipselect(spi, enable ? BITBANG_CS_ACTIVE :
+ BITBANG_CS_INACTIVE);
+ ndelay(SPI_BITBANG_CS_DELAY);
+}
+
+/*----------------------------------------------------------------------*/
+
+int spi_bitbang_init(struct spi_bitbang *bitbang)
+{
+ struct spi_master *master = bitbang->master;
+ bool custom_cs;
+
+ if (!master)
+ return -EINVAL;
+ /*
+ * We only need the chipselect callback if we are actually using it.
+ * If we just use GPIO descriptors, it is surplus. If the
+ * SPI_MASTER_GPIO_SS flag is set, we always need to call the
+ * driver-specific chipselect routine.
+ */
+ custom_cs = (!master->use_gpio_descriptors ||
+ (master->flags & SPI_MASTER_GPIO_SS));
+
+ if (custom_cs && !bitbang->chipselect)
+ return -EINVAL;
+
+ mutex_init(&bitbang->lock);
+
+ if (!master->mode_bits)
+ master->mode_bits = SPI_CPOL | SPI_CPHA | bitbang->flags;
+
+ if (master->transfer || master->transfer_one_message)
+ return -EINVAL;
+
+ master->prepare_transfer_hardware = spi_bitbang_prepare_hardware;
+ master->unprepare_transfer_hardware = spi_bitbang_unprepare_hardware;
+ master->transfer_one = spi_bitbang_transfer_one;
+ /*
+ * When using GPIO descriptors, the ->set_cs() callback doesn't even
+ * get called unless SPI_MASTER_GPIO_SS is set.
+ */
+ if (custom_cs)
+ master->set_cs = spi_bitbang_set_cs;
+
+ if (!bitbang->txrx_bufs) {
+ bitbang->use_dma = 0;
+ bitbang->txrx_bufs = spi_bitbang_bufs;
+ if (!master->setup) {
+ if (!bitbang->setup_transfer)
+ bitbang->setup_transfer =
+ spi_bitbang_setup_transfer;
+ master->setup = spi_bitbang_setup;
+ master->cleanup = spi_bitbang_cleanup;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(spi_bitbang_init);
+
+/**
+ * spi_bitbang_start - start up a polled/bitbanging SPI master driver
+ * @bitbang: driver handle
+ *
+ * Caller should have zero-initialized all parts of the structure, and then
+ * provided callbacks for chip selection and I/O loops. If the master has
+ * a transfer method, its final step should call spi_bitbang_transfer; or,
+ * that's the default if the transfer routine is not initialized. It should
+ * also set up the bus number and number of chipselects.
+ *
+ * For i/o loops, provide callbacks either per-word (for bitbanging, or for
+ * hardware that basically exposes a shift register) or per-spi_transfer
+ * (which takes better advantage of hardware like fifos or DMA engines).
+ *
+ * Drivers using per-word I/O loops should use (or call) spi_bitbang_setup,
+ * spi_bitbang_cleanup and spi_bitbang_setup_transfer to handle those spi
+ * master methods. Those methods are the defaults if the bitbang->txrx_bufs
+ * routine isn't initialized.
+ *
+ * This routine registers the spi_master, which will process requests in a
+ * dedicated task, keeping IRQs unblocked most of the time. To stop
+ * processing those requests, call spi_bitbang_stop().
+ *
+ * On success, this routine will take a reference to master. The caller is
+ * responsible for calling spi_bitbang_stop() to decrement the reference and
+ * spi_master_put() as counterpart of spi_alloc_master() to prevent a memory
+ * leak.
+ */
+int spi_bitbang_start(struct spi_bitbang *bitbang)
+{
+ struct spi_master *master = bitbang->master;
+ int ret;
+
+ ret = spi_bitbang_init(bitbang);
+ if (ret)
+ return ret;
+
+ /* driver may get busy before register() returns, especially
+ * if someone registered boardinfo for devices
+ */
+ ret = spi_register_master(spi_master_get(master));
+ if (ret)
+ spi_master_put(master);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(spi_bitbang_start);
+
+/*
+ * spi_bitbang_stop - stops the task providing spi communication
+ */
+void spi_bitbang_stop(struct spi_bitbang *bitbang)
+{
+ spi_unregister_master(bitbang->master);
+}
+EXPORT_SYMBOL_GPL(spi_bitbang_stop);
+
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/spi/spi-brcmstb-qspi.c b/drivers/spi/spi-brcmstb-qspi.c
new file mode 100644
index 000000000..75e9b76da
--- /dev/null
+++ b/drivers/spi/spi-brcmstb-qspi.c
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2016 Broadcom
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include "spi-bcm-qspi.h"
+
+static const struct of_device_id brcmstb_qspi_of_match[] = {
+ { .compatible = "brcm,spi-brcmstb-qspi" },
+ { .compatible = "brcm,spi-brcmstb-mspi" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, brcmstb_qspi_of_match);
+
+static int brcmstb_qspi_probe(struct platform_device *pdev)
+{
+ return bcm_qspi_probe(pdev, NULL);
+}
+
+static int brcmstb_qspi_remove(struct platform_device *pdev)
+{
+ return bcm_qspi_remove(pdev);
+}
+
+static struct platform_driver brcmstb_qspi_driver = {
+ .probe = brcmstb_qspi_probe,
+ .remove = brcmstb_qspi_remove,
+ .driver = {
+ .name = "brcmstb_qspi",
+ .pm = &bcm_qspi_pm_ops,
+ .of_match_table = brcmstb_qspi_of_match,
+ }
+};
+module_platform_driver(brcmstb_qspi_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Kamal Dasu");
+MODULE_DESCRIPTION("Broadcom SPI driver for set-top SoC");
diff --git a/drivers/spi/spi-butterfly.c b/drivers/spi/spi-butterfly.c
new file mode 100644
index 000000000..cceae816c
--- /dev/null
+++ b/drivers/spi/spi-butterfly.c
@@ -0,0 +1,323 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * parport-to-butterfly adapter
+ *
+ * Copyright (C) 2005 David Brownell
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/parport.h>
+
+#include <linux/sched.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+#include <linux/spi/flash.h>
+
+#include <linux/mtd/partitions.h>
+
+/*
+ * This uses SPI to talk with an "AVR Butterfly", which is a $US20 card
+ * with a battery powered AVR microcontroller and lots of goodies. You
+ * can use GCC to develop firmware for this.
+ *
+ * See Documentation/spi/butterfly.rst for information about how to build
+ * and use this custom parallel port cable.
+ */
+
+/* DATA output bits (pins 2..9 == D0..D7) */
+#define butterfly_nreset (1 << 1) /* pin 3 */
+
+#define spi_sck_bit (1 << 0) /* pin 2 */
+#define spi_mosi_bit (1 << 7) /* pin 9 */
+
+#define vcc_bits ((1 << 6) | (1 << 5)) /* pins 7, 8 */
+
+/* STATUS input bits */
+#define spi_miso_bit PARPORT_STATUS_BUSY /* pin 11 */
+
+/* CONTROL output bits */
+#define spi_cs_bit PARPORT_CONTROL_SELECT /* pin 17 */
+
+static inline struct butterfly *spidev_to_pp(struct spi_device *spi)
+{
+ return spi->controller_data;
+}
+
+struct butterfly {
+ /* REVISIT ... for now, this must be first */
+ struct spi_bitbang bitbang;
+
+ struct parport *port;
+ struct pardevice *pd;
+
+ u8 lastbyte;
+
+ struct spi_device *dataflash;
+ struct spi_device *butterfly;
+ struct spi_board_info info[2];
+
+};
+
+/*----------------------------------------------------------------------*/
+
+static inline void
+setsck(struct spi_device *spi, int is_on)
+{
+ struct butterfly *pp = spidev_to_pp(spi);
+ u8 bit, byte = pp->lastbyte;
+
+ bit = spi_sck_bit;
+
+ if (is_on)
+ byte |= bit;
+ else
+ byte &= ~bit;
+ parport_write_data(pp->port, byte);
+ pp->lastbyte = byte;
+}
+
+static inline void
+setmosi(struct spi_device *spi, int is_on)
+{
+ struct butterfly *pp = spidev_to_pp(spi);
+ u8 bit, byte = pp->lastbyte;
+
+ bit = spi_mosi_bit;
+
+ if (is_on)
+ byte |= bit;
+ else
+ byte &= ~bit;
+ parport_write_data(pp->port, byte);
+ pp->lastbyte = byte;
+}
+
+static inline int getmiso(struct spi_device *spi)
+{
+ struct butterfly *pp = spidev_to_pp(spi);
+ int value;
+ u8 bit;
+
+ bit = spi_miso_bit;
+
+ /* only STATUS_BUSY is NOT negated */
+ value = !(parport_read_status(pp->port) & bit);
+ return (bit == PARPORT_STATUS_BUSY) ? value : !value;
+}
+
+static void butterfly_chipselect(struct spi_device *spi, int value)
+{
+ struct butterfly *pp = spidev_to_pp(spi);
+
+ /* set default clock polarity */
+ if (value != BITBANG_CS_INACTIVE)
+ setsck(spi, spi->mode & SPI_CPOL);
+
+ /* here, value == "activate or not";
+ * most PARPORT_CONTROL_* bits are negated, so we must
+ * morph it to value == "bit value to write in control register"
+ */
+ if (spi_cs_bit == PARPORT_CONTROL_INIT)
+ value = !value;
+
+ parport_frob_control(pp->port, spi_cs_bit, value ? spi_cs_bit : 0);
+}
+
+/* we only needed to implement one mode here, and choose SPI_MODE_0 */
+
+#define spidelay(X) do { } while (0)
+/* #define spidelay ndelay */
+
+#include "spi-bitbang-txrx.h"
+
+static u32
+butterfly_txrx_word_mode0(struct spi_device *spi, unsigned nsecs, u32 word,
+ u8 bits, unsigned flags)
+{
+ return bitbang_txrx_be_cpha0(spi, nsecs, 0, flags, word, bits);
+}
+
+/*----------------------------------------------------------------------*/
+
+/* override default partitioning with cmdlinepart */
+static struct mtd_partition partitions[] = { {
+ /* JFFS2 wants partitions of 4*N blocks for this device,
+ * so sectors 0 and 1 can't be partitions by themselves.
+ */
+
+ /* sector 0 = 8 pages * 264 bytes/page (1 block)
+ * sector 1 = 248 pages * 264 bytes/page
+ */
+ .name = "bookkeeping", /* 66 KB */
+ .offset = 0,
+ .size = (8 + 248) * 264,
+ /* .mask_flags = MTD_WRITEABLE, */
+}, {
+ /* sector 2 = 256 pages * 264 bytes/page
+ * sectors 3-5 = 512 pages * 264 bytes/page
+ */
+ .name = "filesystem", /* 462 KB */
+ .offset = MTDPART_OFS_APPEND,
+ .size = MTDPART_SIZ_FULL,
+} };
+
+static struct flash_platform_data flash = {
+ .name = "butterflash",
+ .parts = partitions,
+ .nr_parts = ARRAY_SIZE(partitions),
+};
+
+/* REVISIT remove this ugly global and its "only one" limitation */
+static struct butterfly *butterfly;
+
+static void butterfly_attach(struct parport *p)
+{
+ struct pardevice *pd;
+ int status;
+ struct butterfly *pp;
+ struct spi_master *master;
+ struct device *dev = p->physport->dev;
+ struct pardev_cb butterfly_cb;
+
+ if (butterfly || !dev)
+ return;
+
+ /* REVISIT: this just _assumes_ a butterfly is there ... no probe,
+ * and no way to be selective about what it binds to.
+ */
+
+ master = spi_alloc_master(dev, sizeof(*pp));
+ if (!master) {
+ status = -ENOMEM;
+ goto done;
+ }
+ pp = spi_master_get_devdata(master);
+
+ /*
+ * SPI and bitbang hookup
+ *
+ * use default setup(), cleanup(), and transfer() methods; and
+ * only bother implementing mode 0. Start it later.
+ */
+ master->bus_num = 42;
+ master->num_chipselect = 2;
+
+ pp->bitbang.master = master;
+ pp->bitbang.chipselect = butterfly_chipselect;
+ pp->bitbang.txrx_word[SPI_MODE_0] = butterfly_txrx_word_mode0;
+
+ /*
+ * parport hookup
+ */
+ pp->port = p;
+ memset(&butterfly_cb, 0, sizeof(butterfly_cb));
+ butterfly_cb.private = pp;
+ pd = parport_register_dev_model(p, "spi_butterfly", &butterfly_cb, 0);
+ if (!pd) {
+ status = -ENOMEM;
+ goto clean0;
+ }
+ pp->pd = pd;
+
+ status = parport_claim(pd);
+ if (status < 0)
+ goto clean1;
+
+ /*
+ * Butterfly reset, powerup, run firmware
+ */
+ pr_debug("%s: powerup/reset Butterfly\n", p->name);
+
+ /* nCS for dataflash (this bit is inverted on output) */
+ parport_frob_control(pp->port, spi_cs_bit, 0);
+
+ /* stabilize power with chip in reset (nRESET), and
+ * spi_sck_bit clear (CPOL=0)
+ */
+ pp->lastbyte |= vcc_bits;
+ parport_write_data(pp->port, pp->lastbyte);
+ msleep(5);
+
+ /* take it out of reset; assume long reset delay */
+ pp->lastbyte |= butterfly_nreset;
+ parport_write_data(pp->port, pp->lastbyte);
+ msleep(100);
+
+ /*
+ * Start SPI ... for now, hide that we're two physical busses.
+ */
+ status = spi_bitbang_start(&pp->bitbang);
+ if (status < 0)
+ goto clean2;
+
+ /* Bus 1 lets us talk to at45db041b (firmware disables AVR SPI), AVR
+ * (firmware resets at45, acts as spi slave) or neither (we ignore
+ * both, AVR uses AT45). Here we expect firmware for the first option.
+ */
+
+ pp->info[0].max_speed_hz = 15 * 1000 * 1000;
+ strcpy(pp->info[0].modalias, "mtd_dataflash");
+ pp->info[0].platform_data = &flash;
+ pp->info[0].chip_select = 1;
+ pp->info[0].controller_data = pp;
+ pp->dataflash = spi_new_device(pp->bitbang.master, &pp->info[0]);
+ if (pp->dataflash)
+ pr_debug("%s: dataflash at %s\n", p->name,
+ dev_name(&pp->dataflash->dev));
+
+ pr_info("%s: AVR Butterfly\n", p->name);
+ butterfly = pp;
+ return;
+
+clean2:
+ /* turn off VCC */
+ parport_write_data(pp->port, 0);
+
+ parport_release(pp->pd);
+clean1:
+ parport_unregister_device(pd);
+clean0:
+ spi_master_put(pp->bitbang.master);
+done:
+ pr_debug("%s: butterfly probe, fail %d\n", p->name, status);
+}
+
+static void butterfly_detach(struct parport *p)
+{
+ struct butterfly *pp;
+
+ /* FIXME this global is ugly ... but, how to quickly get from
+ * the parport to the "struct butterfly" associated with it?
+ * "old school" driver-internal device lists?
+ */
+ if (!butterfly || butterfly->port != p)
+ return;
+ pp = butterfly;
+ butterfly = NULL;
+
+ /* stop() unregisters child devices too */
+ spi_bitbang_stop(&pp->bitbang);
+
+ /* turn off VCC */
+ parport_write_data(pp->port, 0);
+ msleep(10);
+
+ parport_release(pp->pd);
+ parport_unregister_device(pp->pd);
+
+ spi_master_put(pp->bitbang.master);
+}
+
+static struct parport_driver butterfly_driver = {
+ .name = "spi_butterfly",
+ .match_port = butterfly_attach,
+ .detach = butterfly_detach,
+ .devmodel = true,
+};
+module_parport_driver(butterfly_driver);
+
+MODULE_DESCRIPTION("Parport Adapter driver for AVR Butterfly");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
new file mode 100644
index 000000000..b371e4eb4
--- /dev/null
+++ b/drivers/spi/spi-cadence-quadspi.c
@@ -0,0 +1,1879 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Driver for Cadence QSPI Controller
+//
+// Copyright Altera Corporation (C) 2012-2014. All rights reserved.
+// Copyright Intel Corporation (C) 2019-2020. All rights reserved.
+// Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/log2.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/sched.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+#include <linux/timer.h>
+
+#define CQSPI_NAME "cadence-qspi"
+#define CQSPI_MAX_CHIPSELECT 16
+
+/* Quirks */
+#define CQSPI_NEEDS_WR_DELAY BIT(0)
+#define CQSPI_DISABLE_DAC_MODE BIT(1)
+#define CQSPI_SUPPORT_EXTERNAL_DMA BIT(2)
+#define CQSPI_NO_SUPPORT_WR_COMPLETION BIT(3)
+#define CQSPI_SLOW_SRAM BIT(4)
+
+/* Capabilities */
+#define CQSPI_SUPPORTS_OCTAL BIT(0)
+
+#define CQSPI_OP_WIDTH(part) ((part).nbytes ? ilog2((part).buswidth) : 0)
+
+struct cqspi_st;
+
+struct cqspi_flash_pdata {
+ struct cqspi_st *cqspi;
+ u32 clk_rate;
+ u32 read_delay;
+ u32 tshsl_ns;
+ u32 tsd2d_ns;
+ u32 tchsh_ns;
+ u32 tslch_ns;
+ u8 cs;
+};
+
+struct cqspi_st {
+ struct platform_device *pdev;
+ struct spi_master *master;
+ struct clk *clk;
+ unsigned int sclk;
+
+ void __iomem *iobase;
+ void __iomem *ahb_base;
+ resource_size_t ahb_size;
+ struct completion transfer_complete;
+
+ struct dma_chan *rx_chan;
+ struct completion rx_dma_complete;
+ dma_addr_t mmap_phys_base;
+
+ int current_cs;
+ unsigned long master_ref_clk_hz;
+ bool is_decoded_cs;
+ u32 fifo_depth;
+ u32 fifo_width;
+ u32 num_chipselect;
+ bool rclk_en;
+ u32 trigger_address;
+ u32 wr_delay;
+ bool use_direct_mode;
+ struct cqspi_flash_pdata f_pdata[CQSPI_MAX_CHIPSELECT];
+ bool use_dma_read;
+ u32 pd_dev_id;
+ bool wr_completion;
+ bool slow_sram;
+};
+
+struct cqspi_driver_platdata {
+ u32 hwcaps_mask;
+ u8 quirks;
+ int (*indirect_read_dma)(struct cqspi_flash_pdata *f_pdata,
+ u_char *rxbuf, loff_t from_addr, size_t n_rx);
+ u32 (*get_dma_status)(struct cqspi_st *cqspi);
+};
+
+/* Operation timeout value */
+#define CQSPI_TIMEOUT_MS 500
+#define CQSPI_READ_TIMEOUT_MS 10
+
+#define CQSPI_DUMMY_CLKS_PER_BYTE 8
+#define CQSPI_DUMMY_BYTES_MAX 4
+#define CQSPI_DUMMY_CLKS_MAX 31
+
+#define CQSPI_STIG_DATA_LEN_MAX 8
+
+/* Register map */
+#define CQSPI_REG_CONFIG 0x00
+#define CQSPI_REG_CONFIG_ENABLE_MASK BIT(0)
+#define CQSPI_REG_CONFIG_ENB_DIR_ACC_CTRL BIT(7)
+#define CQSPI_REG_CONFIG_DECODE_MASK BIT(9)
+#define CQSPI_REG_CONFIG_CHIPSELECT_LSB 10
+#define CQSPI_REG_CONFIG_DMA_MASK BIT(15)
+#define CQSPI_REG_CONFIG_BAUD_LSB 19
+#define CQSPI_REG_CONFIG_DTR_PROTO BIT(24)
+#define CQSPI_REG_CONFIG_DUAL_OPCODE BIT(30)
+#define CQSPI_REG_CONFIG_IDLE_LSB 31
+#define CQSPI_REG_CONFIG_CHIPSELECT_MASK 0xF
+#define CQSPI_REG_CONFIG_BAUD_MASK 0xF
+
+#define CQSPI_REG_RD_INSTR 0x04
+#define CQSPI_REG_RD_INSTR_OPCODE_LSB 0
+#define CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB 8
+#define CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB 12
+#define CQSPI_REG_RD_INSTR_TYPE_DATA_LSB 16
+#define CQSPI_REG_RD_INSTR_MODE_EN_LSB 20
+#define CQSPI_REG_RD_INSTR_DUMMY_LSB 24
+#define CQSPI_REG_RD_INSTR_TYPE_INSTR_MASK 0x3
+#define CQSPI_REG_RD_INSTR_TYPE_ADDR_MASK 0x3
+#define CQSPI_REG_RD_INSTR_TYPE_DATA_MASK 0x3
+#define CQSPI_REG_RD_INSTR_DUMMY_MASK 0x1F
+
+#define CQSPI_REG_WR_INSTR 0x08
+#define CQSPI_REG_WR_INSTR_OPCODE_LSB 0
+#define CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB 12
+#define CQSPI_REG_WR_INSTR_TYPE_DATA_LSB 16
+
+#define CQSPI_REG_DELAY 0x0C
+#define CQSPI_REG_DELAY_TSLCH_LSB 0
+#define CQSPI_REG_DELAY_TCHSH_LSB 8
+#define CQSPI_REG_DELAY_TSD2D_LSB 16
+#define CQSPI_REG_DELAY_TSHSL_LSB 24
+#define CQSPI_REG_DELAY_TSLCH_MASK 0xFF
+#define CQSPI_REG_DELAY_TCHSH_MASK 0xFF
+#define CQSPI_REG_DELAY_TSD2D_MASK 0xFF
+#define CQSPI_REG_DELAY_TSHSL_MASK 0xFF
+
+#define CQSPI_REG_READCAPTURE 0x10
+#define CQSPI_REG_READCAPTURE_BYPASS_LSB 0
+#define CQSPI_REG_READCAPTURE_DELAY_LSB 1
+#define CQSPI_REG_READCAPTURE_DELAY_MASK 0xF
+
+#define CQSPI_REG_SIZE 0x14
+#define CQSPI_REG_SIZE_ADDRESS_LSB 0
+#define CQSPI_REG_SIZE_PAGE_LSB 4
+#define CQSPI_REG_SIZE_BLOCK_LSB 16
+#define CQSPI_REG_SIZE_ADDRESS_MASK 0xF
+#define CQSPI_REG_SIZE_PAGE_MASK 0xFFF
+#define CQSPI_REG_SIZE_BLOCK_MASK 0x3F
+
+#define CQSPI_REG_SRAMPARTITION 0x18
+#define CQSPI_REG_INDIRECTTRIGGER 0x1C
+
+#define CQSPI_REG_DMA 0x20
+#define CQSPI_REG_DMA_SINGLE_LSB 0
+#define CQSPI_REG_DMA_BURST_LSB 8
+#define CQSPI_REG_DMA_SINGLE_MASK 0xFF
+#define CQSPI_REG_DMA_BURST_MASK 0xFF
+
+#define CQSPI_REG_REMAP 0x24
+#define CQSPI_REG_MODE_BIT 0x28
+
+#define CQSPI_REG_SDRAMLEVEL 0x2C
+#define CQSPI_REG_SDRAMLEVEL_RD_LSB 0
+#define CQSPI_REG_SDRAMLEVEL_WR_LSB 16
+#define CQSPI_REG_SDRAMLEVEL_RD_MASK 0xFFFF
+#define CQSPI_REG_SDRAMLEVEL_WR_MASK 0xFFFF
+
+#define CQSPI_REG_WR_COMPLETION_CTRL 0x38
+#define CQSPI_REG_WR_DISABLE_AUTO_POLL BIT(14)
+
+#define CQSPI_REG_IRQSTATUS 0x40
+#define CQSPI_REG_IRQMASK 0x44
+
+#define CQSPI_REG_INDIRECTRD 0x60
+#define CQSPI_REG_INDIRECTRD_START_MASK BIT(0)
+#define CQSPI_REG_INDIRECTRD_CANCEL_MASK BIT(1)
+#define CQSPI_REG_INDIRECTRD_DONE_MASK BIT(5)
+
+#define CQSPI_REG_INDIRECTRDWATERMARK 0x64
+#define CQSPI_REG_INDIRECTRDSTARTADDR 0x68
+#define CQSPI_REG_INDIRECTRDBYTES 0x6C
+
+#define CQSPI_REG_CMDCTRL 0x90
+#define CQSPI_REG_CMDCTRL_EXECUTE_MASK BIT(0)
+#define CQSPI_REG_CMDCTRL_INPROGRESS_MASK BIT(1)
+#define CQSPI_REG_CMDCTRL_DUMMY_LSB 7
+#define CQSPI_REG_CMDCTRL_WR_BYTES_LSB 12
+#define CQSPI_REG_CMDCTRL_WR_EN_LSB 15
+#define CQSPI_REG_CMDCTRL_ADD_BYTES_LSB 16
+#define CQSPI_REG_CMDCTRL_ADDR_EN_LSB 19
+#define CQSPI_REG_CMDCTRL_RD_BYTES_LSB 20
+#define CQSPI_REG_CMDCTRL_RD_EN_LSB 23
+#define CQSPI_REG_CMDCTRL_OPCODE_LSB 24
+#define CQSPI_REG_CMDCTRL_WR_BYTES_MASK 0x7
+#define CQSPI_REG_CMDCTRL_ADD_BYTES_MASK 0x3
+#define CQSPI_REG_CMDCTRL_RD_BYTES_MASK 0x7
+#define CQSPI_REG_CMDCTRL_DUMMY_MASK 0x1F
+
+#define CQSPI_REG_INDIRECTWR 0x70
+#define CQSPI_REG_INDIRECTWR_START_MASK BIT(0)
+#define CQSPI_REG_INDIRECTWR_CANCEL_MASK BIT(1)
+#define CQSPI_REG_INDIRECTWR_DONE_MASK BIT(5)
+
+#define CQSPI_REG_INDIRECTWRWATERMARK 0x74
+#define CQSPI_REG_INDIRECTWRSTARTADDR 0x78
+#define CQSPI_REG_INDIRECTWRBYTES 0x7C
+
+#define CQSPI_REG_INDTRIG_ADDRRANGE 0x80
+
+#define CQSPI_REG_CMDADDRESS 0x94
+#define CQSPI_REG_CMDREADDATALOWER 0xA0
+#define CQSPI_REG_CMDREADDATAUPPER 0xA4
+#define CQSPI_REG_CMDWRITEDATALOWER 0xA8
+#define CQSPI_REG_CMDWRITEDATAUPPER 0xAC
+
+#define CQSPI_REG_POLLING_STATUS 0xB0
+#define CQSPI_REG_POLLING_STATUS_DUMMY_LSB 16
+
+#define CQSPI_REG_OP_EXT_LOWER 0xE0
+#define CQSPI_REG_OP_EXT_READ_LSB 24
+#define CQSPI_REG_OP_EXT_WRITE_LSB 16
+#define CQSPI_REG_OP_EXT_STIG_LSB 0
+
+#define CQSPI_REG_VERSAL_DMA_SRC_ADDR 0x1000
+
+#define CQSPI_REG_VERSAL_DMA_DST_ADDR 0x1800
+#define CQSPI_REG_VERSAL_DMA_DST_SIZE 0x1804
+
+#define CQSPI_REG_VERSAL_DMA_DST_CTRL 0x180C
+
+#define CQSPI_REG_VERSAL_DMA_DST_I_STS 0x1814
+#define CQSPI_REG_VERSAL_DMA_DST_I_EN 0x1818
+#define CQSPI_REG_VERSAL_DMA_DST_I_DIS 0x181C
+#define CQSPI_REG_VERSAL_DMA_DST_DONE_MASK BIT(1)
+
+#define CQSPI_REG_VERSAL_DMA_DST_ADDR_MSB 0x1828
+
+#define CQSPI_REG_VERSAL_DMA_DST_CTRL_VAL 0xF43FFA00
+#define CQSPI_REG_VERSAL_ADDRRANGE_WIDTH_VAL 0x6
+
+/* Interrupt status bits */
+#define CQSPI_REG_IRQ_MODE_ERR BIT(0)
+#define CQSPI_REG_IRQ_UNDERFLOW BIT(1)
+#define CQSPI_REG_IRQ_IND_COMP BIT(2)
+#define CQSPI_REG_IRQ_IND_RD_REJECT BIT(3)
+#define CQSPI_REG_IRQ_WR_PROTECTED_ERR BIT(4)
+#define CQSPI_REG_IRQ_ILLEGAL_AHB_ERR BIT(5)
+#define CQSPI_REG_IRQ_WATERMARK BIT(6)
+#define CQSPI_REG_IRQ_IND_SRAM_FULL BIT(12)
+
+#define CQSPI_IRQ_MASK_RD (CQSPI_REG_IRQ_WATERMARK | \
+ CQSPI_REG_IRQ_IND_SRAM_FULL | \
+ CQSPI_REG_IRQ_IND_COMP)
+
+#define CQSPI_IRQ_MASK_WR (CQSPI_REG_IRQ_IND_COMP | \
+ CQSPI_REG_IRQ_WATERMARK | \
+ CQSPI_REG_IRQ_UNDERFLOW)
+
+#define CQSPI_IRQ_STATUS_MASK 0x1FFFF
+#define CQSPI_DMA_UNALIGN 0x3
+
+#define CQSPI_REG_VERSAL_DMA_VAL 0x602
+
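+/*
+ * Poll @reg until all bits in @mask are set (clr == false) or all of them
+ * are cleared (clr == true), giving up after CQSPI_TIMEOUT_MS.
+ */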
+static int cqspi_wait_for_bit(void __iomem *reg, const u32 mask, bool clr)
+{
+ u32 val;
+
+ return readl_relaxed_poll_timeout(reg, val,
+ (((clr ? ~val : val) & mask) == mask),
+ 10, CQSPI_TIMEOUT_MS * 1000);
+}
+
+static bool cqspi_is_idle(struct cqspi_st *cqspi)
+{
+ u32 reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
+
+ return reg & (1UL << CQSPI_REG_CONFIG_IDLE_LSB);
+}
+
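+/* Return the fill level of the read partition of the SRAM, in FIFO words. */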
+static u32 cqspi_get_rd_sram_level(struct cqspi_st *cqspi)
+{
+ u32 reg = readl(cqspi->iobase + CQSPI_REG_SDRAMLEVEL);
+
+ reg >>= CQSPI_REG_SDRAMLEVEL_RD_LSB;
+ return reg & CQSPI_REG_SDRAMLEVEL_RD_MASK;
+}
+
+static u32 cqspi_get_versal_dma_status(struct cqspi_st *cqspi)
+{
+ u32 dma_status;
+
+ dma_status = readl(cqspi->iobase +
+ CQSPI_REG_VERSAL_DMA_DST_I_STS);
+ writel(dma_status, cqspi->iobase +
+ CQSPI_REG_VERSAL_DMA_DST_I_STS);
+
+ return dma_status & CQSPI_REG_VERSAL_DMA_DST_DONE_MASK;
+}
+
+static irqreturn_t cqspi_irq_handler(int this_irq, void *dev)
+{
+ struct cqspi_st *cqspi = dev;
+ unsigned int irq_status;
+ struct device *device = &cqspi->pdev->dev;
+ const struct cqspi_driver_platdata *ddata;
+
+ ddata = of_device_get_match_data(device);
+
+ /* Read interrupt status */
+ irq_status = readl(cqspi->iobase + CQSPI_REG_IRQSTATUS);
+
+ /* Clear interrupt */
+ writel(irq_status, cqspi->iobase + CQSPI_REG_IRQSTATUS);
+
+ if (cqspi->use_dma_read && ddata && ddata->get_dma_status) {
+ if (ddata->get_dma_status(cqspi)) {
+ complete(&cqspi->transfer_complete);
+ return IRQ_HANDLED;
+ }
+ } else if (!cqspi->slow_sram)
+ irq_status &= CQSPI_IRQ_MASK_RD | CQSPI_IRQ_MASK_WR;
+ else
+ irq_status &= CQSPI_REG_IRQ_WATERMARK | CQSPI_IRQ_MASK_WR;
+
+ if (irq_status)
+ complete(&cqspi->transfer_complete);
+
+ return IRQ_HANDLED;
+}
+
+static unsigned int cqspi_calc_rdreg(const struct spi_mem_op *op)
+{
+ u32 rdreg = 0;
+
+ rdreg |= CQSPI_OP_WIDTH(op->cmd) << CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB;
+ rdreg |= CQSPI_OP_WIDTH(op->addr) << CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB;
+ rdreg |= CQSPI_OP_WIDTH(op->data) << CQSPI_REG_RD_INSTR_TYPE_DATA_LSB;
+
+ return rdreg;
+}
+
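+/*
+ * Convert the dummy phase from bytes to clock cycles: each byte takes
+ * 8 / buswidth cycles, and DTR ops transfer on both clock edges, so the
+ * cycle count is halved.
+ */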
+static unsigned int cqspi_calc_dummy(const struct spi_mem_op *op)
+{
+ unsigned int dummy_clk;
+
+ if (!op->dummy.nbytes)
+ return 0;
+
+ dummy_clk = op->dummy.nbytes * (8 / op->dummy.buswidth);
+ if (op->cmd.dtr)
+ dummy_clk /= 2;
+
+ return dummy_clk;
+}
+
+static int cqspi_wait_idle(struct cqspi_st *cqspi)
+{
+ const unsigned int poll_idle_retry = 3;
+ unsigned int count = 0;
+ unsigned long timeout;
+
+ timeout = jiffies + msecs_to_jiffies(CQSPI_TIMEOUT_MS);
+ while (1) {
+ /*
+ * Read a few times in succession to ensure the controller
+ * is indeed idle, that is, the bit does not transition
+ * low again.
+ */
+ if (cqspi_is_idle(cqspi))
+ count++;
+ else
+ count = 0;
+
+ if (count >= poll_idle_retry)
+ return 0;
+
+ if (time_after(jiffies, timeout)) {
+ /* Timeout, in busy mode. */
+ dev_err(&cqspi->pdev->dev,
+ "QSPI is still busy after %dms timeout.\n",
+ CQSPI_TIMEOUT_MS);
+ return -ETIMEDOUT;
+ }
+
+ cpu_relax();
+ }
+}
+
+static int cqspi_exec_flash_cmd(struct cqspi_st *cqspi, unsigned int reg)
+{
+ void __iomem *reg_base = cqspi->iobase;
+ int ret;
+
+ /* Write the command to CMDCTRL without starting execution. */
+ writel(reg, reg_base + CQSPI_REG_CMDCTRL);
+ /* Start execution. */
+ reg |= CQSPI_REG_CMDCTRL_EXECUTE_MASK;
+ writel(reg, reg_base + CQSPI_REG_CMDCTRL);
+
+ /* Polling for completion. */
+ ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_CMDCTRL,
+ CQSPI_REG_CMDCTRL_INPROGRESS_MASK, 1);
+ if (ret) {
+ dev_err(&cqspi->pdev->dev,
+ "Flash command execution timed out.\n");
+ return ret;
+ }
+
+ /* Polling QSPI idle status. */
+ return cqspi_wait_idle(cqspi);
+}
+
+static int cqspi_setup_opcode_ext(struct cqspi_flash_pdata *f_pdata,
+ const struct spi_mem_op *op,
+ unsigned int shift)
+{
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ void __iomem *reg_base = cqspi->iobase;
+ unsigned int reg;
+ u8 ext;
+
+ if (op->cmd.nbytes != 2)
+ return -EINVAL;
+
+ /* Opcode extension is the LSB. */
+ ext = op->cmd.opcode & 0xff;
+
+ reg = readl(reg_base + CQSPI_REG_OP_EXT_LOWER);
+ reg &= ~(0xff << shift);
+ reg |= ext << shift;
+ writel(reg, reg_base + CQSPI_REG_OP_EXT_LOWER);
+
+ return 0;
+}
+
+static int cqspi_enable_dtr(struct cqspi_flash_pdata *f_pdata,
+ const struct spi_mem_op *op, unsigned int shift)
+{
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ void __iomem *reg_base = cqspi->iobase;
+ unsigned int reg;
+ int ret;
+
+ reg = readl(reg_base + CQSPI_REG_CONFIG);
+
+ /*
+ * Enable the dual-byte opcode here. The caller has to set up the
+ * opcode extension according to the type of operation being performed.
+ */
+ if (op->cmd.dtr) {
+ reg |= CQSPI_REG_CONFIG_DTR_PROTO;
+ reg |= CQSPI_REG_CONFIG_DUAL_OPCODE;
+
+ /* Set up command opcode extension. */
+ ret = cqspi_setup_opcode_ext(f_pdata, op, shift);
+ if (ret)
+ return ret;
+ } else {
+ reg &= ~CQSPI_REG_CONFIG_DTR_PROTO;
+ reg &= ~CQSPI_REG_CONFIG_DUAL_OPCODE;
+ }
+
+ writel(reg, reg_base + CQSPI_REG_CONFIG);
+
+ return cqspi_wait_idle(cqspi);
+}
+
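+/*
+ * Read a small amount of data (at most CQSPI_STIG_DATA_LEN_MAX bytes) with a
+ * software triggered instruction generator (STIG) command, fetching the
+ * result from the command read data registers.
+ */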
+static int cqspi_command_read(struct cqspi_flash_pdata *f_pdata,
+ const struct spi_mem_op *op)
+{
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ void __iomem *reg_base = cqspi->iobase;
+ u8 *rxbuf = op->data.buf.in;
+ u8 opcode;
+ size_t n_rx = op->data.nbytes;
+ unsigned int rdreg;
+ unsigned int reg;
+ unsigned int dummy_clk;
+ size_t read_len;
+ int status;
+
+ status = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_STIG_LSB);
+ if (status)
+ return status;
+
+ if (!n_rx || n_rx > CQSPI_STIG_DATA_LEN_MAX || !rxbuf) {
+ dev_err(&cqspi->pdev->dev,
+ "Invalid input argument, len %zu rxbuf 0x%p\n",
+ n_rx, rxbuf);
+ return -EINVAL;
+ }
+
+ if (op->cmd.dtr)
+ opcode = op->cmd.opcode >> 8;
+ else
+ opcode = op->cmd.opcode;
+
+ reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;
+
+ rdreg = cqspi_calc_rdreg(op);
+ writel(rdreg, reg_base + CQSPI_REG_RD_INSTR);
+
+ dummy_clk = cqspi_calc_dummy(op);
+ if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
+ return -EOPNOTSUPP;
+
+ if (dummy_clk)
+ reg |= (dummy_clk & CQSPI_REG_CMDCTRL_DUMMY_MASK)
+ << CQSPI_REG_CMDCTRL_DUMMY_LSB;
+
+ reg |= (0x1 << CQSPI_REG_CMDCTRL_RD_EN_LSB);
+
+ /* 0 means 1 byte. */
+ reg |= (((n_rx - 1) & CQSPI_REG_CMDCTRL_RD_BYTES_MASK)
+ << CQSPI_REG_CMDCTRL_RD_BYTES_LSB);
+ status = cqspi_exec_flash_cmd(cqspi, reg);
+ if (status)
+ return status;
+
+ reg = readl(reg_base + CQSPI_REG_CMDREADDATALOWER);
+
+ /* Put the read value into rx_buf */
+ read_len = (n_rx > 4) ? 4 : n_rx;
+ memcpy(rxbuf, &reg, read_len);
+ rxbuf += read_len;
+
+ if (n_rx > 4) {
+ reg = readl(reg_base + CQSPI_REG_CMDREADDATAUPPER);
+
+ read_len = n_rx - read_len;
+ memcpy(rxbuf, &reg, read_len);
+ }
+
+ return 0;
+}
+
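+/*
+ * Write a small amount of data (at most CQSPI_STIG_DATA_LEN_MAX bytes)
+ * through the command write data registers using a STIG command.
+ */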
+static int cqspi_command_write(struct cqspi_flash_pdata *f_pdata,
+ const struct spi_mem_op *op)
+{
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ void __iomem *reg_base = cqspi->iobase;
+ u8 opcode;
+ const u8 *txbuf = op->data.buf.out;
+ size_t n_tx = op->data.nbytes;
+ unsigned int reg;
+ unsigned int data;
+ size_t write_len;
+ int ret;
+
+ ret = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_STIG_LSB);
+ if (ret)
+ return ret;
+
+ if (n_tx > CQSPI_STIG_DATA_LEN_MAX || (n_tx && !txbuf)) {
+ dev_err(&cqspi->pdev->dev,
+ "Invalid input argument, cmdlen %zu txbuf 0x%p\n",
+ n_tx, txbuf);
+ return -EINVAL;
+ }
+
+ reg = cqspi_calc_rdreg(op);
+ writel(reg, reg_base + CQSPI_REG_RD_INSTR);
+
+ if (op->cmd.dtr)
+ opcode = op->cmd.opcode >> 8;
+ else
+ opcode = op->cmd.opcode;
+
+ reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;
+
+ if (op->addr.nbytes) {
+ reg |= (0x1 << CQSPI_REG_CMDCTRL_ADDR_EN_LSB);
+ reg |= ((op->addr.nbytes - 1) &
+ CQSPI_REG_CMDCTRL_ADD_BYTES_MASK)
+ << CQSPI_REG_CMDCTRL_ADD_BYTES_LSB;
+
+ writel(op->addr.val, reg_base + CQSPI_REG_CMDADDRESS);
+ }
+
+ if (n_tx) {
+ reg |= (0x1 << CQSPI_REG_CMDCTRL_WR_EN_LSB);
+ reg |= ((n_tx - 1) & CQSPI_REG_CMDCTRL_WR_BYTES_MASK)
+ << CQSPI_REG_CMDCTRL_WR_BYTES_LSB;
+ data = 0;
+ write_len = (n_tx > 4) ? 4 : n_tx;
+ memcpy(&data, txbuf, write_len);
+ txbuf += write_len;
+ writel(data, reg_base + CQSPI_REG_CMDWRITEDATALOWER);
+
+ if (n_tx > 4) {
+ data = 0;
+ write_len = n_tx - 4;
+ memcpy(&data, txbuf, write_len);
+ writel(data, reg_base + CQSPI_REG_CMDWRITEDATAUPPER);
+ }
+ }
+
+ return cqspi_exec_flash_cmd(cqspi, reg);
+}
+
+static int cqspi_read_setup(struct cqspi_flash_pdata *f_pdata,
+ const struct spi_mem_op *op)
+{
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ void __iomem *reg_base = cqspi->iobase;
+ unsigned int dummy_clk = 0;
+ unsigned int reg;
+ int ret;
+ u8 opcode;
+
+ ret = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_READ_LSB);
+ if (ret)
+ return ret;
+
+ if (op->cmd.dtr)
+ opcode = op->cmd.opcode >> 8;
+ else
+ opcode = op->cmd.opcode;
+
+ reg = opcode << CQSPI_REG_RD_INSTR_OPCODE_LSB;
+ reg |= cqspi_calc_rdreg(op);
+
+ /* Setup dummy clock cycles */
+ dummy_clk = cqspi_calc_dummy(op);
+
+ if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
+ return -EOPNOTSUPP;
+
+ if (dummy_clk)
+ reg |= (dummy_clk & CQSPI_REG_RD_INSTR_DUMMY_MASK)
+ << CQSPI_REG_RD_INSTR_DUMMY_LSB;
+
+ writel(reg, reg_base + CQSPI_REG_RD_INSTR);
+
+ /* Set address width */
+ reg = readl(reg_base + CQSPI_REG_SIZE);
+ reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
+ reg |= (op->addr.nbytes - 1);
+ writel(reg, reg_base + CQSPI_REG_SIZE);
+ return 0;
+}
+
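+/*
+ * Indirect read: the controller fills its internal SRAM from the flash and
+ * the CPU drains it through the trigger window on the AHB bus, paced by the
+ * watermark, SRAM-full and completion interrupts.
+ */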
+static int cqspi_indirect_read_execute(struct cqspi_flash_pdata *f_pdata,
+ u8 *rxbuf, loff_t from_addr,
+ const size_t n_rx)
+{
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ struct device *dev = &cqspi->pdev->dev;
+ void __iomem *reg_base = cqspi->iobase;
+ void __iomem *ahb_base = cqspi->ahb_base;
+ unsigned int remaining = n_rx;
+ unsigned int mod_bytes = n_rx % 4;
+ unsigned int bytes_to_read = 0;
+ u8 *rxbuf_end = rxbuf + n_rx;
+ int ret = 0;
+
+ writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR);
+ writel(remaining, reg_base + CQSPI_REG_INDIRECTRDBYTES);
+
+ /* Clear all interrupts. */
+ writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);
+
+ /*
+ * On the SoCFPGA platform, reading the SRAM is slow due to a hardware
+ * limitation and causes a read interrupt storm on the CPU. Enable only
+ * the watermark interrupt here so that all read interrupts can be
+ * disabled later; the "bytes to read" loop then runs with the read
+ * interrupts disabled for maximum performance.
+ */
+
+ if (!cqspi->slow_sram)
+ writel(CQSPI_IRQ_MASK_RD, reg_base + CQSPI_REG_IRQMASK);
+ else
+ writel(CQSPI_REG_IRQ_WATERMARK, reg_base + CQSPI_REG_IRQMASK);
+
+ reinit_completion(&cqspi->transfer_complete);
+ writel(CQSPI_REG_INDIRECTRD_START_MASK,
+ reg_base + CQSPI_REG_INDIRECTRD);
+
+ while (remaining > 0) {
+ if (!wait_for_completion_timeout(&cqspi->transfer_complete,
+ msecs_to_jiffies(CQSPI_READ_TIMEOUT_MS)))
+ ret = -ETIMEDOUT;
+
+ /*
+ * Disable all read interrupts until
+ * we are out of "bytes to read"
+ */
+ if (cqspi->slow_sram)
+ writel(0x0, reg_base + CQSPI_REG_IRQMASK);
+
+ bytes_to_read = cqspi_get_rd_sram_level(cqspi);
+
+ if (ret && bytes_to_read == 0) {
+ dev_err(dev, "Indirect read timeout, no bytes\n");
+ goto failrd;
+ }
+
+ while (bytes_to_read != 0) {
+ unsigned int word_remain = round_down(remaining, 4);
+
+ bytes_to_read *= cqspi->fifo_width;
+ bytes_to_read = bytes_to_read > remaining ?
+ remaining : bytes_to_read;
+ bytes_to_read = round_down(bytes_to_read, 4);
+ /* Read 4 byte word chunks then single bytes */
+ if (bytes_to_read) {
+ ioread32_rep(ahb_base, rxbuf,
+ (bytes_to_read / 4));
+ } else if (!word_remain && mod_bytes) {
+ unsigned int temp = ioread32(ahb_base);
+
+ bytes_to_read = mod_bytes;
+ memcpy(rxbuf, &temp, min((unsigned int)
+ (rxbuf_end - rxbuf),
+ bytes_to_read));
+ }
+ rxbuf += bytes_to_read;
+ remaining -= bytes_to_read;
+ bytes_to_read = cqspi_get_rd_sram_level(cqspi);
+ }
+
+ if (remaining > 0) {
+ reinit_completion(&cqspi->transfer_complete);
+ if (cqspi->slow_sram)
+ writel(CQSPI_REG_IRQ_WATERMARK, reg_base + CQSPI_REG_IRQMASK);
+ }
+ }
+
+ /* Check indirect done status */
+ ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_INDIRECTRD,
+ CQSPI_REG_INDIRECTRD_DONE_MASK, 0);
+ if (ret) {
+ dev_err(dev, "Indirect read completion error (%i)\n", ret);
+ goto failrd;
+ }
+
+ /* Disable interrupt */
+ writel(0, reg_base + CQSPI_REG_IRQMASK);
+
+ /* Clear indirect completion status */
+ writel(CQSPI_REG_INDIRECTRD_DONE_MASK, reg_base + CQSPI_REG_INDIRECTRD);
+
+ return 0;
+
+failrd:
+ /* Disable interrupt */
+ writel(0, reg_base + CQSPI_REG_IRQMASK);
+
+ /* Cancel the indirect read */
+ writel(CQSPI_REG_INDIRECTRD_CANCEL_MASK,
+ reg_base + CQSPI_REG_INDIRECTRD);
+ return ret;
+}
+
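+/*
+ * Versal OSPI DMA read: transfer the 4-byte-aligned bulk of the request with
+ * the controller DMA and fall back to an indirect read for any trailing
+ * unaligned bytes.
+ */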
+static int cqspi_versal_indirect_read_dma(struct cqspi_flash_pdata *f_pdata,
+ u_char *rxbuf, loff_t from_addr,
+ size_t n_rx)
+{
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ struct device *dev = &cqspi->pdev->dev;
+ void __iomem *reg_base = cqspi->iobase;
+ u32 reg, bytes_to_dma;
+ loff_t addr = from_addr;
+ void *buf = rxbuf;
+ dma_addr_t dma_addr;
+ u8 bytes_rem;
+ int ret = 0;
+
+ bytes_rem = n_rx % 4;
+ bytes_to_dma = (n_rx - bytes_rem);
+
+ if (!bytes_to_dma)
+ goto nondmard;
+
+ ret = zynqmp_pm_ospi_mux_select(cqspi->pd_dev_id, PM_OSPI_MUX_SEL_DMA);
+ if (ret)
+ return ret;
+
+ reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
+ reg |= CQSPI_REG_CONFIG_DMA_MASK;
+ writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
+
+ dma_addr = dma_map_single(dev, rxbuf, bytes_to_dma, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, dma_addr)) {
+ dev_err(dev, "dma mapping failed\n");
+ return -ENOMEM;
+ }
+
+ writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR);
+ writel(bytes_to_dma, reg_base + CQSPI_REG_INDIRECTRDBYTES);
+ writel(CQSPI_REG_VERSAL_ADDRRANGE_WIDTH_VAL,
+ reg_base + CQSPI_REG_INDTRIG_ADDRRANGE);
+
+ /* Clear all interrupts. */
+ writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);
+
+ /* Enable DMA done interrupt */
+ writel(CQSPI_REG_VERSAL_DMA_DST_DONE_MASK,
+ reg_base + CQSPI_REG_VERSAL_DMA_DST_I_EN);
+
+ /* Default DMA periph configuration */
+ writel(CQSPI_REG_VERSAL_DMA_VAL, reg_base + CQSPI_REG_DMA);
+
+ /* Configure DMA Dst address */
+ writel(lower_32_bits(dma_addr),
+ reg_base + CQSPI_REG_VERSAL_DMA_DST_ADDR);
+ writel(upper_32_bits(dma_addr),
+ reg_base + CQSPI_REG_VERSAL_DMA_DST_ADDR_MSB);
+
+ /* Configure DMA Src address */
+ writel(cqspi->trigger_address, reg_base +
+ CQSPI_REG_VERSAL_DMA_SRC_ADDR);
+
+ /* Set DMA destination size */
+ writel(bytes_to_dma, reg_base + CQSPI_REG_VERSAL_DMA_DST_SIZE);
+
+ /* Set DMA destination control */
+ writel(CQSPI_REG_VERSAL_DMA_DST_CTRL_VAL,
+ reg_base + CQSPI_REG_VERSAL_DMA_DST_CTRL);
+
+ writel(CQSPI_REG_INDIRECTRD_START_MASK,
+ reg_base + CQSPI_REG_INDIRECTRD);
+
+ reinit_completion(&cqspi->transfer_complete);
+
+ if (!wait_for_completion_timeout(&cqspi->transfer_complete,
+ msecs_to_jiffies(CQSPI_READ_TIMEOUT_MS))) {
+ ret = -ETIMEDOUT;
+ goto failrd;
+ }
+
+ /* Disable DMA interrupt */
+ writel(0x0, cqspi->iobase + CQSPI_REG_VERSAL_DMA_DST_I_DIS);
+
+ /* Clear indirect completion status */
+ writel(CQSPI_REG_INDIRECTRD_DONE_MASK,
+ cqspi->iobase + CQSPI_REG_INDIRECTRD);
+ dma_unmap_single(dev, dma_addr, bytes_to_dma, DMA_FROM_DEVICE);
+
+ reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
+ reg &= ~CQSPI_REG_CONFIG_DMA_MASK;
+ writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
+
+ ret = zynqmp_pm_ospi_mux_select(cqspi->pd_dev_id,
+ PM_OSPI_MUX_SEL_LINEAR);
+ if (ret)
+ return ret;
+
+nondmard:
+ if (bytes_rem) {
+ addr += bytes_to_dma;
+ buf += bytes_to_dma;
+ ret = cqspi_indirect_read_execute(f_pdata, buf, addr,
+ bytes_rem);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+
+failrd:
+ /* Disable DMA interrupt */
+ writel(0x0, reg_base + CQSPI_REG_VERSAL_DMA_DST_I_DIS);
+
+ /* Cancel the indirect read */
+ writel(CQSPI_REG_INDIRECTRD_CANCEL_MASK,
+ reg_base + CQSPI_REG_INDIRECTRD);
+
+ dma_unmap_single(dev, dma_addr, bytes_to_dma, DMA_FROM_DEVICE);
+
+ reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
+ reg &= ~CQSPI_REG_CONFIG_DMA_MASK;
+ writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
+
+ zynqmp_pm_ospi_mux_select(cqspi->pd_dev_id, PM_OSPI_MUX_SEL_LINEAR);
+
+ return ret;
+}
+
+static int cqspi_write_setup(struct cqspi_flash_pdata *f_pdata,
+ const struct spi_mem_op *op)
+{
+ unsigned int reg;
+ int ret;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ void __iomem *reg_base = cqspi->iobase;
+ u8 opcode;
+
+ ret = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_WRITE_LSB);
+ if (ret)
+ return ret;
+
+ if (op->cmd.dtr)
+ opcode = op->cmd.opcode >> 8;
+ else
+ opcode = op->cmd.opcode;
+
+ /* Set opcode. */
+ reg = opcode << CQSPI_REG_WR_INSTR_OPCODE_LSB;
+ reg |= CQSPI_OP_WIDTH(op->data) << CQSPI_REG_WR_INSTR_TYPE_DATA_LSB;
+ reg |= CQSPI_OP_WIDTH(op->addr) << CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB;
+ writel(reg, reg_base + CQSPI_REG_WR_INSTR);
+ reg = cqspi_calc_rdreg(op);
+ writel(reg, reg_base + CQSPI_REG_RD_INSTR);
+
+ /*
+ * SPI NAND flashes require the address of the status register to be
+ * passed in the Read SR command. Also, some SPI NOR flashes like the
+ * Cypress Semper flash expect a 4-byte dummy address in the Read SR
+ * command in DTR mode.
+ *
+ * But this controller does not support address phase in the Read SR
+ * command when doing auto-HW polling. So, disable write completion
+ * polling on the controller's side. spinand and spi-nor will take
+ * care of polling the status register.
+ */
+ if (cqspi->wr_completion) {
+ reg = readl(reg_base + CQSPI_REG_WR_COMPLETION_CTRL);
+ reg |= CQSPI_REG_WR_DISABLE_AUTO_POLL;
+ writel(reg, reg_base + CQSPI_REG_WR_COMPLETION_CTRL);
+ }
+
+ reg = readl(reg_base + CQSPI_REG_SIZE);
+ reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
+ reg |= (op->addr.nbytes - 1);
+ writel(reg, reg_base + CQSPI_REG_SIZE);
+ return 0;
+}
+
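+/*
+ * Indirect write: the CPU pushes data into the controller SRAM through the
+ * trigger window and the controller drains it to the flash, with progress
+ * reported by the watermark, underflow and completion interrupts.
+ */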
+static int cqspi_indirect_write_execute(struct cqspi_flash_pdata *f_pdata,
+ loff_t to_addr, const u8 *txbuf,
+ const size_t n_tx)
+{
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ struct device *dev = &cqspi->pdev->dev;
+ void __iomem *reg_base = cqspi->iobase;
+ unsigned int remaining = n_tx;
+ unsigned int write_bytes;
+ int ret;
+
+ writel(to_addr, reg_base + CQSPI_REG_INDIRECTWRSTARTADDR);
+ writel(remaining, reg_base + CQSPI_REG_INDIRECTWRBYTES);
+
+ /* Clear all interrupts. */
+ writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);
+
+ writel(CQSPI_IRQ_MASK_WR, reg_base + CQSPI_REG_IRQMASK);
+
+ reinit_completion(&cqspi->transfer_complete);
+ writel(CQSPI_REG_INDIRECTWR_START_MASK,
+ reg_base + CQSPI_REG_INDIRECTWR);
+ /*
+ * As per 66AK2G02 TRM SPRUHY8F section 11.15.5.3 Indirect Access
+ * Controller programming sequence, a couple of QSPI_REF_CLK cycles of
+ * delay are required for the above bit to be internally synchronized
+ * by the QSPI module. Provide 5 cycles of delay.
+ */
+ if (cqspi->wr_delay)
+ ndelay(cqspi->wr_delay);
+
+ while (remaining > 0) {
+ size_t write_words, mod_bytes;
+
+ write_bytes = remaining;
+ write_words = write_bytes / 4;
+ mod_bytes = write_bytes % 4;
+ /* Write 4 bytes at a time then single bytes. */
+ if (write_words) {
+ iowrite32_rep(cqspi->ahb_base, txbuf, write_words);
+ txbuf += (write_words * 4);
+ }
+ if (mod_bytes) {
+ unsigned int temp = 0xFFFFFFFF;
+
+ memcpy(&temp, txbuf, mod_bytes);
+ iowrite32(temp, cqspi->ahb_base);
+ txbuf += mod_bytes;
+ }
+
+ if (!wait_for_completion_timeout(&cqspi->transfer_complete,
+ msecs_to_jiffies(CQSPI_TIMEOUT_MS))) {
+ dev_err(dev, "Indirect write timeout\n");
+ ret = -ETIMEDOUT;
+ goto failwr;
+ }
+
+ remaining -= write_bytes;
+
+ if (remaining > 0)
+ reinit_completion(&cqspi->transfer_complete);
+ }
+
+ /* Check indirect done status */
+ ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_INDIRECTWR,
+ CQSPI_REG_INDIRECTWR_DONE_MASK, 0);
+ if (ret) {
+ dev_err(dev, "Indirect write completion error (%i)\n", ret);
+ goto failwr;
+ }
+
+ /* Disable interrupt. */
+ writel(0, reg_base + CQSPI_REG_IRQMASK);
+
+ /* Clear indirect completion status */
+ writel(CQSPI_REG_INDIRECTWR_DONE_MASK, reg_base + CQSPI_REG_INDIRECTWR);
+
+ cqspi_wait_idle(cqspi);
+
+ return 0;
+
+failwr:
+ /* Disable interrupt. */
+ writel(0, reg_base + CQSPI_REG_IRQMASK);
+
+ /* Cancel the indirect write */
+ writel(CQSPI_REG_INDIRECTWR_CANCEL_MASK,
+ reg_base + CQSPI_REG_INDIRECTWR);
+ return ret;
+}
+
+static void cqspi_chipselect(struct cqspi_flash_pdata *f_pdata)
+{
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ void __iomem *reg_base = cqspi->iobase;
+ unsigned int chip_select = f_pdata->cs;
+ unsigned int reg;
+
+ reg = readl(reg_base + CQSPI_REG_CONFIG);
+ if (cqspi->is_decoded_cs) {
+ reg |= CQSPI_REG_CONFIG_DECODE_MASK;
+ } else {
+ reg &= ~CQSPI_REG_CONFIG_DECODE_MASK;
+
+ /* Convert the CS when no external decoder is used.
+ * CS0 to 4b'1110
+ * CS1 to 4b'1101
+ * CS2 to 4b'1011
+ * CS3 to 4b'0111
+ */
+ chip_select = 0xF & ~(1 << chip_select);
+ }
+
+ reg &= ~(CQSPI_REG_CONFIG_CHIPSELECT_MASK
+ << CQSPI_REG_CONFIG_CHIPSELECT_LSB);
+ reg |= (chip_select & CQSPI_REG_CONFIG_CHIPSELECT_MASK)
+ << CQSPI_REG_CONFIG_CHIPSELECT_LSB;
+ writel(reg, reg_base + CQSPI_REG_CONFIG);
+}
+
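+/*
+ * Convert a delay in nanoseconds into reference clock ticks, rounding up
+ * (ticks = ref_clk_kHz * ns_val / 1000000).
+ */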
+static unsigned int calculate_ticks_for_ns(const unsigned int ref_clk_hz,
+ const unsigned int ns_val)
+{
+ unsigned int ticks;
+
+ ticks = ref_clk_hz / 1000; /* kHz */
+ ticks = DIV_ROUND_UP(ticks * ns_val, 1000000);
+
+ return ticks;
+}
+
+static void cqspi_delay(struct cqspi_flash_pdata *f_pdata)
+{
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ void __iomem *iobase = cqspi->iobase;
+ const unsigned int ref_clk_hz = cqspi->master_ref_clk_hz;
+ unsigned int tshsl, tchsh, tslch, tsd2d;
+ unsigned int reg;
+ unsigned int tsclk;
+
+ /* calculate the number of ref ticks for one sclk tick */
+ tsclk = DIV_ROUND_UP(ref_clk_hz, cqspi->sclk);
+
+ tshsl = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tshsl_ns);
+ /* this particular value must be at least one sclk */
+ if (tshsl < tsclk)
+ tshsl = tsclk;
+
+ tchsh = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tchsh_ns);
+ tslch = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tslch_ns);
+ tsd2d = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tsd2d_ns);
+
+ reg = (tshsl & CQSPI_REG_DELAY_TSHSL_MASK)
+ << CQSPI_REG_DELAY_TSHSL_LSB;
+ reg |= (tchsh & CQSPI_REG_DELAY_TCHSH_MASK)
+ << CQSPI_REG_DELAY_TCHSH_LSB;
+ reg |= (tslch & CQSPI_REG_DELAY_TSLCH_MASK)
+ << CQSPI_REG_DELAY_TSLCH_LSB;
+ reg |= (tsd2d & CQSPI_REG_DELAY_TSD2D_MASK)
+ << CQSPI_REG_DELAY_TSD2D_LSB;
+ writel(reg, iobase + CQSPI_REG_DELAY);
+}
+
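+/*
+ * Program the baud rate divider. The BAUD field encodes a divide ratio of
+ * 2 * (div + 1), so pick the smallest div for which the resulting clock does
+ * not exceed the requested sclk.
+ */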
+static void cqspi_config_baudrate_div(struct cqspi_st *cqspi)
+{
+ const unsigned int ref_clk_hz = cqspi->master_ref_clk_hz;
+ void __iomem *reg_base = cqspi->iobase;
+ u32 reg, div;
+
+ /* Recalculate the baud rate divisor based on the QSPI specification. */
+ div = DIV_ROUND_UP(ref_clk_hz, 2 * cqspi->sclk) - 1;
+
+ reg = readl(reg_base + CQSPI_REG_CONFIG);
+ reg &= ~(CQSPI_REG_CONFIG_BAUD_MASK << CQSPI_REG_CONFIG_BAUD_LSB);
+ reg |= (div & CQSPI_REG_CONFIG_BAUD_MASK) << CQSPI_REG_CONFIG_BAUD_LSB;
+ writel(reg, reg_base + CQSPI_REG_CONFIG);
+}
+
+static void cqspi_readdata_capture(struct cqspi_st *cqspi,
+ const bool bypass,
+ const unsigned int delay)
+{
+ void __iomem *reg_base = cqspi->iobase;
+ unsigned int reg;
+
+ reg = readl(reg_base + CQSPI_REG_READCAPTURE);
+
+ if (bypass)
+ reg |= (1 << CQSPI_REG_READCAPTURE_BYPASS_LSB);
+ else
+ reg &= ~(1 << CQSPI_REG_READCAPTURE_BYPASS_LSB);
+
+ reg &= ~(CQSPI_REG_READCAPTURE_DELAY_MASK
+ << CQSPI_REG_READCAPTURE_DELAY_LSB);
+
+ reg |= (delay & CQSPI_REG_READCAPTURE_DELAY_MASK)
+ << CQSPI_REG_READCAPTURE_DELAY_LSB;
+
+ writel(reg, reg_base + CQSPI_REG_READCAPTURE);
+}
+
+static void cqspi_controller_enable(struct cqspi_st *cqspi, bool enable)
+{
+ void __iomem *reg_base = cqspi->iobase;
+ unsigned int reg;
+
+ reg = readl(reg_base + CQSPI_REG_CONFIG);
+
+ if (enable)
+ reg |= CQSPI_REG_CONFIG_ENABLE_MASK;
+ else
+ reg &= ~CQSPI_REG_CONFIG_ENABLE_MASK;
+
+ writel(reg, reg_base + CQSPI_REG_CONFIG);
+}
+
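+/*
+ * Reconfigure the chip select, baud rate divider and delay timings only when
+ * they actually change, disabling the controller around the update.
+ */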
+static void cqspi_configure(struct cqspi_flash_pdata *f_pdata,
+ unsigned long sclk)
+{
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ int switch_cs = (cqspi->current_cs != f_pdata->cs);
+ int switch_ck = (cqspi->sclk != sclk);
+
+ if (switch_cs || switch_ck)
+ cqspi_controller_enable(cqspi, 0);
+
+ /* Switch chip select. */
+ if (switch_cs) {
+ cqspi->current_cs = f_pdata->cs;
+ cqspi_chipselect(f_pdata);
+ }
+
+ /* Setup baudrate divisor and delays */
+ if (switch_ck) {
+ cqspi->sclk = sclk;
+ cqspi_config_baudrate_div(cqspi);
+ cqspi_delay(f_pdata);
+ cqspi_readdata_capture(cqspi, !cqspi->rclk_en,
+ f_pdata->read_delay);
+ }
+
+ if (switch_cs || switch_ck)
+ cqspi_controller_enable(cqspi, 1);
+}
+
+static ssize_t cqspi_write(struct cqspi_flash_pdata *f_pdata,
+ const struct spi_mem_op *op)
+{
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ loff_t to = op->addr.val;
+ size_t len = op->data.nbytes;
+ const u_char *buf = op->data.buf.out;
+ int ret;
+
+ ret = cqspi_write_setup(f_pdata, op);
+ if (ret)
+ return ret;
+
+ /*
+ * Some flashes like the Cypress Semper flash expect a dummy 4-byte
+ * address (all 0s) with the read status register command in DTR mode.
+ * But this controller does not support sending dummy address bytes to
+ * the flash when it is polling the write completion register in DTR
+ * mode. So, we cannot use direct mode when in DTR mode for writing
+ * data.
+ */
+ if (!op->cmd.dtr && cqspi->use_direct_mode &&
+ ((to + len) <= cqspi->ahb_size)) {
+ memcpy_toio(cqspi->ahb_base + to, buf, len);
+ return cqspi_wait_idle(cqspi);
+ }
+
+ return cqspi_indirect_write_execute(f_pdata, to, buf, len);
+}
+
+static void cqspi_rx_dma_callback(void *param)
+{
+ struct cqspi_st *cqspi = param;
+
+ complete(&cqspi->rx_dma_complete);
+}
+
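+/*
+ * Direct (memory-mapped) read. Use the memcpy DMA channel when one is
+ * available and the buffer address is DMA-able, otherwise fall back to
+ * memcpy_fromio() from the AHB window.
+ */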
+static int cqspi_direct_read_execute(struct cqspi_flash_pdata *f_pdata,
+ u_char *buf, loff_t from, size_t len)
+{
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ struct device *dev = &cqspi->pdev->dev;
+ enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+ dma_addr_t dma_src = (dma_addr_t)cqspi->mmap_phys_base + from;
+ int ret = 0;
+ struct dma_async_tx_descriptor *tx;
+ dma_cookie_t cookie;
+ dma_addr_t dma_dst;
+ struct device *ddev;
+
+ if (!cqspi->rx_chan || !virt_addr_valid(buf)) {
+ memcpy_fromio(buf, cqspi->ahb_base + from, len);
+ return 0;
+ }
+
+ ddev = cqspi->rx_chan->device->dev;
+ dma_dst = dma_map_single(ddev, buf, len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(ddev, dma_dst)) {
+ dev_err(dev, "dma mapping failed\n");
+ return -ENOMEM;
+ }
+ tx = dmaengine_prep_dma_memcpy(cqspi->rx_chan, dma_dst, dma_src,
+ len, flags);
+ if (!tx) {
+ dev_err(dev, "device_prep_dma_memcpy error\n");
+ ret = -EIO;
+ goto err_unmap;
+ }
+
+ tx->callback = cqspi_rx_dma_callback;
+ tx->callback_param = cqspi;
+ cookie = tx->tx_submit(tx);
+ reinit_completion(&cqspi->rx_dma_complete);
+
+ ret = dma_submit_error(cookie);
+ if (ret) {
+ dev_err(dev, "dma_submit_error %d\n", cookie);
+ ret = -EIO;
+ goto err_unmap;
+ }
+
+ dma_async_issue_pending(cqspi->rx_chan);
+ if (!wait_for_completion_timeout(&cqspi->rx_dma_complete,
+ msecs_to_jiffies(max_t(size_t, len, 500)))) {
+ dmaengine_terminate_sync(cqspi->rx_chan);
+ dev_err(dev, "DMA wait_for_completion_timeout\n");
+ ret = -ETIMEDOUT;
+ goto err_unmap;
+ }
+
+err_unmap:
+ dma_unmap_single(ddev, dma_dst, len, DMA_FROM_DEVICE);
+
+ return ret;
+}
+
+static ssize_t cqspi_read(struct cqspi_flash_pdata *f_pdata,
+ const struct spi_mem_op *op)
+{
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ struct device *dev = &cqspi->pdev->dev;
+ const struct cqspi_driver_platdata *ddata;
+ loff_t from = op->addr.val;
+ size_t len = op->data.nbytes;
+ u_char *buf = op->data.buf.in;
+ u64 dma_align = (u64)(uintptr_t)buf;
+ int ret;
+
+ ddata = of_device_get_match_data(dev);
+
+ ret = cqspi_read_setup(f_pdata, op);
+ if (ret)
+ return ret;
+
+ if (cqspi->use_direct_mode && ((from + len) <= cqspi->ahb_size))
+ return cqspi_direct_read_execute(f_pdata, buf, from, len);
+
+ if (cqspi->use_dma_read && ddata && ddata->indirect_read_dma &&
+ virt_addr_valid(buf) && ((dma_align & CQSPI_DMA_UNALIGN) == 0))
+ return ddata->indirect_read_dma(f_pdata, buf, from, len);
+
+ return cqspi_indirect_read_execute(f_pdata, buf, from, len);
+}
+
+static int cqspi_mem_process(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ struct cqspi_st *cqspi = spi_master_get_devdata(mem->spi->master);
+ struct cqspi_flash_pdata *f_pdata;
+
+ f_pdata = &cqspi->f_pdata[mem->spi->chip_select];
+ cqspi_configure(f_pdata, mem->spi->max_speed_hz);
+
+ if (op->data.dir == SPI_MEM_DATA_IN && op->data.buf.in) {
+ if (!op->addr.nbytes)
+ return cqspi_command_read(f_pdata, op);
+
+ return cqspi_read(f_pdata, op);
+ }
+
+ if (!op->addr.nbytes || !op->data.buf.out)
+ return cqspi_command_write(f_pdata, op);
+
+ return cqspi_write(f_pdata, op);
+}
+
+static int cqspi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ int ret;
+
+ ret = cqspi_mem_process(mem, op);
+ if (ret)
+ dev_err(&mem->spi->dev, "operation failed with %d\n", ret);
+
+ return ret;
+}
+
+static bool cqspi_supports_mem_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ bool all_true, all_false;
+
+ /*
+ * op->dummy.dtr is required for converting nbytes into ncycles.
+ * Also, don't check the dtr field of the op phase having zero nbytes.
+ */
+ all_true = op->cmd.dtr &&
+ (!op->addr.nbytes || op->addr.dtr) &&
+ (!op->dummy.nbytes || op->dummy.dtr) &&
+ (!op->data.nbytes || op->data.dtr);
+
+ all_false = !op->cmd.dtr && !op->addr.dtr && !op->dummy.dtr &&
+ !op->data.dtr;
+
+ if (all_true) {
+ /* Right now we only support 8-8-8 DTR mode. */
+ if (op->cmd.nbytes && op->cmd.buswidth != 8)
+ return false;
+ if (op->addr.nbytes && op->addr.buswidth != 8)
+ return false;
+ if (op->data.nbytes && op->data.buswidth != 8)
+ return false;
+ } else if (!all_false) {
+ /* Mixed DTR modes are not supported. */
+ return false;
+ }
+
+ return spi_mem_default_supports_op(mem, op);
+}
+
+static int cqspi_of_get_flash_pdata(struct platform_device *pdev,
+ struct cqspi_flash_pdata *f_pdata,
+ struct device_node *np)
+{
+ if (of_property_read_u32(np, "cdns,read-delay", &f_pdata->read_delay)) {
+ dev_err(&pdev->dev, "couldn't determine read-delay\n");
+ return -ENXIO;
+ }
+
+ if (of_property_read_u32(np, "cdns,tshsl-ns", &f_pdata->tshsl_ns)) {
+ dev_err(&pdev->dev, "couldn't determine tshsl-ns\n");
+ return -ENXIO;
+ }
+
+ if (of_property_read_u32(np, "cdns,tsd2d-ns", &f_pdata->tsd2d_ns)) {
+ dev_err(&pdev->dev, "couldn't determine tsd2d-ns\n");
+ return -ENXIO;
+ }
+
+ if (of_property_read_u32(np, "cdns,tchsh-ns", &f_pdata->tchsh_ns)) {
+ dev_err(&pdev->dev, "couldn't determine tchsh-ns\n");
+ return -ENXIO;
+ }
+
+ if (of_property_read_u32(np, "cdns,tslch-ns", &f_pdata->tslch_ns)) {
+ dev_err(&pdev->dev, "couldn't determine tslch-ns\n");
+ return -ENXIO;
+ }
+
+ if (of_property_read_u32(np, "spi-max-frequency", &f_pdata->clk_rate)) {
+ dev_err(&pdev->dev, "couldn't determine spi-max-frequency\n");
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static int cqspi_of_get_pdata(struct cqspi_st *cqspi)
+{
+ struct device *dev = &cqspi->pdev->dev;
+ struct device_node *np = dev->of_node;
+ u32 id[2];
+
+ cqspi->is_decoded_cs = of_property_read_bool(np, "cdns,is-decoded-cs");
+
+ if (of_property_read_u32(np, "cdns,fifo-depth", &cqspi->fifo_depth)) {
+ dev_err(dev, "couldn't determine fifo-depth\n");
+ return -ENXIO;
+ }
+
+ if (of_property_read_u32(np, "cdns,fifo-width", &cqspi->fifo_width)) {
+ dev_err(dev, "couldn't determine fifo-width\n");
+ return -ENXIO;
+ }
+
+ if (of_property_read_u32(np, "cdns,trigger-address",
+ &cqspi->trigger_address)) {
+ dev_err(dev, "couldn't determine trigger-address\n");
+ return -ENXIO;
+ }
+
+ if (of_property_read_u32(np, "num-cs", &cqspi->num_chipselect))
+ cqspi->num_chipselect = CQSPI_MAX_CHIPSELECT;
+
+ cqspi->rclk_en = of_property_read_bool(np, "cdns,rclk-en");
+
+ if (!of_property_read_u32_array(np, "power-domains", id,
+ ARRAY_SIZE(id)))
+ cqspi->pd_dev_id = id[1];
+
+ return 0;
+}
+
+static void cqspi_controller_init(struct cqspi_st *cqspi)
+{
+ u32 reg;
+
+ cqspi_controller_enable(cqspi, 0);
+
+ /* Configure the remap address register, no remap */
+ writel(0, cqspi->iobase + CQSPI_REG_REMAP);
+
+ /* Disable all interrupts. */
+ writel(0, cqspi->iobase + CQSPI_REG_IRQMASK);
+
+ /* Configure the SRAM split to 1:1. */
+ writel(cqspi->fifo_depth / 2, cqspi->iobase + CQSPI_REG_SRAMPARTITION);
+
+ /* Load indirect trigger address. */
+ writel(cqspi->trigger_address,
+ cqspi->iobase + CQSPI_REG_INDIRECTTRIGGER);
+
+ /* Program read watermark -- 1/2 of the FIFO. */
+ writel(cqspi->fifo_depth * cqspi->fifo_width / 2,
+ cqspi->iobase + CQSPI_REG_INDIRECTRDWATERMARK);
+ /* Program write watermark -- 1/8 of the FIFO. */
+ writel(cqspi->fifo_depth * cqspi->fifo_width / 8,
+ cqspi->iobase + CQSPI_REG_INDIRECTWRWATERMARK);
+
+ /* Disable direct access controller */
+ if (!cqspi->use_direct_mode) {
+ reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
+ reg &= ~CQSPI_REG_CONFIG_ENB_DIR_ACC_CTRL;
+ writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
+ }
+
+ /* Enable DMA interface */
+ if (cqspi->use_dma_read) {
+ reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
+ reg |= CQSPI_REG_CONFIG_DMA_MASK;
+ writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
+ }
+
+ cqspi_controller_enable(cqspi, 1);
+}
+
+static int cqspi_request_mmap_dma(struct cqspi_st *cqspi)
+{
+ dma_cap_mask_t mask;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_MEMCPY, mask);
+
+ cqspi->rx_chan = dma_request_chan_by_mask(&mask);
+ if (IS_ERR(cqspi->rx_chan)) {
+ int ret = PTR_ERR(cqspi->rx_chan);
+
+ cqspi->rx_chan = NULL;
+ return dev_err_probe(&cqspi->pdev->dev, ret, "No Rx DMA available\n");
+ }
+ init_completion(&cqspi->rx_dma_complete);
+
+ return 0;
+}
+
+static const char *cqspi_get_name(struct spi_mem *mem)
+{
+ struct cqspi_st *cqspi = spi_master_get_devdata(mem->spi->master);
+ struct device *dev = &cqspi->pdev->dev;
+
+ return devm_kasprintf(dev, GFP_KERNEL, "%s.%d", dev_name(dev), mem->spi->chip_select);
+}
+
+static const struct spi_controller_mem_ops cqspi_mem_ops = {
+ .exec_op = cqspi_exec_mem_op,
+ .get_name = cqspi_get_name,
+ .supports_op = cqspi_supports_mem_op,
+};
+
+static const struct spi_controller_mem_caps cqspi_mem_caps = {
+ .dtr = true,
+};
+
+static int cqspi_setup_flash(struct cqspi_st *cqspi)
+{
+ struct platform_device *pdev = cqspi->pdev;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct cqspi_flash_pdata *f_pdata;
+ unsigned int cs;
+ int ret;
+
+ /* Get flash device data */
+ for_each_available_child_of_node(dev->of_node, np) {
+ ret = of_property_read_u32(np, "reg", &cs);
+ if (ret) {
+ dev_err(dev, "Couldn't determine chip select.\n");
+ of_node_put(np);
+ return ret;
+ }
+
+ if (cs >= CQSPI_MAX_CHIPSELECT) {
+ dev_err(dev, "Chip select %d out of range.\n", cs);
+ of_node_put(np);
+ return -EINVAL;
+ }
+
+ f_pdata = &cqspi->f_pdata[cs];
+ f_pdata->cqspi = cqspi;
+ f_pdata->cs = cs;
+
+ ret = cqspi_of_get_flash_pdata(pdev, f_pdata, np);
+ if (ret) {
+ of_node_put(np);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int cqspi_probe(struct platform_device *pdev)
+{
+ const struct cqspi_driver_platdata *ddata;
+ struct reset_control *rstc, *rstc_ocp;
+ struct device *dev = &pdev->dev;
+ struct spi_master *master;
+ struct resource *res_ahb;
+ struct cqspi_st *cqspi;
+ struct resource *res;
+ int ret;
+ int irq;
+
+ master = devm_spi_alloc_master(&pdev->dev, sizeof(*cqspi));
+ if (!master) {
+ dev_err(&pdev->dev, "spi_alloc_master failed\n");
+ return -ENOMEM;
+ }
+ master->mode_bits = SPI_RX_QUAD | SPI_RX_DUAL;
+ master->mem_ops = &cqspi_mem_ops;
+ master->mem_caps = &cqspi_mem_caps;
+ master->dev.of_node = pdev->dev.of_node;
+
+ cqspi = spi_master_get_devdata(master);
+
+ cqspi->pdev = pdev;
+ cqspi->master = master;
+ platform_set_drvdata(pdev, cqspi);
+
+ /* Obtain configuration from OF. */
+ ret = cqspi_of_get_pdata(cqspi);
+ if (ret) {
+ dev_err(dev, "Cannot get mandatory OF data.\n");
+ return -ENODEV;
+ }
+
+ /* Obtain QSPI clock. */
+ cqspi->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(cqspi->clk)) {
+ dev_err(dev, "Cannot claim QSPI clock.\n");
+ ret = PTR_ERR(cqspi->clk);
+ return ret;
+ }
+
+ /* Obtain and remap controller address. */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ cqspi->iobase = devm_ioremap_resource(dev, res);
+ if (IS_ERR(cqspi->iobase)) {
+ dev_err(dev, "Cannot remap controller address.\n");
+ ret = PTR_ERR(cqspi->iobase);
+ return ret;
+ }
+
+ /* Obtain and remap AHB address. */
+ res_ahb = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ cqspi->ahb_base = devm_ioremap_resource(dev, res_ahb);
+ if (IS_ERR(cqspi->ahb_base)) {
+ dev_err(dev, "Cannot remap AHB address.\n");
+ ret = PTR_ERR(cqspi->ahb_base);
+ return ret;
+ }
+ cqspi->mmap_phys_base = (dma_addr_t)res_ahb->start;
+ cqspi->ahb_size = resource_size(res_ahb);
+
+ init_completion(&cqspi->transfer_complete);
+
+ /* Obtain IRQ line. */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return -ENXIO;
+
+ pm_runtime_enable(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ goto probe_pm_failed;
+
+ ret = clk_prepare_enable(cqspi->clk);
+ if (ret) {
+ dev_err(dev, "Cannot enable QSPI clock.\n");
+ goto probe_clk_failed;
+ }
+
+ /* Obtain QSPI reset control */
+ rstc = devm_reset_control_get_optional_exclusive(dev, "qspi");
+ if (IS_ERR(rstc)) {
+ ret = PTR_ERR(rstc);
+ dev_err(dev, "Cannot get QSPI reset.\n");
+ goto probe_reset_failed;
+ }
+
+ rstc_ocp = devm_reset_control_get_optional_exclusive(dev, "qspi-ocp");
+ if (IS_ERR(rstc_ocp)) {
+ ret = PTR_ERR(rstc_ocp);
+ dev_err(dev, "Cannot get QSPI OCP reset.\n");
+ goto probe_reset_failed;
+ }
+
+ reset_control_assert(rstc);
+ reset_control_deassert(rstc);
+
+ reset_control_assert(rstc_ocp);
+ reset_control_deassert(rstc_ocp);
+
+ cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clk);
+ master->max_speed_hz = cqspi->master_ref_clk_hz;
+
+ /* write completion is supported by default */
+ cqspi->wr_completion = true;
+
+ ddata = of_device_get_match_data(dev);
+ if (ddata) {
+ if (ddata->quirks & CQSPI_NEEDS_WR_DELAY)
+ cqspi->wr_delay = 50 * DIV_ROUND_UP(NSEC_PER_SEC,
+ cqspi->master_ref_clk_hz);
+ if (ddata->hwcaps_mask & CQSPI_SUPPORTS_OCTAL)
+ master->mode_bits |= SPI_RX_OCTAL | SPI_TX_OCTAL;
+ if (!(ddata->quirks & CQSPI_DISABLE_DAC_MODE))
+ cqspi->use_direct_mode = true;
+ if (ddata->quirks & CQSPI_SUPPORT_EXTERNAL_DMA)
+ cqspi->use_dma_read = true;
+ if (ddata->quirks & CQSPI_NO_SUPPORT_WR_COMPLETION)
+ cqspi->wr_completion = false;
+ if (ddata->quirks & CQSPI_SLOW_SRAM)
+ cqspi->slow_sram = true;
+
+ if (of_device_is_compatible(pdev->dev.of_node,
+ "xlnx,versal-ospi-1.0")) {
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret)
+ goto probe_reset_failed;
+ }
+ }
+
+ ret = devm_request_irq(dev, irq, cqspi_irq_handler, 0,
+ pdev->name, cqspi);
+ if (ret) {
+ dev_err(dev, "Cannot request IRQ.\n");
+ goto probe_reset_failed;
+ }
+
+ cqspi_wait_idle(cqspi);
+ cqspi_controller_init(cqspi);
+ cqspi->current_cs = -1;
+ cqspi->sclk = 0;
+
+ master->num_chipselect = cqspi->num_chipselect;
+
+ ret = cqspi_setup_flash(cqspi);
+ if (ret) {
+ dev_err(dev, "failed to setup flash parameters %d\n", ret);
+ goto probe_setup_failed;
+ }
+
+ if (cqspi->use_direct_mode) {
+ ret = cqspi_request_mmap_dma(cqspi);
+ if (ret == -EPROBE_DEFER)
+ goto probe_setup_failed;
+ }
+
+ ret = spi_register_master(master);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register SPI ctlr %d\n", ret);
+ goto probe_setup_failed;
+ }
+
+ return 0;
+probe_setup_failed:
+ cqspi_controller_enable(cqspi, 0);
+probe_reset_failed:
+ clk_disable_unprepare(cqspi->clk);
+probe_clk_failed:
+ pm_runtime_put_sync(dev);
+probe_pm_failed:
+ pm_runtime_disable(dev);
+ return ret;
+}
+
+static int cqspi_remove(struct platform_device *pdev)
+{
+ struct cqspi_st *cqspi = platform_get_drvdata(pdev);
+
+ spi_unregister_master(cqspi->master);
+ cqspi_controller_enable(cqspi, 0);
+
+ if (cqspi->rx_chan)
+ dma_release_channel(cqspi->rx_chan);
+
+ clk_disable_unprepare(cqspi->clk);
+
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static int cqspi_suspend(struct device *dev)
+{
+ struct cqspi_st *cqspi = dev_get_drvdata(dev);
+ struct spi_master *master = cqspi->master;
+ int ret;
+
+ ret = spi_master_suspend(master);
+ cqspi_controller_enable(cqspi, 0);
+
+ clk_disable_unprepare(cqspi->clk);
+
+ return ret;
+}
+
+static int cqspi_resume(struct device *dev)
+{
+ struct cqspi_st *cqspi = dev_get_drvdata(dev);
+ struct spi_master *master = cqspi->master;
+
+ clk_prepare_enable(cqspi->clk);
+ cqspi_wait_idle(cqspi);
+ cqspi_controller_init(cqspi);
+
+ cqspi->current_cs = -1;
+ cqspi->sclk = 0;
+
+ return spi_master_resume(master);
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(cqspi_dev_pm_ops, cqspi_suspend, cqspi_resume);
+
+static const struct cqspi_driver_platdata cdns_qspi = {
+ .quirks = CQSPI_DISABLE_DAC_MODE,
+};
+
+static const struct cqspi_driver_platdata k2g_qspi = {
+ .quirks = CQSPI_NEEDS_WR_DELAY,
+};
+
+static const struct cqspi_driver_platdata am654_ospi = {
+ .hwcaps_mask = CQSPI_SUPPORTS_OCTAL,
+ .quirks = CQSPI_NEEDS_WR_DELAY,
+};
+
+static const struct cqspi_driver_platdata intel_lgm_qspi = {
+ .quirks = CQSPI_DISABLE_DAC_MODE,
+};
+
+static const struct cqspi_driver_platdata socfpga_qspi = {
+ .quirks = CQSPI_DISABLE_DAC_MODE
+ | CQSPI_NO_SUPPORT_WR_COMPLETION
+ | CQSPI_SLOW_SRAM,
+};
+
+static const struct cqspi_driver_platdata versal_ospi = {
+ .hwcaps_mask = CQSPI_SUPPORTS_OCTAL,
+ .quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_SUPPORT_EXTERNAL_DMA,
+ .indirect_read_dma = cqspi_versal_indirect_read_dma,
+ .get_dma_status = cqspi_get_versal_dma_status,
+};
+
+static const struct of_device_id cqspi_dt_ids[] = {
+ {
+ .compatible = "cdns,qspi-nor",
+ .data = &cdns_qspi,
+ },
+ {
+ .compatible = "ti,k2g-qspi",
+ .data = &k2g_qspi,
+ },
+ {
+ .compatible = "ti,am654-ospi",
+ .data = &am654_ospi,
+ },
+ {
+ .compatible = "intel,lgm-qspi",
+ .data = &intel_lgm_qspi,
+ },
+ {
+ .compatible = "xlnx,versal-ospi-1.0",
+ .data = &versal_ospi,
+ },
+ {
+ .compatible = "intel,socfpga-qspi",
+ .data = &socfpga_qspi,
+ },
+ { /* end of table */ }
+};
+
+MODULE_DEVICE_TABLE(of, cqspi_dt_ids);
+
+static struct platform_driver cqspi_platform_driver = {
+ .probe = cqspi_probe,
+ .remove = cqspi_remove,
+ .driver = {
+ .name = CQSPI_NAME,
+ .pm = &cqspi_dev_pm_ops,
+ .of_match_table = cqspi_dt_ids,
+ },
+};
+
+module_platform_driver(cqspi_platform_driver);
+
+MODULE_DESCRIPTION("Cadence QSPI Controller Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" CQSPI_NAME);
+MODULE_AUTHOR("Ley Foon Tan <lftan@altera.com>");
+MODULE_AUTHOR("Graham Moore <grmoore@opensource.altera.com>");
+MODULE_AUTHOR("Vadivel Murugan R <vadivel.muruganx.ramuthevar@intel.com>");
+MODULE_AUTHOR("Vignesh Raghavendra <vigneshr@ti.com>");
+MODULE_AUTHOR("Pratyush Yadav <p.yadav@ti.com>");
diff --git a/drivers/spi/spi-cadence-xspi.c b/drivers/spi/spi-cadence-xspi.c
new file mode 100644
index 000000000..d28b8bd5b
--- /dev/null
+++ b/drivers/spi/spi-cadence-xspi.c
@@ -0,0 +1,641 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Cadence XSPI flash controller driver
+// Copyright (C) 2020-21 Cadence
+
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+#include <linux/bitfield.h>
+#include <linux/limits.h>
+#include <linux/log2.h>
+
+#define CDNS_XSPI_MAGIC_NUM_VALUE 0x6522
+#define CDNS_XSPI_MAX_BANKS 8
+#define CDNS_XSPI_NAME "cadence-xspi"
+
+/*
+ * Note: below are additional auxiliary registers to
+ * configure XSPI controller pin-strap settings
+ */
+
+/* PHY DQ timing register */
+#define CDNS_XSPI_CCP_PHY_DQ_TIMING 0x0000
+
+/* PHY DQS timing register */
+#define CDNS_XSPI_CCP_PHY_DQS_TIMING 0x0004
+
+/* PHY gate loopback control register */
+#define CDNS_XSPI_CCP_PHY_GATE_LPBCK_CTRL 0x0008
+
+/* PHY DLL slave control register */
+#define CDNS_XSPI_CCP_PHY_DLL_SLAVE_CTRL 0x0010
+
+/* DLL PHY control register */
+#define CDNS_XSPI_DLL_PHY_CTRL 0x1034
+
+/* Command registers */
+#define CDNS_XSPI_CMD_REG_0 0x0000
+#define CDNS_XSPI_CMD_REG_1 0x0004
+#define CDNS_XSPI_CMD_REG_2 0x0008
+#define CDNS_XSPI_CMD_REG_3 0x000C
+#define CDNS_XSPI_CMD_REG_4 0x0010
+#define CDNS_XSPI_CMD_REG_5 0x0014
+
+/* Command status registers */
+#define CDNS_XSPI_CMD_STATUS_REG 0x0044
+
+/* Controller status register */
+#define CDNS_XSPI_CTRL_STATUS_REG 0x0100
+#define CDNS_XSPI_INIT_COMPLETED BIT(16)
+#define CDNS_XSPI_INIT_LEGACY BIT(9)
+#define CDNS_XSPI_INIT_FAIL BIT(8)
+#define CDNS_XSPI_CTRL_BUSY BIT(7)
+
+/* Controller interrupt status register */
+#define CDNS_XSPI_INTR_STATUS_REG 0x0110
+#define CDNS_XSPI_STIG_DONE BIT(23)
+#define CDNS_XSPI_SDMA_ERROR BIT(22)
+#define CDNS_XSPI_SDMA_TRIGGER BIT(21)
+#define CDNS_XSPI_CMD_IGNRD_EN BIT(20)
+#define CDNS_XSPI_DDMA_TERR_EN BIT(18)
+#define CDNS_XSPI_CDMA_TREE_EN BIT(17)
+#define CDNS_XSPI_CTRL_IDLE_EN BIT(16)
+
+#define CDNS_XSPI_TRD_COMP_INTR_STATUS 0x0120
+#define CDNS_XSPI_TRD_ERR_INTR_STATUS 0x0130
+#define CDNS_XSPI_TRD_ERR_INTR_EN 0x0134
+
+/* Controller interrupt enable register */
+#define CDNS_XSPI_INTR_ENABLE_REG 0x0114
+#define CDNS_XSPI_INTR_EN BIT(31)
+#define CDNS_XSPI_STIG_DONE_EN BIT(23)
+#define CDNS_XSPI_SDMA_ERROR_EN BIT(22)
+#define CDNS_XSPI_SDMA_TRIGGER_EN BIT(21)
+
+#define CDNS_XSPI_INTR_MASK (CDNS_XSPI_INTR_EN | \
+ CDNS_XSPI_STIG_DONE_EN | \
+ CDNS_XSPI_SDMA_ERROR_EN | \
+ CDNS_XSPI_SDMA_TRIGGER_EN)
+
+/* Controller config register */
+#define CDNS_XSPI_CTRL_CONFIG_REG 0x0230
+#define CDNS_XSPI_CTRL_WORK_MODE GENMASK(6, 5)
+
+#define CDNS_XSPI_WORK_MODE_DIRECT 0
+#define CDNS_XSPI_WORK_MODE_STIG 1
+#define CDNS_XSPI_WORK_MODE_ACMD 3
+
+/* SDMA trigger transaction registers */
+#define CDNS_XSPI_SDMA_SIZE_REG 0x0240
+#define CDNS_XSPI_SDMA_TRD_INFO_REG 0x0244
+#define CDNS_XSPI_SDMA_DIR BIT(8)
+
+/* Controller features register */
+#define CDNS_XSPI_CTRL_FEATURES_REG 0x0F04
+#define CDNS_XSPI_NUM_BANKS GENMASK(25, 24)
+#define CDNS_XSPI_DMA_DATA_WIDTH BIT(21)
+#define CDNS_XSPI_NUM_THREADS GENMASK(3, 0)
+
+/* Controller version register */
+#define CDNS_XSPI_CTRL_VERSION_REG 0x0F00
+#define CDNS_XSPI_MAGIC_NUM GENMASK(31, 16)
+#define CDNS_XSPI_CTRL_REV GENMASK(7, 0)
+
+/* STIG Profile 1.0 instruction fields (split into registers) */
+#define CDNS_XSPI_CMD_INSTR_TYPE GENMASK(6, 0)
+#define CDNS_XSPI_CMD_P1_R1_ADDR0 GENMASK(31, 24)
+#define CDNS_XSPI_CMD_P1_R2_ADDR1 GENMASK(7, 0)
+#define CDNS_XSPI_CMD_P1_R2_ADDR2 GENMASK(15, 8)
+#define CDNS_XSPI_CMD_P1_R2_ADDR3 GENMASK(23, 16)
+#define CDNS_XSPI_CMD_P1_R2_ADDR4 GENMASK(31, 24)
+#define CDNS_XSPI_CMD_P1_R3_ADDR5 GENMASK(7, 0)
+#define CDNS_XSPI_CMD_P1_R3_CMD GENMASK(23, 16)
+#define CDNS_XSPI_CMD_P1_R3_NUM_ADDR_BYTES GENMASK(30, 28)
+#define CDNS_XSPI_CMD_P1_R4_ADDR_IOS GENMASK(1, 0)
+#define CDNS_XSPI_CMD_P1_R4_CMD_IOS GENMASK(9, 8)
+#define CDNS_XSPI_CMD_P1_R4_BANK GENMASK(14, 12)
+
+/* STIG data sequence instruction fields (split into registers) */
+#define CDNS_XSPI_CMD_DSEQ_R2_DCNT_L GENMASK(31, 16)
+#define CDNS_XSPI_CMD_DSEQ_R3_DCNT_H GENMASK(15, 0)
+#define CDNS_XSPI_CMD_DSEQ_R3_NUM_OF_DUMMY GENMASK(25, 20)
+#define CDNS_XSPI_CMD_DSEQ_R4_BANK GENMASK(14, 12)
+#define CDNS_XSPI_CMD_DSEQ_R4_DATA_IOS GENMASK(9, 8)
+#define CDNS_XSPI_CMD_DSEQ_R4_DIR BIT(4)
+
+/* STIG command status fields */
+#define CDNS_XSPI_CMD_STATUS_COMPLETED BIT(15)
+#define CDNS_XSPI_CMD_STATUS_FAILED BIT(14)
+#define CDNS_XSPI_CMD_STATUS_DQS_ERROR BIT(3)
+#define CDNS_XSPI_CMD_STATUS_CRC_ERROR BIT(2)
+#define CDNS_XSPI_CMD_STATUS_BUS_ERROR BIT(1)
+#define CDNS_XSPI_CMD_STATUS_INV_SEQ_ERROR BIT(0)
+
+#define CDNS_XSPI_STIG_DONE_FLAG BIT(0)
+#define CDNS_XSPI_TRD_STATUS 0x0104
+
+/* Helper macros for filling command registers */
+#define CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_1(op, data_phase) ( \
+ FIELD_PREP(CDNS_XSPI_CMD_INSTR_TYPE, (data_phase) ? \
+ CDNS_XSPI_STIG_INSTR_TYPE_1 : CDNS_XSPI_STIG_INSTR_TYPE_0) | \
+ FIELD_PREP(CDNS_XSPI_CMD_P1_R1_ADDR0, (op)->addr.val & 0xff))
+
+#define CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_2(op) ( \
+ FIELD_PREP(CDNS_XSPI_CMD_P1_R2_ADDR1, ((op)->addr.val >> 8) & 0xFF) | \
+ FIELD_PREP(CDNS_XSPI_CMD_P1_R2_ADDR2, ((op)->addr.val >> 16) & 0xFF) | \
+ FIELD_PREP(CDNS_XSPI_CMD_P1_R2_ADDR3, ((op)->addr.val >> 24) & 0xFF) | \
+ FIELD_PREP(CDNS_XSPI_CMD_P1_R2_ADDR4, ((op)->addr.val >> 32) & 0xFF))
+
+#define CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_3(op) ( \
+ FIELD_PREP(CDNS_XSPI_CMD_P1_R3_ADDR5, ((op)->addr.val >> 40) & 0xFF) | \
+ FIELD_PREP(CDNS_XSPI_CMD_P1_R3_CMD, (op)->cmd.opcode) | \
+ FIELD_PREP(CDNS_XSPI_CMD_P1_R3_NUM_ADDR_BYTES, (op)->addr.nbytes))
+
+#define CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_4(op, chipsel) ( \
+ FIELD_PREP(CDNS_XSPI_CMD_P1_R4_ADDR_IOS, ilog2((op)->addr.buswidth)) | \
+ FIELD_PREP(CDNS_XSPI_CMD_P1_R4_CMD_IOS, ilog2((op)->cmd.buswidth)) | \
+ FIELD_PREP(CDNS_XSPI_CMD_P1_R4_BANK, chipsel))
+
+#define CDNS_XSPI_CMD_FLD_DSEQ_CMD_1(op) \
+ FIELD_PREP(CDNS_XSPI_CMD_INSTR_TYPE, CDNS_XSPI_STIG_INSTR_TYPE_DATA_SEQ)
+
+#define CDNS_XSPI_CMD_FLD_DSEQ_CMD_2(op) \
+ FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R2_DCNT_L, (op)->data.nbytes & 0xFFFF)
+
+#define CDNS_XSPI_CMD_FLD_DSEQ_CMD_3(op) ( \
+ FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R3_DCNT_H, \
+ ((op)->data.nbytes >> 16) & 0xffff) | \
+ FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R3_NUM_OF_DUMMY, \
+ (op)->dummy.buswidth != 0 ? \
+ (((op)->dummy.nbytes * 8) / (op)->dummy.buswidth) : \
+ 0))
+
+#define CDNS_XSPI_CMD_FLD_DSEQ_CMD_4(op, chipsel) ( \
+ FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R4_BANK, chipsel) | \
+ FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R4_DATA_IOS, \
+ ilog2((op)->data.buswidth)) | \
+ FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R4_DIR, \
+ ((op)->data.dir == SPI_MEM_DATA_IN) ? \
+ CDNS_XSPI_STIG_CMD_DIR_READ : CDNS_XSPI_STIG_CMD_DIR_WRITE))
+
+enum cdns_xspi_stig_instr_type {
+ CDNS_XSPI_STIG_INSTR_TYPE_0,
+ CDNS_XSPI_STIG_INSTR_TYPE_1,
+ CDNS_XSPI_STIG_INSTR_TYPE_DATA_SEQ = 127,
+};
+
+enum cdns_xspi_sdma_dir {
+ CDNS_XSPI_SDMA_DIR_READ,
+ CDNS_XSPI_SDMA_DIR_WRITE,
+};
+
+enum cdns_xspi_stig_cmd_dir {
+ CDNS_XSPI_STIG_CMD_DIR_READ,
+ CDNS_XSPI_STIG_CMD_DIR_WRITE,
+};
+
+struct cdns_xspi_dev {
+ struct platform_device *pdev;
+ struct device *dev;
+
+ void __iomem *iobase;
+ void __iomem *auxbase;
+ void __iomem *sdmabase;
+
+ int irq;
+ int cur_cs;
+ unsigned int sdmasize;
+
+ struct completion cmd_complete;
+ struct completion auto_cmd_complete;
+ struct completion sdma_complete;
+ bool sdma_error;
+
+ void *in_buffer;
+ const void *out_buffer;
+
+ u8 hw_num_banks;
+};
+
+static int cdns_xspi_wait_for_controller_idle(struct cdns_xspi_dev *cdns_xspi)
+{
+ u32 ctrl_stat;
+
+ return readl_relaxed_poll_timeout(cdns_xspi->iobase +
+ CDNS_XSPI_CTRL_STATUS_REG,
+ ctrl_stat,
+ ((ctrl_stat &
+ CDNS_XSPI_CTRL_BUSY) == 0),
+ 100, 1000);
+}
+
+static void cdns_xspi_trigger_command(struct cdns_xspi_dev *cdns_xspi,
+ u32 cmd_regs[6])
+{
+ writel(cmd_regs[5], cdns_xspi->iobase + CDNS_XSPI_CMD_REG_5);
+ writel(cmd_regs[4], cdns_xspi->iobase + CDNS_XSPI_CMD_REG_4);
+ writel(cmd_regs[3], cdns_xspi->iobase + CDNS_XSPI_CMD_REG_3);
+ writel(cmd_regs[2], cdns_xspi->iobase + CDNS_XSPI_CMD_REG_2);
+ writel(cmd_regs[1], cdns_xspi->iobase + CDNS_XSPI_CMD_REG_1);
+ writel(cmd_regs[0], cdns_xspi->iobase + CDNS_XSPI_CMD_REG_0);
+}
+
+static int cdns_xspi_check_command_status(struct cdns_xspi_dev *cdns_xspi)
+{
+ int ret = 0;
+ u32 cmd_status = readl(cdns_xspi->iobase + CDNS_XSPI_CMD_STATUS_REG);
+
+ if (cmd_status & CDNS_XSPI_CMD_STATUS_COMPLETED) {
+ if ((cmd_status & CDNS_XSPI_CMD_STATUS_FAILED) != 0) {
+ if (cmd_status & CDNS_XSPI_CMD_STATUS_DQS_ERROR) {
+ dev_err(cdns_xspi->dev,
+ "Incorrect DQS pulses detected\n");
+ ret = -EPROTO;
+ }
+ if (cmd_status & CDNS_XSPI_CMD_STATUS_CRC_ERROR) {
+ dev_err(cdns_xspi->dev,
+ "CRC error received\n");
+ ret = -EPROTO;
+ }
+ if (cmd_status & CDNS_XSPI_CMD_STATUS_BUS_ERROR) {
+ dev_err(cdns_xspi->dev,
+ "Error resp on system DMA interface\n");
+ ret = -EPROTO;
+ }
+ if (cmd_status & CDNS_XSPI_CMD_STATUS_INV_SEQ_ERROR) {
+ dev_err(cdns_xspi->dev,
+ "Invalid command sequence detected\n");
+ ret = -EPROTO;
+ }
+ }
+ } else {
+ dev_err(cdns_xspi->dev, "Fatal err - command not completed\n");
+ ret = -EPROTO;
+ }
+
+ return ret;
+}
+
+static void cdns_xspi_set_interrupts(struct cdns_xspi_dev *cdns_xspi,
+ bool enabled)
+{
+ u32 intr_enable;
+
+ intr_enable = readl(cdns_xspi->iobase + CDNS_XSPI_INTR_ENABLE_REG);
+ if (enabled)
+ intr_enable |= CDNS_XSPI_INTR_MASK;
+ else
+ intr_enable &= ~CDNS_XSPI_INTR_MASK;
+ writel(intr_enable, cdns_xspi->iobase + CDNS_XSPI_INTR_ENABLE_REG);
+}
+
+static int cdns_xspi_controller_init(struct cdns_xspi_dev *cdns_xspi)
+{
+ u32 ctrl_ver;
+ u32 ctrl_features;
+ u16 hw_magic_num;
+
+ ctrl_ver = readl(cdns_xspi->iobase + CDNS_XSPI_CTRL_VERSION_REG);
+ hw_magic_num = FIELD_GET(CDNS_XSPI_MAGIC_NUM, ctrl_ver);
+ if (hw_magic_num != CDNS_XSPI_MAGIC_NUM_VALUE) {
+ dev_err(cdns_xspi->dev,
+ "Incorrect XSPI magic number: %x, expected: %x\n",
+ hw_magic_num, CDNS_XSPI_MAGIC_NUM_VALUE);
+ return -EIO;
+ }
+
+ ctrl_features = readl(cdns_xspi->iobase + CDNS_XSPI_CTRL_FEATURES_REG);
+ cdns_xspi->hw_num_banks = FIELD_GET(CDNS_XSPI_NUM_BANKS, ctrl_features);
+ cdns_xspi_set_interrupts(cdns_xspi, false);
+
+ return 0;
+}
+
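+/*
+ * Service an SDMA trigger: drain or fill the slave DMA FIFO with PIO in the
+ * direction reported by the controller.
+ */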
+static void cdns_xspi_sdma_handle(struct cdns_xspi_dev *cdns_xspi)
+{
+ u32 sdma_size, sdma_trd_info;
+ u8 sdma_dir;
+
+ sdma_size = readl(cdns_xspi->iobase + CDNS_XSPI_SDMA_SIZE_REG);
+ sdma_trd_info = readl(cdns_xspi->iobase + CDNS_XSPI_SDMA_TRD_INFO_REG);
+ sdma_dir = FIELD_GET(CDNS_XSPI_SDMA_DIR, sdma_trd_info);
+
+ switch (sdma_dir) {
+ case CDNS_XSPI_SDMA_DIR_READ:
+ ioread8_rep(cdns_xspi->sdmabase,
+ cdns_xspi->in_buffer, sdma_size);
+ break;
+
+ case CDNS_XSPI_SDMA_DIR_WRITE:
+ iowrite8_rep(cdns_xspi->sdmabase,
+ cdns_xspi->out_buffer, sdma_size);
+ break;
+ }
+}
+
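+/*
+ * Issue a STIG command: program the six command registers (written from
+ * CMD_REG_5 down to CMD_REG_0), optionally follow with a data sequence
+ * serviced through the SDMA path, then wait for completion and check the
+ * command status.
+ */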
+static int cdns_xspi_send_stig_command(struct cdns_xspi_dev *cdns_xspi,
+ const struct spi_mem_op *op,
+ bool data_phase)
+{
+ u32 cmd_regs[6];
+ u32 cmd_status;
+ int ret;
+
+ ret = cdns_xspi_wait_for_controller_idle(cdns_xspi);
+ if (ret < 0)
+ return -EIO;
+
+ writel(FIELD_PREP(CDNS_XSPI_CTRL_WORK_MODE, CDNS_XSPI_WORK_MODE_STIG),
+ cdns_xspi->iobase + CDNS_XSPI_CTRL_CONFIG_REG);
+
+ cdns_xspi_set_interrupts(cdns_xspi, true);
+ cdns_xspi->sdma_error = false;
+
+ memset(cmd_regs, 0, sizeof(cmd_regs));
+ cmd_regs[1] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_1(op, data_phase);
+ cmd_regs[2] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_2(op);
+ cmd_regs[3] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_3(op);
+ cmd_regs[4] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_4(op,
+ cdns_xspi->cur_cs);
+
+ cdns_xspi_trigger_command(cdns_xspi, cmd_regs);
+
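+ /*
+ * For operations with a data phase a second, data-sequence command is
+ * programmed below; its SDMA trigger interrupt signals when the payload
+ * can be copied through the SDMA window by cdns_xspi_sdma_handle().
+ */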
+ if (data_phase) {
+ cmd_regs[0] = CDNS_XSPI_STIG_DONE_FLAG;
+ cmd_regs[1] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_1(op);
+ cmd_regs[2] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_2(op);
+ cmd_regs[3] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_3(op);
+ cmd_regs[4] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_4(op,
+ cdns_xspi->cur_cs);
+
+ cdns_xspi->in_buffer = op->data.buf.in;
+ cdns_xspi->out_buffer = op->data.buf.out;
+
+ cdns_xspi_trigger_command(cdns_xspi, cmd_regs);
+
+ wait_for_completion(&cdns_xspi->sdma_complete);
+ if (cdns_xspi->sdma_error) {
+ cdns_xspi_set_interrupts(cdns_xspi, false);
+ return -EIO;
+ }
+ cdns_xspi_sdma_handle(cdns_xspi);
+ }
+
+ wait_for_completion(&cdns_xspi->cmd_complete);
+ cdns_xspi_set_interrupts(cdns_xspi, false);
+
+ cmd_status = cdns_xspi_check_command_status(cdns_xspi);
+ if (cmd_status)
+ return -EPROTO;
+
+ return 0;
+}
+
+static int cdns_xspi_mem_op(struct cdns_xspi_dev *cdns_xspi,
+ struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ enum spi_mem_data_dir dir = op->data.dir;
+
+ if (cdns_xspi->cur_cs != mem->spi->chip_select)
+ cdns_xspi->cur_cs = mem->spi->chip_select;
+
+ return cdns_xspi_send_stig_command(cdns_xspi, op,
+ (dir != SPI_MEM_NO_DATA));
+}
+
+static int cdns_xspi_mem_op_execute(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ struct cdns_xspi_dev *cdns_xspi =
+ spi_master_get_devdata(mem->spi->master);
+ int ret = 0;
+
+ ret = cdns_xspi_mem_op(cdns_xspi, mem, op);
+
+ return ret;
+}
+
+static int cdns_xspi_adjust_mem_op_size(struct spi_mem *mem, struct spi_mem_op *op)
+{
+ struct cdns_xspi_dev *cdns_xspi =
+ spi_master_get_devdata(mem->spi->master);
+
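+ /*
+ * Trim operations to the size of the SDMA window; the spi-mem core will
+ * then issue the remainder as follow-up operations.
+ */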
+ op->data.nbytes = clamp_val(op->data.nbytes, 0, cdns_xspi->sdmasize);
+
+ return 0;
+}
+
+static const struct spi_controller_mem_ops cadence_xspi_mem_ops = {
+ .exec_op = cdns_xspi_mem_op_execute,
+ .adjust_op_size = cdns_xspi_adjust_mem_op_size,
+};
+
+static irqreturn_t cdns_xspi_irq_handler(int this_irq, void *dev)
+{
+ struct cdns_xspi_dev *cdns_xspi = dev;
+ u32 irq_status;
+ irqreturn_t result = IRQ_NONE;
+
+ irq_status = readl(cdns_xspi->iobase + CDNS_XSPI_INTR_STATUS_REG);
+ writel(irq_status, cdns_xspi->iobase + CDNS_XSPI_INTR_STATUS_REG);
+
+ if (irq_status &
+ (CDNS_XSPI_SDMA_ERROR | CDNS_XSPI_SDMA_TRIGGER |
+ CDNS_XSPI_STIG_DONE)) {
+ if (irq_status & CDNS_XSPI_SDMA_ERROR) {
+ dev_err(cdns_xspi->dev,
+ "Slave DMA transaction error\n");
+ cdns_xspi->sdma_error = true;
+ complete(&cdns_xspi->sdma_complete);
+ }
+
+ if (irq_status & CDNS_XSPI_SDMA_TRIGGER)
+ complete(&cdns_xspi->sdma_complete);
+
+ if (irq_status & CDNS_XSPI_STIG_DONE)
+ complete(&cdns_xspi->cmd_complete);
+
+ result = IRQ_HANDLED;
+ }
+
+ irq_status = readl(cdns_xspi->iobase + CDNS_XSPI_TRD_COMP_INTR_STATUS);
+ if (irq_status) {
+ writel(irq_status,
+ cdns_xspi->iobase + CDNS_XSPI_TRD_COMP_INTR_STATUS);
+
+ complete(&cdns_xspi->auto_cmd_complete);
+
+ result = IRQ_HANDLED;
+ }
+
+ return result;
+}
+
+static int cdns_xspi_of_get_plat_data(struct platform_device *pdev)
+{
+ struct device_node *node_prop = pdev->dev.of_node;
+ struct device_node *node_child;
+ unsigned int cs;
+
+ for_each_child_of_node(node_prop, node_child) {
+ if (!of_device_is_available(node_child))
+ continue;
+
+ if (of_property_read_u32(node_child, "reg", &cs)) {
+ dev_err(&pdev->dev, "Couldn't get memory chip select\n");
+ of_node_put(node_child);
+ return -ENXIO;
+ } else if (cs >= CDNS_XSPI_MAX_BANKS) {
+ dev_err(&pdev->dev, "reg (cs) parameter value too large\n");
+ of_node_put(node_child);
+ return -ENXIO;
+ }
+ }
+
+ return 0;
+}
+
+static void cdns_xspi_print_phy_config(struct cdns_xspi_dev *cdns_xspi)
+{
+ struct device *dev = cdns_xspi->dev;
+
+ dev_info(dev, "PHY configuration\n");
+ dev_info(dev, " * xspi_dll_phy_ctrl: %08x\n",
+ readl(cdns_xspi->iobase + CDNS_XSPI_DLL_PHY_CTRL));
+ dev_info(dev, " * phy_dq_timing: %08x\n",
+ readl(cdns_xspi->auxbase + CDNS_XSPI_CCP_PHY_DQ_TIMING));
+ dev_info(dev, " * phy_dqs_timing: %08x\n",
+ readl(cdns_xspi->auxbase + CDNS_XSPI_CCP_PHY_DQS_TIMING));
+ dev_info(dev, " * phy_gate_loopback_ctrl: %08x\n",
+ readl(cdns_xspi->auxbase + CDNS_XSPI_CCP_PHY_GATE_LPBCK_CTRL));
+ dev_info(dev, " * phy_dll_slave_ctrl: %08x\n",
+ readl(cdns_xspi->auxbase + CDNS_XSPI_CCP_PHY_DLL_SLAVE_CTRL));
+}
+
+static int cdns_xspi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct spi_master *master = NULL;
+ struct cdns_xspi_dev *cdns_xspi = NULL;
+ struct resource *res;
+ int ret;
+
+ master = devm_spi_alloc_master(dev, sizeof(*cdns_xspi));
+ if (!master)
+ return -ENOMEM;
+
+ master->mode_bits = SPI_3WIRE | SPI_TX_DUAL | SPI_TX_QUAD |
+ SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_OCTAL | SPI_RX_OCTAL |
+ SPI_MODE_0 | SPI_MODE_3;
+
+ master->mem_ops = &cadence_xspi_mem_ops;
+ master->dev.of_node = pdev->dev.of_node;
+ master->bus_num = -1;
+
+ platform_set_drvdata(pdev, master);
+
+ cdns_xspi = spi_master_get_devdata(master);
+ cdns_xspi->pdev = pdev;
+ cdns_xspi->dev = &pdev->dev;
+ cdns_xspi->cur_cs = 0;
+
+ init_completion(&cdns_xspi->cmd_complete);
+ init_completion(&cdns_xspi->auto_cmd_complete);
+ init_completion(&cdns_xspi->sdma_complete);
+
+ ret = cdns_xspi_of_get_plat_data(pdev);
+ if (ret)
+ return -ENODEV;
+
+ cdns_xspi->iobase = devm_platform_ioremap_resource_byname(pdev, "io");
+ if (IS_ERR(cdns_xspi->iobase)) {
+ dev_err(dev, "Failed to remap controller base address\n");
+ return PTR_ERR(cdns_xspi->iobase);
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sdma");
+ cdns_xspi->sdmabase = devm_ioremap_resource(dev, res);
+ if (IS_ERR(cdns_xspi->sdmabase))
+ return PTR_ERR(cdns_xspi->sdmabase);
+ cdns_xspi->sdmasize = resource_size(res);
+
+ cdns_xspi->auxbase = devm_platform_ioremap_resource_byname(pdev, "aux");
+ if (IS_ERR(cdns_xspi->auxbase)) {
+ dev_err(dev, "Failed to remap AUX address\n");
+ return PTR_ERR(cdns_xspi->auxbase);
+ }
+
+ cdns_xspi->irq = platform_get_irq(pdev, 0);
+ if (cdns_xspi->irq < 0)
+ return -ENXIO;
+
+ ret = devm_request_irq(dev, cdns_xspi->irq, cdns_xspi_irq_handler,
+ IRQF_SHARED, pdev->name, cdns_xspi);
+ if (ret) {
+ dev_err(dev, "Failed to request IRQ: %d\n", cdns_xspi->irq);
+ return ret;
+ }
+
+ cdns_xspi_print_phy_config(cdns_xspi);
+
+ ret = cdns_xspi_controller_init(cdns_xspi);
+ if (ret) {
+ dev_err(dev, "Failed to initialize controller\n");
+ return ret;
+ }
+
+ master->num_chipselect = 1 << cdns_xspi->hw_num_banks;
+
+ ret = devm_spi_register_master(dev, master);
+ if (ret) {
+ dev_err(dev, "Failed to register SPI master\n");
+ return ret;
+ }
+
+ dev_info(dev, "Successfully registered SPI master\n");
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id cdns_xspi_of_match[] = {
+ {
+ .compatible = "cdns,xspi-nor",
+ },
+ { /* end of table */}
+};
+MODULE_DEVICE_TABLE(of, cdns_xspi_of_match);
+#else
+#define cdns_xspi_of_match NULL
+#endif /* CONFIG_OF */
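+
+/*
+ * A minimal, purely illustrative device tree node matched by this driver
+ * (addresses are placeholders; the resource names correspond to those
+ * requested in cdns_xspi_probe()):
+ *
+ *	xspi: spi@a0010000 {
+ *		compatible = "cdns,xspi-nor";
+ *		reg = <0xa0010000 0x1040>,
+ *		      <0xb0000000 0x1000>,
+ *		      <0xa0020000 0x100>;
+ *		reg-names = "io", "sdma", "aux";
+ *		#address-cells = <1>;
+ *		#size-cells = <0>;
+ *	};
+ */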
+
+static struct platform_driver cdns_xspi_platform_driver = {
+ .probe = cdns_xspi_probe,
+ .remove = NULL,
+ .driver = {
+ .name = CDNS_XSPI_NAME,
+ .of_match_table = cdns_xspi_of_match,
+ },
+};
+
+module_platform_driver(cdns_xspi_platform_driver);
+
+MODULE_DESCRIPTION("Cadence XSPI Controller Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" CDNS_XSPI_NAME);
+MODULE_AUTHOR("Konrad Kociolek <konrad@cadence.com>");
+MODULE_AUTHOR("Jayshri Pawar <jpawar@cadence.com>");
+MODULE_AUTHOR("Parshuram Thombare <pthombar@cadence.com>");
diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c
new file mode 100644
index 000000000..6a7f7df1e
--- /dev/null
+++ b/drivers/spi/spi-cadence.c
@@ -0,0 +1,754 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Cadence SPI controller driver (master mode only)
+ *
+ * Copyright (C) 2008 - 2014 Xilinx, Inc.
+ *
+ * based on Blackfin On-Chip SPI Driver (spi_bfin5xx.c)
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/spi/spi.h>
+
+/* Name of this driver */
+#define CDNS_SPI_NAME "cdns-spi"
+
+/* Register offset definitions */
+#define CDNS_SPI_CR 0x00 /* Configuration Register, RW */
+#define CDNS_SPI_ISR 0x04 /* Interrupt Status Register, RO */
+#define CDNS_SPI_IER 0x08 /* Interrupt Enable Register, WO */
+#define CDNS_SPI_IDR 0x0c /* Interrupt Disable Register, WO */
+#define CDNS_SPI_IMR 0x10 /* Interrupt Enabled Mask Register, RO */
+#define CDNS_SPI_ER 0x14 /* Enable/Disable Register, RW */
+#define CDNS_SPI_DR 0x18 /* Delay Register, RW */
+#define CDNS_SPI_TXD 0x1C /* Data Transmit Register, WO */
+#define CDNS_SPI_RXD 0x20 /* Data Receive Register, RO */
+#define CDNS_SPI_SICR 0x24 /* Slave Idle Count Register, RW */
+#define CDNS_SPI_THLD 0x28 /* Transmit FIFO Watermark Register, RW */
+
+#define SPI_AUTOSUSPEND_TIMEOUT 3000
+/*
+ * SPI Configuration Register bit Masks
+ *
+ * This register contains various control bits that affect the operation
+ * of the SPI controller
+ */
+#define CDNS_SPI_CR_MANSTRT 0x00010000 /* Manual TX Start */
+#define CDNS_SPI_CR_CPHA 0x00000004 /* Clock Phase Control */
+#define CDNS_SPI_CR_CPOL 0x00000002 /* Clock Polarity Control */
+#define CDNS_SPI_CR_SSCTRL 0x00003C00 /* Slave Select Mask */
+#define CDNS_SPI_CR_PERI_SEL 0x00000200 /* Peripheral Select Decode */
+#define CDNS_SPI_CR_BAUD_DIV 0x00000038 /* Baud Rate Divisor Mask */
+#define CDNS_SPI_CR_MSTREN 0x00000001 /* Master Enable Mask */
+#define CDNS_SPI_CR_MANSTRTEN 0x00008000 /* Manual TX Enable Mask */
+#define CDNS_SPI_CR_SSFORCE 0x00004000 /* Manual SS Enable Mask */
+#define CDNS_SPI_CR_BAUD_DIV_4 0x00000008 /* Default Baud Div Mask */
+#define CDNS_SPI_CR_DEFAULT (CDNS_SPI_CR_MSTREN | \
+ CDNS_SPI_CR_SSCTRL | \
+ CDNS_SPI_CR_SSFORCE | \
+ CDNS_SPI_CR_BAUD_DIV_4)
+
+/*
+ * SPI Configuration Register - Baud rate and slave select
+ *
+ * These are the values used in the calculation of baud rate divisor and
+ * setting the slave select.
+ */
+
+#define CDNS_SPI_BAUD_DIV_MAX 7 /* Baud rate divisor maximum */
+#define CDNS_SPI_BAUD_DIV_MIN 1 /* Baud rate divisor minimum */
+#define CDNS_SPI_BAUD_DIV_SHIFT 3 /* Baud rate divisor shift in CR */
+#define CDNS_SPI_SS_SHIFT 10 /* Slave Select field shift in CR */
+#define CDNS_SPI_SS0 0x1 /* Slave Select zero */
+#define CDNS_SPI_NOSS 0xF /* No Slave select */
+
+/*
+ * SPI Interrupt Registers bit Masks
+ *
+ * All the four interrupt registers (Status/Mask/Enable/Disable) have the same
+ * bit definitions.
+ */
+#define CDNS_SPI_IXR_TXOW 0x00000004 /* SPI TX FIFO Overwater */
+#define CDNS_SPI_IXR_MODF 0x00000002 /* SPI Mode Fault */
+#define CDNS_SPI_IXR_RXNEMTY 0x00000010 /* SPI RX FIFO Not Empty */
+#define CDNS_SPI_IXR_DEFAULT (CDNS_SPI_IXR_TXOW | \
+ CDNS_SPI_IXR_MODF)
+#define CDNS_SPI_IXR_TXFULL 0x00000008 /* SPI TX Full */
+#define CDNS_SPI_IXR_ALL 0x0000007F /* SPI all interrupts */
+
+/*
+ * SPI Enable Register bit Masks
+ *
+ * This register is used to enable or disable the SPI controller
+ */
+#define CDNS_SPI_ER_ENABLE 0x00000001 /* SPI Enable Bit Mask */
+#define CDNS_SPI_ER_DISABLE 0x0 /* SPI Disable Bit Mask */
+
+/* Default number of chip select lines */
+#define CDNS_SPI_DEFAULT_NUM_CS 4
+
+/**
+ * struct cdns_spi - This definition defines spi driver instance
+ * @regs: Virtual address of the SPI controller registers
+ * @ref_clk: Pointer to the peripheral clock
+ * @pclk: Pointer to the APB clock
+ * @clk_rate: Reference clock frequency, taken from @ref_clk
+ * @speed_hz: Current SPI bus clock speed in Hz
+ * @txbuf: Pointer to the TX buffer
+ * @rxbuf: Pointer to the RX buffer
+ * @tx_bytes: Number of bytes left to transfer
+ * @rx_bytes: Number of bytes requested
+ * @dev_busy: Device busy flag
+ * @is_decoded_cs: Flag for decoder property set or not
+ * @tx_fifo_depth: Depth of the TX FIFO
+ */
+struct cdns_spi {
+ void __iomem *regs;
+ struct clk *ref_clk;
+ struct clk *pclk;
+ unsigned int clk_rate;
+ u32 speed_hz;
+ const u8 *txbuf;
+ u8 *rxbuf;
+ int tx_bytes;
+ int rx_bytes;
+ u8 dev_busy;
+ u32 is_decoded_cs;
+ unsigned int tx_fifo_depth;
+};
+
+/* Macros for the SPI controller read/write */
+static inline u32 cdns_spi_read(struct cdns_spi *xspi, u32 offset)
+{
+ return readl_relaxed(xspi->regs + offset);
+}
+
+static inline void cdns_spi_write(struct cdns_spi *xspi, u32 offset, u32 val)
+{
+ writel_relaxed(val, xspi->regs + offset);
+}
+
+/**
+ * cdns_spi_init_hw - Initialize the hardware and configure the SPI controller
+ * @xspi: Pointer to the cdns_spi structure
+ *
+ * On reset the SPI controller is configured to be in master mode, the baud
+ * rate divisor is set to 4, the threshold for the TX FIFO not-full interrupt
+ * is set to 1 and the word size is 8 bits.
+ * This function initializes the SPI controller to disable and clear all the
+ * interrupts, enable manual slave select and manual start, deselect all the
+ * chip select lines, and enable the SPI controller.
+ */
+static void cdns_spi_init_hw(struct cdns_spi *xspi)
+{
+ u32 ctrl_reg = CDNS_SPI_CR_DEFAULT;
+
+ if (xspi->is_decoded_cs)
+ ctrl_reg |= CDNS_SPI_CR_PERI_SEL;
+
+ cdns_spi_write(xspi, CDNS_SPI_ER, CDNS_SPI_ER_DISABLE);
+ cdns_spi_write(xspi, CDNS_SPI_IDR, CDNS_SPI_IXR_ALL);
+
+ /* Clear the RX FIFO */
+ while (cdns_spi_read(xspi, CDNS_SPI_ISR) & CDNS_SPI_IXR_RXNEMTY)
+ cdns_spi_read(xspi, CDNS_SPI_RXD);
+
+ cdns_spi_write(xspi, CDNS_SPI_ISR, CDNS_SPI_IXR_ALL);
+ cdns_spi_write(xspi, CDNS_SPI_CR, ctrl_reg);
+ cdns_spi_write(xspi, CDNS_SPI_ER, CDNS_SPI_ER_ENABLE);
+}
+
+/**
+ * cdns_spi_chipselect - Select or deselect the chip select line
+ * @spi: Pointer to the spi_device structure
+ * @is_high: Select (0) or deselect (1) the chip select line
+ */
+static void cdns_spi_chipselect(struct spi_device *spi, bool is_high)
+{
+ struct cdns_spi *xspi = spi_master_get_devdata(spi->master);
+ u32 ctrl_reg;
+
+ ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR);
+
+ if (is_high) {
+ /* Deselect the slave */
+ ctrl_reg |= CDNS_SPI_CR_SSCTRL;
+ } else {
+ /* Select the slave */
+ ctrl_reg &= ~CDNS_SPI_CR_SSCTRL;
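+ /*
+ * Without an external decoder the SSCTRL field is one-cold: e.g. for
+ * chip select 1, ~(CDNS_SPI_SS0 << 1) masked into CR bits 13:10 gives
+ * 0b1101, so only the bit for that chip select ends up cleared
+ * (an illustrative walk-through of the expression below).
+ */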
+ if (!(xspi->is_decoded_cs))
+ ctrl_reg |= ((~(CDNS_SPI_SS0 << spi->chip_select)) <<
+ CDNS_SPI_SS_SHIFT) &
+ CDNS_SPI_CR_SSCTRL;
+ else
+ ctrl_reg |= (spi->chip_select << CDNS_SPI_SS_SHIFT) &
+ CDNS_SPI_CR_SSCTRL;
+ }
+
+ cdns_spi_write(xspi, CDNS_SPI_CR, ctrl_reg);
+}
+
+/**
+ * cdns_spi_config_clock_mode - Sets clock polarity and phase
+ * @spi: Pointer to the spi_device structure
+ *
+ * Sets the requested clock polarity and phase.
+ */
+static void cdns_spi_config_clock_mode(struct spi_device *spi)
+{
+ struct cdns_spi *xspi = spi_master_get_devdata(spi->master);
+ u32 ctrl_reg, new_ctrl_reg;
+
+ new_ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR);
+ ctrl_reg = new_ctrl_reg;
+
+ /* Set the SPI clock phase and clock polarity */
+ new_ctrl_reg &= ~(CDNS_SPI_CR_CPHA | CDNS_SPI_CR_CPOL);
+ if (spi->mode & SPI_CPHA)
+ new_ctrl_reg |= CDNS_SPI_CR_CPHA;
+ if (spi->mode & SPI_CPOL)
+ new_ctrl_reg |= CDNS_SPI_CR_CPOL;
+
+ if (new_ctrl_reg != ctrl_reg) {
+ /*
+ * Just writing the CR register does not seem to apply the clock
+ * setting changes. This is problematic when changing the clock
+ * polarity as it will cause the SPI slave to see spurious clock
+ * transitions. To work around the issue, toggle the ER register.
+ */
+ cdns_spi_write(xspi, CDNS_SPI_ER, CDNS_SPI_ER_DISABLE);
+ cdns_spi_write(xspi, CDNS_SPI_CR, new_ctrl_reg);
+ cdns_spi_write(xspi, CDNS_SPI_ER, CDNS_SPI_ER_ENABLE);
+ }
+}
+
+/**
+ * cdns_spi_config_clock_freq - Sets clock frequency
+ * @spi: Pointer to the spi_device structure
+ * @transfer: Pointer to the spi_transfer structure which provides
+ * information about next transfer setup parameters
+ *
+ * Sets the requested clock frequency.
+ * Note: If the requested frequency cannot be matched exactly with the
+ * available prescaler values, the driver selects the closest frequency
+ * below the requested one for the transfer. Requests above or below the
+ * range supported by the controller are clamped to the highest or lowest
+ * supported frequency respectively.
+ */
+static void cdns_spi_config_clock_freq(struct spi_device *spi,
+ struct spi_transfer *transfer)
+{
+ struct cdns_spi *xspi = spi_master_get_devdata(spi->master);
+ u32 ctrl_reg, baud_rate_val;
+ unsigned long frequency;
+
+ frequency = xspi->clk_rate;
+
+ ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR);
+
+ /* Set the clock frequency */
+ if (xspi->speed_hz != transfer->speed_hz) {
+ /* first valid value is 1 */
+ baud_rate_val = CDNS_SPI_BAUD_DIV_MIN;
+ while ((baud_rate_val < CDNS_SPI_BAUD_DIV_MAX) &&
+ (frequency / (2 << baud_rate_val)) > transfer->speed_hz)
+ baud_rate_val++;
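+ /*
+ * Illustrative numbers: with a 100 MHz ref_clk and a 10 MHz request
+ * the loop stops at baud_rate_val = 3, i.e. a divisor of 2 << 3 = 16,
+ * so the transfer actually runs at 6.25 MHz.
+ */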
+
+ ctrl_reg &= ~CDNS_SPI_CR_BAUD_DIV;
+ ctrl_reg |= baud_rate_val << CDNS_SPI_BAUD_DIV_SHIFT;
+
+ xspi->speed_hz = frequency / (2 << baud_rate_val);
+ }
+ cdns_spi_write(xspi, CDNS_SPI_CR, ctrl_reg);
+}
+
+/**
+ * cdns_spi_setup_transfer - Configure SPI controller for specified transfer
+ * @spi: Pointer to the spi_device structure
+ * @transfer: Pointer to the spi_transfer structure which provides
+ * information about next transfer setup parameters
+ *
+ * Sets the operational mode of SPI controller for the next SPI transfer and
+ * sets the requested clock frequency.
+ *
+ * Return: Always 0
+ */
+static int cdns_spi_setup_transfer(struct spi_device *spi,
+ struct spi_transfer *transfer)
+{
+ struct cdns_spi *xspi = spi_master_get_devdata(spi->master);
+
+ cdns_spi_config_clock_freq(spi, transfer);
+
+ dev_dbg(&spi->dev, "%s, mode %d, %u bits/w, %u clock speed\n",
+ __func__, spi->mode, spi->bits_per_word,
+ xspi->speed_hz);
+
+ return 0;
+}
+
+/**
+ * cdns_spi_fill_tx_fifo - Fills the TX FIFO with as many bytes as possible
+ * @xspi: Pointer to the cdns_spi structure
+ */
+static void cdns_spi_fill_tx_fifo(struct cdns_spi *xspi)
+{
+ unsigned long trans_cnt = 0;
+
+ while ((trans_cnt < xspi->tx_fifo_depth) &&
+ (xspi->tx_bytes > 0)) {
+
+ /*
+ * If the TX FIFO is already full, a queued byte may be lost and leave
+ * the controller in an inconsistent state, so wait briefly before
+ * writing the next byte.
+ */
+ if (cdns_spi_read(xspi, CDNS_SPI_ISR) &
+ CDNS_SPI_IXR_TXFULL)
+ udelay(10);
+
+ if (xspi->txbuf)
+ cdns_spi_write(xspi, CDNS_SPI_TXD, *xspi->txbuf++);
+ else
+ cdns_spi_write(xspi, CDNS_SPI_TXD, 0);
+
+ xspi->tx_bytes--;
+ trans_cnt++;
+ }
+}
+
+/**
+ * cdns_spi_irq - Interrupt service routine of the SPI controller
+ * @irq: IRQ number
+ * @dev_id: Pointer to the xspi structure
+ *
+ * This function handles TX empty and Mode Fault interrupts only.
+ * On TX empty interrupt this function reads the received data from RX FIFO and
+ * fills the TX FIFO if there is any data remaining to be transferred.
+ * On a Mode Fault interrupt this function finalizes the transfer; the SPI
+ * subsystem then detects the error because the number of bytes remaining to
+ * be transferred is non-zero.
+ *
+ * Return: IRQ_HANDLED when handled; IRQ_NONE otherwise.
+ */
+static irqreturn_t cdns_spi_irq(int irq, void *dev_id)
+{
+ struct spi_master *master = dev_id;
+ struct cdns_spi *xspi = spi_master_get_devdata(master);
+ irqreturn_t status;
+ u32 intr_status;
+
+ status = IRQ_NONE;
+ intr_status = cdns_spi_read(xspi, CDNS_SPI_ISR);
+ cdns_spi_write(xspi, CDNS_SPI_ISR, intr_status);
+
+ if (intr_status & CDNS_SPI_IXR_MODF) {
+ /* Finalize the transfer; the SPI subsystem will detect the error
+ * because the number of remaining bytes to be transferred is
+ * non-zero
+ */
+ cdns_spi_write(xspi, CDNS_SPI_IDR, CDNS_SPI_IXR_DEFAULT);
+ spi_finalize_current_transfer(master);
+ status = IRQ_HANDLED;
+ } else if (intr_status & CDNS_SPI_IXR_TXOW) {
+ unsigned long trans_cnt;
+
+ trans_cnt = xspi->rx_bytes - xspi->tx_bytes;
+
+ /* Read out the data from the RX FIFO */
+ while (trans_cnt) {
+ u8 data;
+
+ data = cdns_spi_read(xspi, CDNS_SPI_RXD);
+ if (xspi->rxbuf)
+ *xspi->rxbuf++ = data;
+
+ xspi->rx_bytes--;
+ trans_cnt--;
+ }
+
+ if (xspi->tx_bytes) {
+ /* There is more data to send */
+ cdns_spi_fill_tx_fifo(xspi);
+ } else {
+ /* Transfer is completed */
+ cdns_spi_write(xspi, CDNS_SPI_IDR,
+ CDNS_SPI_IXR_DEFAULT);
+ spi_finalize_current_transfer(master);
+ }
+ status = IRQ_HANDLED;
+ }
+
+ return status;
+}
+
+static int cdns_prepare_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ cdns_spi_config_clock_mode(msg->spi);
+ return 0;
+}
+
+/**
+ * cdns_transfer_one - Initiates the SPI transfer
+ * @master: Pointer to spi_master structure
+ * @spi: Pointer to the spi_device structure
+ * @transfer: Pointer to the spi_transfer structure which provides
+ * information about next transfer parameters
+ *
+ * This function fills the TX FIFO, starts the SPI transfer and
+ * returns a positive transfer count so that core will wait for completion.
+ *
+ * Return: Number of bytes transferred in the last transfer
+ */
+static int cdns_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *transfer)
+{
+ struct cdns_spi *xspi = spi_master_get_devdata(master);
+
+ xspi->txbuf = transfer->tx_buf;
+ xspi->rxbuf = transfer->rx_buf;
+ xspi->tx_bytes = transfer->len;
+ xspi->rx_bytes = transfer->len;
+
+ cdns_spi_setup_transfer(spi, transfer);
+ cdns_spi_fill_tx_fifo(xspi);
+ spi_transfer_delay_exec(transfer);
+
+ cdns_spi_write(xspi, CDNS_SPI_IER, CDNS_SPI_IXR_DEFAULT);
+ return transfer->len;
+}
+
+/**
+ * cdns_prepare_transfer_hardware - Prepares hardware for transfer.
+ * @master: Pointer to the spi_master structure which provides
+ * information about the controller.
+ *
+ * This function enables SPI master controller.
+ *
+ * Return: 0 always
+ */
+static int cdns_prepare_transfer_hardware(struct spi_master *master)
+{
+ struct cdns_spi *xspi = spi_master_get_devdata(master);
+
+ cdns_spi_write(xspi, CDNS_SPI_ER, CDNS_SPI_ER_ENABLE);
+
+ return 0;
+}
+
+/**
+ * cdns_unprepare_transfer_hardware - Relaxes hardware after transfer
+ * @master: Pointer to the spi_master structure which provides
+ * information about the controller.
+ *
+ * This function disables the SPI master controller when no slave is selected.
+ *
+ * Return: 0 always
+ */
+static int cdns_unprepare_transfer_hardware(struct spi_master *master)
+{
+ struct cdns_spi *xspi = spi_master_get_devdata(master);
+ u32 ctrl_reg;
+
+ /* Disable the SPI if slave is deselected */
+ ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR);
+ ctrl_reg = (ctrl_reg & CDNS_SPI_CR_SSCTRL) >> CDNS_SPI_SS_SHIFT;
+ if (ctrl_reg == CDNS_SPI_NOSS)
+ cdns_spi_write(xspi, CDNS_SPI_ER, CDNS_SPI_ER_DISABLE);
+
+ return 0;
+}
+
+/**
+ * cdns_spi_detect_fifo_depth - Detect the FIFO depth of the hardware
+ * @xspi: Pointer to the cdns_spi structure
+ *
+ * The depth of the TX FIFO is a synthesis configuration parameter of the SPI
+ * IP. The FIFO threshold register is sized so that its maximum value can be the
+ * FIFO size - 1. This is used to detect the size of the FIFO.
+ */
+static void cdns_spi_detect_fifo_depth(struct cdns_spi *xspi)
+{
+ /* The MSBs will get truncated giving us the size of the FIFO */
+ cdns_spi_write(xspi, CDNS_SPI_THLD, 0xffff);
+ xspi->tx_fifo_depth = cdns_spi_read(xspi, CDNS_SPI_THLD) + 1;
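+ /* e.g. a 128-deep FIFO reads back 0x7f here, giving a depth of 128 */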
+
+ /* Reset to default */
+ cdns_spi_write(xspi, CDNS_SPI_THLD, 0x1);
+}
+
+/**
+ * cdns_spi_probe - Probe method for the SPI driver
+ * @pdev: Pointer to the platform_device structure
+ *
+ * This function initializes the driver data structures and the hardware.
+ *
+ * Return: 0 on success and error value on error
+ */
+static int cdns_spi_probe(struct platform_device *pdev)
+{
+ int ret = 0, irq;
+ struct spi_master *master;
+ struct cdns_spi *xspi;
+ u32 num_cs;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*xspi));
+ if (!master)
+ return -ENOMEM;
+
+ xspi = spi_master_get_devdata(master);
+ master->dev.of_node = pdev->dev.of_node;
+ platform_set_drvdata(pdev, master);
+
+ xspi->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(xspi->regs)) {
+ ret = PTR_ERR(xspi->regs);
+ goto remove_master;
+ }
+
+ xspi->pclk = devm_clk_get(&pdev->dev, "pclk");
+ if (IS_ERR(xspi->pclk)) {
+ dev_err(&pdev->dev, "pclk clock not found.\n");
+ ret = PTR_ERR(xspi->pclk);
+ goto remove_master;
+ }
+
+ xspi->ref_clk = devm_clk_get(&pdev->dev, "ref_clk");
+ if (IS_ERR(xspi->ref_clk)) {
+ dev_err(&pdev->dev, "ref_clk clock not found.\n");
+ ret = PTR_ERR(xspi->ref_clk);
+ goto remove_master;
+ }
+
+ ret = clk_prepare_enable(xspi->pclk);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable APB clock.\n");
+ goto remove_master;
+ }
+
+ ret = clk_prepare_enable(xspi->ref_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable device clock.\n");
+ goto clk_dis_apb;
+ }
+
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ ret = of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs);
+ if (ret < 0)
+ master->num_chipselect = CDNS_SPI_DEFAULT_NUM_CS;
+ else
+ master->num_chipselect = num_cs;
+
+ ret = of_property_read_u32(pdev->dev.of_node, "is-decoded-cs",
+ &xspi->is_decoded_cs);
+ if (ret < 0)
+ xspi->is_decoded_cs = 0;
+
+ cdns_spi_detect_fifo_depth(xspi);
+
+ /* SPI controller initializations */
+ cdns_spi_init_hw(xspi);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ ret = -ENXIO;
+ goto clk_dis_all;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, cdns_spi_irq,
+ 0, pdev->name, master);
+ if (ret != 0) {
+ ret = -ENXIO;
+ dev_err(&pdev->dev, "request_irq failed\n");
+ goto clk_dis_all;
+ }
+
+ master->use_gpio_descriptors = true;
+ master->prepare_transfer_hardware = cdns_prepare_transfer_hardware;
+ master->prepare_message = cdns_prepare_message;
+ master->transfer_one = cdns_transfer_one;
+ master->unprepare_transfer_hardware = cdns_unprepare_transfer_hardware;
+ master->set_cs = cdns_spi_chipselect;
+ master->auto_runtime_pm = true;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+
+ xspi->clk_rate = clk_get_rate(xspi->ref_clk);
+ /* Set to default valid value */
+ master->max_speed_hz = xspi->clk_rate / 4;
+ xspi->speed_hz = master->max_speed_hz;
+
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
+
+ ret = spi_register_master(master);
+ if (ret) {
+ dev_err(&pdev->dev, "spi_register_master failed\n");
+ goto clk_dis_all;
+ }
+
+ return ret;
+
+clk_dis_all:
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ clk_disable_unprepare(xspi->ref_clk);
+clk_dis_apb:
+ clk_disable_unprepare(xspi->pclk);
+remove_master:
+ spi_master_put(master);
+ return ret;
+}
+
+/**
+ * cdns_spi_remove - Remove method for the SPI driver
+ * @pdev: Pointer to the platform_device structure
+ *
+ * This function is called if a device is physically removed from the system or
+ * if the driver module is being unloaded. It frees all resources allocated to
+ * the device.
+ *
+ * Return: 0 on success and error value on error
+ */
+static int cdns_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct cdns_spi *xspi = spi_master_get_devdata(master);
+
+ cdns_spi_write(xspi, CDNS_SPI_ER, CDNS_SPI_ER_DISABLE);
+
+ clk_disable_unprepare(xspi->ref_clk);
+ clk_disable_unprepare(xspi->pclk);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ spi_unregister_master(master);
+
+ return 0;
+}
+
+/**
+ * cdns_spi_suspend - Suspend method for the SPI driver
+ * @dev: Address of the platform_device structure
+ *
+ * This function disables the SPI controller and
+ * changes the driver state to "suspend"
+ *
+ * Return: 0 on success and error value on error
+ */
+static int __maybe_unused cdns_spi_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+
+ return spi_master_suspend(master);
+}
+
+/**
+ * cdns_spi_resume - Resume method for the SPI driver
+ * @dev: Address of the platform_device structure
+ *
+ * This function changes the driver state to "ready"
+ *
+ * Return: 0 on success and error value on error
+ */
+static int __maybe_unused cdns_spi_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct cdns_spi *xspi = spi_master_get_devdata(master);
+
+ cdns_spi_init_hw(xspi);
+ return spi_master_resume(master);
+}
+
+/**
+ * cdns_spi_runtime_resume - Runtime resume method for the SPI driver
+ * @dev: Address of the platform_device structure
+ *
+ * This function enables the clocks
+ *
+ * Return: 0 on success and error value on error
+ */
+static int __maybe_unused cdns_spi_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct cdns_spi *xspi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_prepare_enable(xspi->pclk);
+ if (ret) {
+ dev_err(dev, "Cannot enable APB clock.\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(xspi->ref_clk);
+ if (ret) {
+ dev_err(dev, "Cannot enable device clock.\n");
+ clk_disable_unprepare(xspi->pclk);
+ return ret;
+ }
+ return 0;
+}
+
+/**
+ * cdns_spi_runtime_suspend - Runtime suspend method for the SPI driver
+ * @dev: Address of the platform_device structure
+ *
+ * This function disables the clocks
+ *
+ * Return: Always 0
+ */
+static int __maybe_unused cdns_spi_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct cdns_spi *xspi = spi_master_get_devdata(master);
+
+ clk_disable_unprepare(xspi->ref_clk);
+ clk_disable_unprepare(xspi->pclk);
+
+ return 0;
+}
+
+static const struct dev_pm_ops cdns_spi_dev_pm_ops = {
+ SET_RUNTIME_PM_OPS(cdns_spi_runtime_suspend,
+ cdns_spi_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(cdns_spi_suspend, cdns_spi_resume)
+};
+
+static const struct of_device_id cdns_spi_of_match[] = {
+ { .compatible = "xlnx,zynq-spi-r1p6" },
+ { .compatible = "cdns,spi-r1p6" },
+ { /* end of table */ }
+};
+MODULE_DEVICE_TABLE(of, cdns_spi_of_match);
+
+/* cdns_spi_driver - This structure defines the SPI subsystem platform driver */
+static struct platform_driver cdns_spi_driver = {
+ .probe = cdns_spi_probe,
+ .remove = cdns_spi_remove,
+ .driver = {
+ .name = CDNS_SPI_NAME,
+ .of_match_table = cdns_spi_of_match,
+ .pm = &cdns_spi_dev_pm_ops,
+ },
+};
+
+module_platform_driver(cdns_spi_driver);
+
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_DESCRIPTION("Cadence SPI driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-cavium-octeon.c b/drivers/spi/spi-cavium-octeon.c
new file mode 100644
index 000000000..1a2de6ce9
--- /dev/null
+++ b/drivers/spi/spi-cavium-octeon.c
@@ -0,0 +1,102 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2011, 2012 Cavium, Inc.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/of.h>
+
+#include <asm/octeon/octeon.h>
+
+#include "spi-cavium.h"
+
+static int octeon_spi_probe(struct platform_device *pdev)
+{
+ void __iomem *reg_base;
+ struct spi_master *master;
+ struct octeon_spi *p;
+ int err = -ENOENT;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct octeon_spi));
+ if (!master)
+ return -ENOMEM;
+ p = spi_master_get_devdata(master);
+ platform_set_drvdata(pdev, master);
+
+ reg_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(reg_base)) {
+ err = PTR_ERR(reg_base);
+ goto fail;
+ }
+
+ p->register_base = reg_base;
+ p->sys_freq = octeon_get_io_clock_rate();
+
+ p->regs.config = 0;
+ p->regs.status = 0x08;
+ p->regs.tx = 0x10;
+ p->regs.data = 0x80;
+
+ master->num_chipselect = 4;
+ master->mode_bits = SPI_CPHA |
+ SPI_CPOL |
+ SPI_CS_HIGH |
+ SPI_LSB_FIRST |
+ SPI_3WIRE;
+
+ master->transfer_one_message = octeon_spi_transfer_one_message;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->max_speed_hz = OCTEON_SPI_MAX_CLOCK_HZ;
+
+ master->dev.of_node = pdev->dev.of_node;
+ err = devm_spi_register_master(&pdev->dev, master);
+ if (err) {
+ dev_err(&pdev->dev, "register master failed: %d\n", err);
+ goto fail;
+ }
+
+ dev_info(&pdev->dev, "OCTEON SPI bus driver\n");
+
+ return 0;
+fail:
+ spi_master_put(master);
+ return err;
+}
+
+static int octeon_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct octeon_spi *p = spi_master_get_devdata(master);
+
+ /* Clear the CSENA* and put everything in a known state. */
+ writeq(0, p->register_base + OCTEON_SPI_CFG(p));
+
+ return 0;
+}
+
+static const struct of_device_id octeon_spi_match[] = {
+ { .compatible = "cavium,octeon-3010-spi", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, octeon_spi_match);
+
+static struct platform_driver octeon_spi_driver = {
+ .driver = {
+ .name = "spi-octeon",
+ .of_match_table = octeon_spi_match,
+ },
+ .probe = octeon_spi_probe,
+ .remove = octeon_spi_remove,
+};
+
+module_platform_driver(octeon_spi_driver);
+
+MODULE_DESCRIPTION("Cavium, Inc. OCTEON SPI bus driver");
+MODULE_AUTHOR("David Daney");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-cavium-thunderx.c b/drivers/spi/spi-cavium-thunderx.c
new file mode 100644
index 000000000..60c0d6934
--- /dev/null
+++ b/drivers/spi/spi-cavium-thunderx.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Cavium ThunderX SPI driver.
+ *
+ * Copyright (C) 2016 Cavium Inc.
+ * Authors: Jan Glauber <jglauber@cavium.com>
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/spi/spi.h>
+
+#include "spi-cavium.h"
+
+#define DRV_NAME "spi-thunderx"
+
+#define SYS_FREQ_DEFAULT 700000000 /* 700 MHz */
+
+static int thunderx_spi_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ struct spi_master *master;
+ struct octeon_spi *p;
+ int ret;
+
+ master = spi_alloc_master(dev, sizeof(struct octeon_spi));
+ if (!master)
+ return -ENOMEM;
+
+ p = spi_master_get_devdata(master);
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ goto error;
+
+ ret = pci_request_regions(pdev, DRV_NAME);
+ if (ret)
+ goto error;
+
+ p->register_base = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0));
+ if (!p->register_base) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ p->regs.config = 0x1000;
+ p->regs.status = 0x1008;
+ p->regs.tx = 0x1010;
+ p->regs.data = 0x1080;
+
+ p->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(p->clk)) {
+ ret = PTR_ERR(p->clk);
+ goto error;
+ }
+
+ ret = clk_prepare_enable(p->clk);
+ if (ret)
+ goto error;
+
+ p->sys_freq = clk_get_rate(p->clk);
+ if (!p->sys_freq)
+ p->sys_freq = SYS_FREQ_DEFAULT;
+ dev_info(dev, "Set system clock to %u\n", p->sys_freq);
+
+ master->flags = SPI_MASTER_HALF_DUPLEX;
+ master->num_chipselect = 4;
+ master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_CS_HIGH |
+ SPI_LSB_FIRST | SPI_3WIRE;
+ master->transfer_one_message = octeon_spi_transfer_one_message;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->max_speed_hz = OCTEON_SPI_MAX_CLOCK_HZ;
+ master->dev.of_node = pdev->dev.of_node;
+
+ pci_set_drvdata(pdev, master);
+
+ ret = devm_spi_register_master(dev, master);
+ if (ret)
+ goto error;
+
+ return 0;
+
+error:
+ clk_disable_unprepare(p->clk);
+ pci_release_regions(pdev);
+ spi_master_put(master);
+ return ret;
+}
+
+static void thunderx_spi_remove(struct pci_dev *pdev)
+{
+ struct spi_master *master = pci_get_drvdata(pdev);
+ struct octeon_spi *p;
+
+ p = spi_master_get_devdata(master);
+ if (!p)
+ return;
+
+ clk_disable_unprepare(p->clk);
+ pci_release_regions(pdev);
+ /* Put everything in a known state. */
+ writeq(0, p->register_base + OCTEON_SPI_CFG(p));
+}
+
+static const struct pci_device_id thunderx_spi_pci_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xa00b) },
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, thunderx_spi_pci_id_table);
+
+static struct pci_driver thunderx_spi_driver = {
+ .name = DRV_NAME,
+ .id_table = thunderx_spi_pci_id_table,
+ .probe = thunderx_spi_probe,
+ .remove = thunderx_spi_remove,
+};
+
+module_pci_driver(thunderx_spi_driver);
+
+MODULE_DESCRIPTION("Cavium, Inc. ThunderX SPI bus driver");
+MODULE_AUTHOR("Jan Glauber");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-cavium.c b/drivers/spi/spi-cavium.c
new file mode 100644
index 000000000..6854c3ce4
--- /dev/null
+++ b/drivers/spi/spi-cavium.c
@@ -0,0 +1,150 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2011, 2012 Cavium, Inc.
+ */
+
+#include <linux/spi/spi.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#include "spi-cavium.h"
+
+static void octeon_spi_wait_ready(struct octeon_spi *p)
+{
+ union cvmx_mpi_sts mpi_sts;
+ unsigned int loops = 0;
+
+ do {
+ if (loops++)
+ __delay(500);
+ mpi_sts.u64 = readq(p->register_base + OCTEON_SPI_STS(p));
+ } while (mpi_sts.s.busy);
+}
+
+static int octeon_spi_do_transfer(struct octeon_spi *p,
+ struct spi_message *msg,
+ struct spi_transfer *xfer,
+ bool last_xfer)
+{
+ struct spi_device *spi = msg->spi;
+ union cvmx_mpi_cfg mpi_cfg;
+ union cvmx_mpi_tx mpi_tx;
+ unsigned int clkdiv;
+ int mode;
+ bool cpha, cpol;
+ const u8 *tx_buf;
+ u8 *rx_buf;
+ int len;
+ int i;
+
+ mode = spi->mode;
+ cpha = mode & SPI_CPHA;
+ cpol = mode & SPI_CPOL;
+
+ clkdiv = p->sys_freq / (2 * xfer->speed_hz);
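+ /*
+ * Assuming SCLK = sys_freq / (2 * clkdiv): e.g. a 700 MHz system clock
+ * and a 10 MHz request give clkdiv = 35, i.e. exactly 10 MHz on the wire.
+ */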
+
+ mpi_cfg.u64 = 0;
+
+ mpi_cfg.s.clkdiv = clkdiv;
+ mpi_cfg.s.cshi = (mode & SPI_CS_HIGH) ? 1 : 0;
+ mpi_cfg.s.lsbfirst = (mode & SPI_LSB_FIRST) ? 1 : 0;
+ mpi_cfg.s.wireor = (mode & SPI_3WIRE) ? 1 : 0;
+ mpi_cfg.s.idlelo = cpha != cpol;
+ mpi_cfg.s.cslate = cpha ? 1 : 0;
+ mpi_cfg.s.enable = 1;
+
+ if (spi->chip_select < 4)
+ p->cs_enax |= 1ull << (12 + spi->chip_select);
+ mpi_cfg.u64 |= p->cs_enax;
+
+ if (mpi_cfg.u64 != p->last_cfg) {
+ p->last_cfg = mpi_cfg.u64;
+ writeq(mpi_cfg.u64, p->register_base + OCTEON_SPI_CFG(p));
+ }
+ tx_buf = xfer->tx_buf;
+ rx_buf = xfer->rx_buf;
+ len = xfer->len;
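+ /*
+ * The MPI engine moves at most OCTEON_SPI_MAX_BYTES (9) bytes per
+ * command, so longer transfers are split into full chunks with
+ * leavecs = 1 to keep chip select asserted between them; the final,
+ * possibly partial chunk below honours cs_change.
+ */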
+ while (len > OCTEON_SPI_MAX_BYTES) {
+ for (i = 0; i < OCTEON_SPI_MAX_BYTES; i++) {
+ u8 d;
+ if (tx_buf)
+ d = *tx_buf++;
+ else
+ d = 0;
+ writeq(d, p->register_base + OCTEON_SPI_DAT0(p) + (8 * i));
+ }
+ mpi_tx.u64 = 0;
+ mpi_tx.s.csid = spi->chip_select;
+ mpi_tx.s.leavecs = 1;
+ mpi_tx.s.txnum = tx_buf ? OCTEON_SPI_MAX_BYTES : 0;
+ mpi_tx.s.totnum = OCTEON_SPI_MAX_BYTES;
+ writeq(mpi_tx.u64, p->register_base + OCTEON_SPI_TX(p));
+
+ octeon_spi_wait_ready(p);
+ if (rx_buf)
+ for (i = 0; i < OCTEON_SPI_MAX_BYTES; i++) {
+ u64 v = readq(p->register_base + OCTEON_SPI_DAT0(p) + (8 * i));
+ *rx_buf++ = (u8)v;
+ }
+ len -= OCTEON_SPI_MAX_BYTES;
+ }
+
+ for (i = 0; i < len; i++) {
+ u8 d;
+ if (tx_buf)
+ d = *tx_buf++;
+ else
+ d = 0;
+ writeq(d, p->register_base + OCTEON_SPI_DAT0(p) + (8 * i));
+ }
+
+ mpi_tx.u64 = 0;
+ mpi_tx.s.csid = spi->chip_select;
+ if (last_xfer)
+ mpi_tx.s.leavecs = xfer->cs_change;
+ else
+ mpi_tx.s.leavecs = !xfer->cs_change;
+ mpi_tx.s.txnum = tx_buf ? len : 0;
+ mpi_tx.s.totnum = len;
+ writeq(mpi_tx.u64, p->register_base + OCTEON_SPI_TX(p));
+
+ octeon_spi_wait_ready(p);
+ if (rx_buf)
+ for (i = 0; i < len; i++) {
+ u64 v = readq(p->register_base + OCTEON_SPI_DAT0(p) + (8 * i));
+ *rx_buf++ = (u8)v;
+ }
+
+ spi_transfer_delay_exec(xfer);
+
+ return xfer->len;
+}
+
+int octeon_spi_transfer_one_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct octeon_spi *p = spi_master_get_devdata(master);
+ unsigned int total_len = 0;
+ int status = 0;
+ struct spi_transfer *xfer;
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ bool last_xfer = list_is_last(&xfer->transfer_list,
+ &msg->transfers);
+ int r = octeon_spi_do_transfer(p, msg, xfer, last_xfer);
+ if (r < 0) {
+ status = r;
+ goto err;
+ }
+ total_len += r;
+ }
+err:
+ msg->status = status;
+ msg->actual_length = total_len;
+ spi_finalize_current_message(master);
+ return status;
+}
diff --git a/drivers/spi/spi-cavium.h b/drivers/spi/spi-cavium.h
new file mode 100644
index 000000000..1f3ac463a
--- /dev/null
+++ b/drivers/spi/spi-cavium.h
@@ -0,0 +1,333 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __SPI_CAVIUM_H
+#define __SPI_CAVIUM_H
+
+#include <linux/clk.h>
+
+#define OCTEON_SPI_MAX_BYTES 9
+#define OCTEON_SPI_MAX_CLOCK_HZ 16000000
+
+struct octeon_spi_regs {
+ int config;
+ int status;
+ int tx;
+ int data;
+};
+
+struct octeon_spi {
+ void __iomem *register_base;
+ u64 last_cfg;
+ u64 cs_enax;
+ int sys_freq;
+ struct octeon_spi_regs regs;
+ struct clk *clk;
+};
+
+#define OCTEON_SPI_CFG(x) (x->regs.config)
+#define OCTEON_SPI_STS(x) (x->regs.status)
+#define OCTEON_SPI_TX(x) (x->regs.tx)
+#define OCTEON_SPI_DAT0(x) (x->regs.data)
+
+int octeon_spi_transfer_one_message(struct spi_master *master,
+ struct spi_message *msg);
+
+/* MPI register descriptions */
+
+#define CVMX_MPI_CFG (CVMX_ADD_IO_SEG(0x0001070000001000ull))
+#define CVMX_MPI_DATX(offset) (CVMX_ADD_IO_SEG(0x0001070000001080ull) + ((offset) & 15) * 8)
+#define CVMX_MPI_STS (CVMX_ADD_IO_SEG(0x0001070000001008ull))
+#define CVMX_MPI_TX (CVMX_ADD_IO_SEG(0x0001070000001010ull))
+
+union cvmx_mpi_cfg {
+ uint64_t u64;
+ struct cvmx_mpi_cfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63:35;
+ uint64_t clkdiv:13;
+ uint64_t csena3:1;
+ uint64_t csena2:1;
+ uint64_t csena1:1;
+ uint64_t csena0:1;
+ uint64_t cslate:1;
+ uint64_t tritx:1;
+ uint64_t idleclks:2;
+ uint64_t cshi:1;
+ uint64_t csena:1;
+ uint64_t int_ena:1;
+ uint64_t lsbfirst:1;
+ uint64_t wireor:1;
+ uint64_t clk_cont:1;
+ uint64_t idlelo:1;
+ uint64_t enable:1;
+#else
+ uint64_t enable:1;
+ uint64_t idlelo:1;
+ uint64_t clk_cont:1;
+ uint64_t wireor:1;
+ uint64_t lsbfirst:1;
+ uint64_t int_ena:1;
+ uint64_t csena:1;
+ uint64_t cshi:1;
+ uint64_t idleclks:2;
+ uint64_t tritx:1;
+ uint64_t cslate:1;
+ uint64_t csena0:1;
+ uint64_t csena1:1;
+ uint64_t csena2:1;
+ uint64_t csena3:1;
+ uint64_t clkdiv:13;
+ uint64_t reserved_29_63:35;
+#endif
+ } s;
+ struct cvmx_mpi_cfg_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63:35;
+ uint64_t clkdiv:13;
+ uint64_t reserved_12_15:4;
+ uint64_t cslate:1;
+ uint64_t tritx:1;
+ uint64_t idleclks:2;
+ uint64_t cshi:1;
+ uint64_t csena:1;
+ uint64_t int_ena:1;
+ uint64_t lsbfirst:1;
+ uint64_t wireor:1;
+ uint64_t clk_cont:1;
+ uint64_t idlelo:1;
+ uint64_t enable:1;
+#else
+ uint64_t enable:1;
+ uint64_t idlelo:1;
+ uint64_t clk_cont:1;
+ uint64_t wireor:1;
+ uint64_t lsbfirst:1;
+ uint64_t int_ena:1;
+ uint64_t csena:1;
+ uint64_t cshi:1;
+ uint64_t idleclks:2;
+ uint64_t tritx:1;
+ uint64_t cslate:1;
+ uint64_t reserved_12_15:4;
+ uint64_t clkdiv:13;
+ uint64_t reserved_29_63:35;
+#endif
+ } cn30xx;
+ struct cvmx_mpi_cfg_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63:35;
+ uint64_t clkdiv:13;
+ uint64_t reserved_11_15:5;
+ uint64_t tritx:1;
+ uint64_t idleclks:2;
+ uint64_t cshi:1;
+ uint64_t csena:1;
+ uint64_t int_ena:1;
+ uint64_t lsbfirst:1;
+ uint64_t wireor:1;
+ uint64_t clk_cont:1;
+ uint64_t idlelo:1;
+ uint64_t enable:1;
+#else
+ uint64_t enable:1;
+ uint64_t idlelo:1;
+ uint64_t clk_cont:1;
+ uint64_t wireor:1;
+ uint64_t lsbfirst:1;
+ uint64_t int_ena:1;
+ uint64_t csena:1;
+ uint64_t cshi:1;
+ uint64_t idleclks:2;
+ uint64_t tritx:1;
+ uint64_t reserved_11_15:5;
+ uint64_t clkdiv:13;
+ uint64_t reserved_29_63:35;
+#endif
+ } cn31xx;
+ struct cvmx_mpi_cfg_cn30xx cn50xx;
+ struct cvmx_mpi_cfg_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63:35;
+ uint64_t clkdiv:13;
+ uint64_t reserved_14_15:2;
+ uint64_t csena1:1;
+ uint64_t csena0:1;
+ uint64_t cslate:1;
+ uint64_t tritx:1;
+ uint64_t idleclks:2;
+ uint64_t cshi:1;
+ uint64_t reserved_6_6:1;
+ uint64_t int_ena:1;
+ uint64_t lsbfirst:1;
+ uint64_t wireor:1;
+ uint64_t clk_cont:1;
+ uint64_t idlelo:1;
+ uint64_t enable:1;
+#else
+ uint64_t enable:1;
+ uint64_t idlelo:1;
+ uint64_t clk_cont:1;
+ uint64_t wireor:1;
+ uint64_t lsbfirst:1;
+ uint64_t int_ena:1;
+ uint64_t reserved_6_6:1;
+ uint64_t cshi:1;
+ uint64_t idleclks:2;
+ uint64_t tritx:1;
+ uint64_t cslate:1;
+ uint64_t csena0:1;
+ uint64_t csena1:1;
+ uint64_t reserved_14_15:2;
+ uint64_t clkdiv:13;
+ uint64_t reserved_29_63:35;
+#endif
+ } cn61xx;
+ struct cvmx_mpi_cfg_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63:35;
+ uint64_t clkdiv:13;
+ uint64_t csena3:1;
+ uint64_t csena2:1;
+ uint64_t reserved_12_13:2;
+ uint64_t cslate:1;
+ uint64_t tritx:1;
+ uint64_t idleclks:2;
+ uint64_t cshi:1;
+ uint64_t reserved_6_6:1;
+ uint64_t int_ena:1;
+ uint64_t lsbfirst:1;
+ uint64_t wireor:1;
+ uint64_t clk_cont:1;
+ uint64_t idlelo:1;
+ uint64_t enable:1;
+#else
+ uint64_t enable:1;
+ uint64_t idlelo:1;
+ uint64_t clk_cont:1;
+ uint64_t wireor:1;
+ uint64_t lsbfirst:1;
+ uint64_t int_ena:1;
+ uint64_t reserved_6_6:1;
+ uint64_t cshi:1;
+ uint64_t idleclks:2;
+ uint64_t tritx:1;
+ uint64_t cslate:1;
+ uint64_t reserved_12_13:2;
+ uint64_t csena2:1;
+ uint64_t csena3:1;
+ uint64_t clkdiv:13;
+ uint64_t reserved_29_63:35;
+#endif
+ } cn66xx;
+ struct cvmx_mpi_cfg_cn61xx cnf71xx;
+};
+
+union cvmx_mpi_datx {
+ uint64_t u64;
+ struct cvmx_mpi_datx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t data:8;
+#else
+ uint64_t data:8;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+ struct cvmx_mpi_datx_s cn30xx;
+ struct cvmx_mpi_datx_s cn31xx;
+ struct cvmx_mpi_datx_s cn50xx;
+ struct cvmx_mpi_datx_s cn61xx;
+ struct cvmx_mpi_datx_s cn66xx;
+ struct cvmx_mpi_datx_s cnf71xx;
+};
+
+union cvmx_mpi_sts {
+ uint64_t u64;
+ struct cvmx_mpi_sts_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_13_63:51;
+ uint64_t rxnum:5;
+ uint64_t reserved_1_7:7;
+ uint64_t busy:1;
+#else
+ uint64_t busy:1;
+ uint64_t reserved_1_7:7;
+ uint64_t rxnum:5;
+ uint64_t reserved_13_63:51;
+#endif
+ } s;
+ struct cvmx_mpi_sts_s cn30xx;
+ struct cvmx_mpi_sts_s cn31xx;
+ struct cvmx_mpi_sts_s cn50xx;
+ struct cvmx_mpi_sts_s cn61xx;
+ struct cvmx_mpi_sts_s cn66xx;
+ struct cvmx_mpi_sts_s cnf71xx;
+};
+
+union cvmx_mpi_tx {
+ uint64_t u64;
+ struct cvmx_mpi_tx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_22_63:42;
+ uint64_t csid:2;
+ uint64_t reserved_17_19:3;
+ uint64_t leavecs:1;
+ uint64_t reserved_13_15:3;
+ uint64_t txnum:5;
+ uint64_t reserved_5_7:3;
+ uint64_t totnum:5;
+#else
+ uint64_t totnum:5;
+ uint64_t reserved_5_7:3;
+ uint64_t txnum:5;
+ uint64_t reserved_13_15:3;
+ uint64_t leavecs:1;
+ uint64_t reserved_17_19:3;
+ uint64_t csid:2;
+ uint64_t reserved_22_63:42;
+#endif
+ } s;
+ struct cvmx_mpi_tx_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63:47;
+ uint64_t leavecs:1;
+ uint64_t reserved_13_15:3;
+ uint64_t txnum:5;
+ uint64_t reserved_5_7:3;
+ uint64_t totnum:5;
+#else
+ uint64_t totnum:5;
+ uint64_t reserved_5_7:3;
+ uint64_t txnum:5;
+ uint64_t reserved_13_15:3;
+ uint64_t leavecs:1;
+ uint64_t reserved_17_63:47;
+#endif
+ } cn30xx;
+ struct cvmx_mpi_tx_cn30xx cn31xx;
+ struct cvmx_mpi_tx_cn30xx cn50xx;
+ struct cvmx_mpi_tx_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_21_63:43;
+ uint64_t csid:1;
+ uint64_t reserved_17_19:3;
+ uint64_t leavecs:1;
+ uint64_t reserved_13_15:3;
+ uint64_t txnum:5;
+ uint64_t reserved_5_7:3;
+ uint64_t totnum:5;
+#else
+ uint64_t totnum:5;
+ uint64_t reserved_5_7:3;
+ uint64_t txnum:5;
+ uint64_t reserved_13_15:3;
+ uint64_t leavecs:1;
+ uint64_t reserved_17_19:3;
+ uint64_t csid:1;
+ uint64_t reserved_21_63:43;
+#endif
+ } cn61xx;
+ struct cvmx_mpi_tx_s cn66xx;
+ struct cvmx_mpi_tx_cn61xx cnf71xx;
+};
+
+#endif /* __SPI_CAVIUM_H */
diff --git a/drivers/spi/spi-clps711x.c b/drivers/spi/spi-clps711x.c
new file mode 100644
index 000000000..c005ed26a
--- /dev/null
+++ b/drivers/spi/spi-clps711x.c
@@ -0,0 +1,173 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * CLPS711X SPI bus driver
+ *
+ * Copyright (C) 2012-2016 Alexander Shiyan <shc_work@mail.ru>
+ */
+
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/clps711x.h>
+#include <linux/spi/spi.h>
+
+#define DRIVER_NAME "clps711x-spi"
+
+#define SYNCIO_FRMLEN(x) ((x) << 8)
+#define SYNCIO_TXFRMEN (1 << 14)
+
+struct spi_clps711x_data {
+ void __iomem *syncio;
+ struct regmap *syscon;
+ struct clk *spi_clk;
+
+ u8 *tx_buf;
+ u8 *rx_buf;
+ unsigned int bpw;
+ int len;
+};
+
+static int spi_clps711x_prepare_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct spi_clps711x_data *hw = spi_master_get_devdata(master);
+ struct spi_device *spi = msg->spi;
+
+ /* Setup mode for transfer */
+ return regmap_update_bits(hw->syscon, SYSCON_OFFSET, SYSCON3_ADCCKNSEN,
+ (spi->mode & SPI_CPHA) ?
+ SYSCON3_ADCCKNSEN : 0);
+}
+
+static int spi_clps711x_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct spi_clps711x_data *hw = spi_master_get_devdata(master);
+ u8 data;
+
+ clk_set_rate(hw->spi_clk, xfer->speed_hz ? : spi->max_speed_hz);
+
+ hw->len = xfer->len;
+ hw->bpw = xfer->bits_per_word;
+ hw->tx_buf = (u8 *)xfer->tx_buf;
+ hw->rx_buf = (u8 *)xfer->rx_buf;
+
+ /* Initiate transfer */
+ data = hw->tx_buf ? *hw->tx_buf++ : 0;
+ writel(data | SYNCIO_FRMLEN(hw->bpw) | SYNCIO_TXFRMEN, hw->syncio);
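+ /*
+ * Each SYNCIO write shifts one frame of bpw bits. Returning 1 tells the
+ * core to wait; spi_clps711x_isr() then reads the received data and
+ * queues further frames until hw->len is exhausted.
+ */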
+
+ return 1;
+}
+
+static irqreturn_t spi_clps711x_isr(int irq, void *dev_id)
+{
+ struct spi_master *master = dev_id;
+ struct spi_clps711x_data *hw = spi_master_get_devdata(master);
+ u8 data;
+
+ /* Handle RX */
+ data = readb(hw->syncio);
+ if (hw->rx_buf)
+ *hw->rx_buf++ = data;
+
+ /* Handle TX */
+ if (--hw->len > 0) {
+ data = hw->tx_buf ? *hw->tx_buf++ : 0;
+ writel(data | SYNCIO_FRMLEN(hw->bpw) | SYNCIO_TXFRMEN,
+ hw->syncio);
+ } else
+ spi_finalize_current_transfer(master);
+
+ return IRQ_HANDLED;
+}
+
+static int spi_clps711x_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct spi_clps711x_data *hw;
+ struct spi_master *master;
+ int irq, ret;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*hw));
+ if (!master)
+ return -ENOMEM;
+
+ master->use_gpio_descriptors = true;
+ master->bus_num = -1;
+ master->mode_bits = SPI_CPHA | SPI_CS_HIGH;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 8);
+ master->dev.of_node = pdev->dev.of_node;
+ master->prepare_message = spi_clps711x_prepare_message;
+ master->transfer_one = spi_clps711x_transfer_one;
+
+ hw = spi_master_get_devdata(master);
+
+ hw->spi_clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(hw->spi_clk)) {
+ ret = PTR_ERR(hw->spi_clk);
+ goto err_out;
+ }
+
+ hw->syscon = syscon_regmap_lookup_by_phandle(np, "syscon");
+ if (IS_ERR(hw->syscon)) {
+ ret = PTR_ERR(hw->syscon);
+ goto err_out;
+ }
+
+ hw->syncio = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(hw->syncio)) {
+ ret = PTR_ERR(hw->syncio);
+ goto err_out;
+ }
+
+ /* Disable extended mode due to hardware problems */
+ regmap_update_bits(hw->syscon, SYSCON_OFFSET, SYSCON3_ADCCON, 0);
+
+ /* Clear possible pending interrupt */
+ readl(hw->syncio);
+
+ ret = devm_request_irq(&pdev->dev, irq, spi_clps711x_isr, 0,
+ dev_name(&pdev->dev), master);
+ if (ret)
+ goto err_out;
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (!ret)
+ return 0;
+
+err_out:
+ spi_master_put(master);
+
+ return ret;
+}
+
+static const struct of_device_id clps711x_spi_dt_ids[] = {
+ { .compatible = "cirrus,ep7209-spi", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, clps711x_spi_dt_ids);
+
+static struct platform_driver clps711x_spi_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = clps711x_spi_dt_ids,
+ },
+ .probe = spi_clps711x_probe,
+};
+module_platform_driver(clps711x_spi_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Alexander Shiyan <shc_work@mail.ru>");
+MODULE_DESCRIPTION("CLPS711X SPI bus driver");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/spi/spi-coldfire-qspi.c b/drivers/spi/spi-coldfire-qspi.c
new file mode 100644
index 000000000..263ce9047
--- /dev/null
+++ b/drivers/spi/spi-coldfire-qspi.c
@@ -0,0 +1,519 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Freescale/Motorola Coldfire Queued SPI driver
+ *
+ * Copyright 2010 Steven King <sfking@fdwdc.com>
+*/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/spi/spi.h>
+#include <linux/pm_runtime.h>
+
+#include <asm/coldfire.h>
+#include <asm/mcfsim.h>
+#include <asm/mcfqspi.h>
+
+#define DRIVER_NAME "mcfqspi"
+
+#define MCFQSPI_BUSCLK (MCF_BUSCLK / 2)
+
+#define MCFQSPI_QMR 0x00
+#define MCFQSPI_QMR_MSTR 0x8000
+#define MCFQSPI_QMR_CPOL 0x0200
+#define MCFQSPI_QMR_CPHA 0x0100
+#define MCFQSPI_QDLYR 0x04
+#define MCFQSPI_QDLYR_SPE 0x8000
+#define MCFQSPI_QWR 0x08
+#define MCFQSPI_QWR_HALT 0x8000
+#define MCFQSPI_QWR_WREN 0x4000
+#define MCFQSPI_QWR_CSIV 0x1000
+#define MCFQSPI_QIR 0x0C
+#define MCFQSPI_QIR_WCEFB 0x8000
+#define MCFQSPI_QIR_ABRTB 0x4000
+#define MCFQSPI_QIR_ABRTL 0x1000
+#define MCFQSPI_QIR_WCEFE 0x0800
+#define MCFQSPI_QIR_ABRTE 0x0400
+#define MCFQSPI_QIR_SPIFE 0x0100
+#define MCFQSPI_QIR_WCEF 0x0008
+#define MCFQSPI_QIR_ABRT 0x0004
+#define MCFQSPI_QIR_SPIF 0x0001
+#define MCFQSPI_QAR 0x010
+#define MCFQSPI_QAR_TXBUF 0x00
+#define MCFQSPI_QAR_RXBUF 0x10
+#define MCFQSPI_QAR_CMDBUF 0x20
+#define MCFQSPI_QDR 0x014
+#define MCFQSPI_QCR 0x014
+#define MCFQSPI_QCR_CONT 0x8000
+#define MCFQSPI_QCR_BITSE 0x4000
+#define MCFQSPI_QCR_DT 0x2000
+
+struct mcfqspi {
+ void __iomem *iobase;
+ int irq;
+ struct clk *clk;
+ struct mcfqspi_cs_control *cs_control;
+
+ wait_queue_head_t waitq;
+};
+
+static void mcfqspi_wr_qmr(struct mcfqspi *mcfqspi, u16 val)
+{
+ writew(val, mcfqspi->iobase + MCFQSPI_QMR);
+}
+
+static void mcfqspi_wr_qdlyr(struct mcfqspi *mcfqspi, u16 val)
+{
+ writew(val, mcfqspi->iobase + MCFQSPI_QDLYR);
+}
+
+static u16 mcfqspi_rd_qdlyr(struct mcfqspi *mcfqspi)
+{
+ return readw(mcfqspi->iobase + MCFQSPI_QDLYR);
+}
+
+static void mcfqspi_wr_qwr(struct mcfqspi *mcfqspi, u16 val)
+{
+ writew(val, mcfqspi->iobase + MCFQSPI_QWR);
+}
+
+static void mcfqspi_wr_qir(struct mcfqspi *mcfqspi, u16 val)
+{
+ writew(val, mcfqspi->iobase + MCFQSPI_QIR);
+}
+
+static void mcfqspi_wr_qar(struct mcfqspi *mcfqspi, u16 val)
+{
+ writew(val, mcfqspi->iobase + MCFQSPI_QAR);
+}
+
+static void mcfqspi_wr_qdr(struct mcfqspi *mcfqspi, u16 val)
+{
+ writew(val, mcfqspi->iobase + MCFQSPI_QDR);
+}
+
+static u16 mcfqspi_rd_qdr(struct mcfqspi *mcfqspi)
+{
+ return readw(mcfqspi->iobase + MCFQSPI_QDR);
+}
+
+static void mcfqspi_cs_select(struct mcfqspi *mcfqspi, u8 chip_select,
+ bool cs_high)
+{
+ mcfqspi->cs_control->select(mcfqspi->cs_control, chip_select, cs_high);
+}
+
+static void mcfqspi_cs_deselect(struct mcfqspi *mcfqspi, u8 chip_select,
+ bool cs_high)
+{
+ mcfqspi->cs_control->deselect(mcfqspi->cs_control, chip_select, cs_high);
+}
+
+static int mcfqspi_cs_setup(struct mcfqspi *mcfqspi)
+{
+ return (mcfqspi->cs_control->setup) ?
+ mcfqspi->cs_control->setup(mcfqspi->cs_control) : 0;
+}
+
+static void mcfqspi_cs_teardown(struct mcfqspi *mcfqspi)
+{
+ if (mcfqspi->cs_control->teardown)
+ mcfqspi->cs_control->teardown(mcfqspi->cs_control);
+}
+
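+/*
+ * Compute the QMR baud divider: round up so that the resulting SPI clock
+ * never exceeds the requested rate, then clamp to the valid 2..255 range.
+ */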
+static u8 mcfqspi_qmr_baud(u32 speed_hz)
+{
+ return clamp((MCFQSPI_BUSCLK + speed_hz - 1) / speed_hz, 2u, 255u);
+}
+
+static bool mcfqspi_qdlyr_spe(struct mcfqspi *mcfqspi)
+{
+ return mcfqspi_rd_qdlyr(mcfqspi) & MCFQSPI_QDLYR_SPE;
+}
+
+static irqreturn_t mcfqspi_irq_handler(int this_irq, void *dev_id)
+{
+ struct mcfqspi *mcfqspi = dev_id;
+
+ /* clear interrupt */
+ mcfqspi_wr_qir(mcfqspi, MCFQSPI_QIR_SPIFE | MCFQSPI_QIR_SPIF);
+ wake_up(&mcfqspi->waitq);
+
+ return IRQ_HANDLED;
+}
+
+static void mcfqspi_transfer_msg8(struct mcfqspi *mcfqspi, unsigned count,
+ const u8 *txbuf, u8 *rxbuf)
+{
+ unsigned i, n, offset = 0;
+
+ n = min(count, 16u);
+
+ mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_CMDBUF);
+ for (i = 0; i < n; ++i)
+ mcfqspi_wr_qdr(mcfqspi, MCFQSPI_QCR_BITSE);
+
+ mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_TXBUF);
+ if (txbuf)
+ for (i = 0; i < n; ++i)
+ mcfqspi_wr_qdr(mcfqspi, *txbuf++);
+ else
+ for (i = 0; i < count; ++i)
+ mcfqspi_wr_qdr(mcfqspi, 0);
+
+ count -= n;
+ if (count) {
+ u16 qwr = 0xf08;
+ mcfqspi_wr_qwr(mcfqspi, 0x700);
+ mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE);
+
+ do {
+ wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi));
+ mcfqspi_wr_qwr(mcfqspi, qwr);
+ mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE);
+ if (rxbuf) {
+ mcfqspi_wr_qar(mcfqspi,
+ MCFQSPI_QAR_RXBUF + offset);
+ for (i = 0; i < 8; ++i)
+ *rxbuf++ = mcfqspi_rd_qdr(mcfqspi);
+ }
+ n = min(count, 8u);
+ if (txbuf) {
+ mcfqspi_wr_qar(mcfqspi,
+ MCFQSPI_QAR_TXBUF + offset);
+ for (i = 0; i < n; ++i)
+ mcfqspi_wr_qdr(mcfqspi, *txbuf++);
+ }
+ qwr = (offset ? 0x808 : 0) + ((n - 1) << 8);
+ offset ^= 8;
+ count -= n;
+ } while (count);
+ wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi));
+ mcfqspi_wr_qwr(mcfqspi, qwr);
+ mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE);
+ if (rxbuf) {
+ mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_RXBUF + offset);
+ for (i = 0; i < 8; ++i)
+ *rxbuf++ = mcfqspi_rd_qdr(mcfqspi);
+ offset ^= 8;
+ }
+ } else {
+ mcfqspi_wr_qwr(mcfqspi, (n - 1) << 8);
+ mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE);
+ }
+ wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi));
+ if (rxbuf) {
+ mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_RXBUF + offset);
+ for (i = 0; i < n; ++i)
+ *rxbuf++ = mcfqspi_rd_qdr(mcfqspi);
+ }
+}
+
+static void mcfqspi_transfer_msg16(struct mcfqspi *mcfqspi, unsigned count,
+ const u16 *txbuf, u16 *rxbuf)
+{
+ unsigned i, n, offset = 0;
+
+ n = min(count, 16u);
+
+ mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_CMDBUF);
+ for (i = 0; i < n; ++i)
+ mcfqspi_wr_qdr(mcfqspi, MCFQSPI_QCR_BITSE);
+
+ mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_TXBUF);
+ if (txbuf)
+ for (i = 0; i < n; ++i)
+ mcfqspi_wr_qdr(mcfqspi, *txbuf++);
+ else
+ for (i = 0; i < count; ++i)
+ mcfqspi_wr_qdr(mcfqspi, 0);
+
+ count -= n;
+ if (count) {
+ u16 qwr = 0xf08;
+ mcfqspi_wr_qwr(mcfqspi, 0x700);
+ mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE);
+
+ do {
+ wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi));
+ mcfqspi_wr_qwr(mcfqspi, qwr);
+ mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE);
+ if (rxbuf) {
+ mcfqspi_wr_qar(mcfqspi,
+ MCFQSPI_QAR_RXBUF + offset);
+ for (i = 0; i < 8; ++i)
+ *rxbuf++ = mcfqspi_rd_qdr(mcfqspi);
+ }
+ n = min(count, 8u);
+ if (txbuf) {
+ mcfqspi_wr_qar(mcfqspi,
+ MCFQSPI_QAR_TXBUF + offset);
+ for (i = 0; i < n; ++i)
+ mcfqspi_wr_qdr(mcfqspi, *txbuf++);
+ }
+ qwr = (offset ? 0x808 : 0x000) + ((n - 1) << 8);
+ offset ^= 8;
+ count -= n;
+ } while (count);
+ wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi));
+ mcfqspi_wr_qwr(mcfqspi, qwr);
+ mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE);
+ if (rxbuf) {
+ mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_RXBUF + offset);
+ for (i = 0; i < 8; ++i)
+ *rxbuf++ = mcfqspi_rd_qdr(mcfqspi);
+ offset ^= 8;
+ }
+ } else {
+ mcfqspi_wr_qwr(mcfqspi, (n - 1) << 8);
+ mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE);
+ }
+ wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi));
+ if (rxbuf) {
+ mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_RXBUF + offset);
+ for (i = 0; i < n; ++i)
+ *rxbuf++ = mcfqspi_rd_qdr(mcfqspi);
+ }
+}
+
+static void mcfqspi_set_cs(struct spi_device *spi, bool enable)
+{
+ struct mcfqspi *mcfqspi = spi_master_get_devdata(spi->master);
+ bool cs_high = spi->mode & SPI_CS_HIGH;
+
+ if (enable)
+ mcfqspi_cs_select(mcfqspi, spi->chip_select, cs_high);
+ else
+ mcfqspi_cs_deselect(mcfqspi, spi->chip_select, cs_high);
+}
+
+static int mcfqspi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
+ u16 qmr = MCFQSPI_QMR_MSTR;
+
+ qmr |= t->bits_per_word << 10;
+ if (spi->mode & SPI_CPHA)
+ qmr |= MCFQSPI_QMR_CPHA;
+ if (spi->mode & SPI_CPOL)
+ qmr |= MCFQSPI_QMR_CPOL;
+ qmr |= mcfqspi_qmr_baud(t->speed_hz);
+ mcfqspi_wr_qmr(mcfqspi, qmr);
+
+ mcfqspi_wr_qir(mcfqspi, MCFQSPI_QIR_SPIFE);
+ if (t->bits_per_word == 8)
+ mcfqspi_transfer_msg8(mcfqspi, t->len, t->tx_buf, t->rx_buf);
+ else
+ mcfqspi_transfer_msg16(mcfqspi, t->len / 2, t->tx_buf,
+ t->rx_buf);
+ mcfqspi_wr_qir(mcfqspi, 0);
+
+ return 0;
+}
+
+static int mcfqspi_setup(struct spi_device *spi)
+{
+ mcfqspi_cs_deselect(spi_master_get_devdata(spi->master),
+ spi->chip_select, spi->mode & SPI_CS_HIGH);
+
+ dev_dbg(&spi->dev,
+ "bits per word %d, chip select %d, speed %d KHz\n",
+ spi->bits_per_word, spi->chip_select,
+ (MCFQSPI_BUSCLK / mcfqspi_qmr_baud(spi->max_speed_hz))
+ / 1000);
+
+ return 0;
+}
+
+static int mcfqspi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct mcfqspi *mcfqspi;
+ struct mcfqspi_platform_data *pdata;
+ int status;
+
+ pdata = dev_get_platdata(&pdev->dev);
+ if (!pdata) {
+ dev_dbg(&pdev->dev, "platform data is missing\n");
+ return -ENOENT;
+ }
+
+ if (!pdata->cs_control) {
+ dev_dbg(&pdev->dev, "pdata->cs_control is NULL\n");
+ return -EINVAL;
+ }
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*mcfqspi));
+ if (master == NULL) {
+ dev_dbg(&pdev->dev, "spi_alloc_master failed\n");
+ return -ENOMEM;
+ }
+
+ mcfqspi = spi_master_get_devdata(master);
+
+ mcfqspi->iobase = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mcfqspi->iobase)) {
+ status = PTR_ERR(mcfqspi->iobase);
+ goto fail0;
+ }
+
+ mcfqspi->irq = platform_get_irq(pdev, 0);
+ if (mcfqspi->irq < 0) {
+ dev_dbg(&pdev->dev, "platform_get_irq failed\n");
+ status = -ENXIO;
+ goto fail0;
+ }
+
+ status = devm_request_irq(&pdev->dev, mcfqspi->irq, mcfqspi_irq_handler,
+ 0, pdev->name, mcfqspi);
+ if (status) {
+ dev_dbg(&pdev->dev, "request_irq failed\n");
+ goto fail0;
+ }
+
+ mcfqspi->clk = devm_clk_get(&pdev->dev, "qspi_clk");
+ if (IS_ERR(mcfqspi->clk)) {
+ dev_dbg(&pdev->dev, "clk_get failed\n");
+ status = PTR_ERR(mcfqspi->clk);
+ goto fail0;
+ }
+ clk_prepare_enable(mcfqspi->clk);
+
+ master->bus_num = pdata->bus_num;
+ master->num_chipselect = pdata->num_chipselect;
+
+ mcfqspi->cs_control = pdata->cs_control;
+ status = mcfqspi_cs_setup(mcfqspi);
+ if (status) {
+ dev_dbg(&pdev->dev, "error initializing cs_control\n");
+ goto fail1;
+ }
+
+ init_waitqueue_head(&mcfqspi->waitq);
+
+ master->mode_bits = SPI_CS_HIGH | SPI_CPOL | SPI_CPHA;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 16);
+ master->setup = mcfqspi_setup;
+ master->set_cs = mcfqspi_set_cs;
+ master->transfer_one = mcfqspi_transfer_one;
+ master->auto_runtime_pm = true;
+
+ platform_set_drvdata(pdev, master);
+ pm_runtime_enable(&pdev->dev);
+
+ status = devm_spi_register_master(&pdev->dev, master);
+ if (status) {
+ dev_dbg(&pdev->dev, "spi_register_master failed\n");
+ goto fail2;
+ }
+
+ dev_info(&pdev->dev, "Coldfire QSPI bus driver\n");
+
+ return 0;
+
+fail2:
+ pm_runtime_disable(&pdev->dev);
+ mcfqspi_cs_teardown(mcfqspi);
+fail1:
+ clk_disable_unprepare(mcfqspi->clk);
+fail0:
+ spi_master_put(master);
+
+ dev_dbg(&pdev->dev, "Coldfire QSPI probe failed\n");
+
+ return status;
+}
+
+static int mcfqspi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
+
+ pm_runtime_disable(&pdev->dev);
+ /* disable the hardware (set the baud rate to 0) */
+ mcfqspi_wr_qmr(mcfqspi, MCFQSPI_QMR_MSTR);
+
+ mcfqspi_cs_teardown(mcfqspi);
+ clk_disable_unprepare(mcfqspi->clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mcfqspi_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = spi_master_suspend(master);
+ if (ret)
+ return ret;
+
+ clk_disable(mcfqspi->clk);
+
+ return 0;
+}
+
+static int mcfqspi_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
+
+ clk_enable(mcfqspi->clk);
+
+ return spi_master_resume(master);
+}
+#endif
+
+#ifdef CONFIG_PM
+static int mcfqspi_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
+
+ clk_disable(mcfqspi->clk);
+
+ return 0;
+}
+
+static int mcfqspi_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
+
+ clk_enable(mcfqspi->clk);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops mcfqspi_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(mcfqspi_suspend, mcfqspi_resume)
+ SET_RUNTIME_PM_OPS(mcfqspi_runtime_suspend, mcfqspi_runtime_resume,
+ NULL)
+};
+
+static struct platform_driver mcfqspi_driver = {
+ .driver.name = DRIVER_NAME,
+ .driver.owner = THIS_MODULE,
+ .driver.pm = &mcfqspi_pm,
+ .probe = mcfqspi_probe,
+ .remove = mcfqspi_remove,
+};
+module_platform_driver(mcfqspi_driver);
+
+MODULE_AUTHOR("Steven King <sfking@fdwdc.com>");
+MODULE_DESCRIPTION("Coldfire QSPI Controller Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
new file mode 100644
index 000000000..d112c2cac
--- /dev/null
+++ b/drivers/spi/spi-davinci.c
@@ -0,0 +1,1053 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2009 Texas Instruments.
+ * Copyright (C) 2010 EF Johnson Technologies
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+#include <linux/slab.h>
+
+#include <linux/platform_data/spi-davinci.h>
+
+#define CS_DEFAULT 0xFF
+
+#define SPIFMT_PHASE_MASK BIT(16)
+#define SPIFMT_POLARITY_MASK BIT(17)
+#define SPIFMT_DISTIMER_MASK BIT(18)
+#define SPIFMT_SHIFTDIR_MASK BIT(20)
+#define SPIFMT_WAITENA_MASK BIT(21)
+#define SPIFMT_PARITYENA_MASK BIT(22)
+#define SPIFMT_ODD_PARITY_MASK BIT(23)
+#define SPIFMT_WDELAY_MASK 0x3f000000u
+#define SPIFMT_WDELAY_SHIFT 24
+#define SPIFMT_PRESCALE_SHIFT 8
+
+/* SPIPC0 */
+#define SPIPC0_DIFUN_MASK BIT(11) /* MISO */
+#define SPIPC0_DOFUN_MASK BIT(10) /* MOSI */
+#define SPIPC0_CLKFUN_MASK BIT(9) /* CLK */
+#define SPIPC0_SPIENA_MASK BIT(8) /* nREADY */
+
+#define SPIINT_MASKALL 0x0101035F
+#define SPIINT_MASKINT 0x0000015F
+#define SPI_INTLVL_1 0x000001FF
+#define SPI_INTLVL_0 0x00000000
+
+/* SPIDAT1 (upper 16 bit defines) */
+#define SPIDAT1_CSHOLD_MASK BIT(12)
+#define SPIDAT1_WDEL BIT(10)
+
+/* SPIGCR1 */
+#define SPIGCR1_CLKMOD_MASK BIT(1)
+#define SPIGCR1_MASTER_MASK BIT(0)
+#define SPIGCR1_POWERDOWN_MASK BIT(8)
+#define SPIGCR1_LOOPBACK_MASK BIT(16)
+#define SPIGCR1_SPIENA_MASK BIT(24)
+
+/* SPIBUF */
+#define SPIBUF_TXFULL_MASK BIT(29)
+#define SPIBUF_RXEMPTY_MASK BIT(31)
+
+/* SPIDELAY */
+#define SPIDELAY_C2TDELAY_SHIFT 24
+#define SPIDELAY_C2TDELAY_MASK (0xFF << SPIDELAY_C2TDELAY_SHIFT)
+#define SPIDELAY_T2CDELAY_SHIFT 16
+#define SPIDELAY_T2CDELAY_MASK (0xFF << SPIDELAY_T2CDELAY_SHIFT)
+#define SPIDELAY_T2EDELAY_SHIFT 8
+#define SPIDELAY_T2EDELAY_MASK (0xFF << SPIDELAY_T2EDELAY_SHIFT)
+#define SPIDELAY_C2EDELAY_SHIFT 0
+#define SPIDELAY_C2EDELAY_MASK 0xFF
+
+/* Error Masks */
+#define SPIFLG_DLEN_ERR_MASK BIT(0)
+#define SPIFLG_TIMEOUT_MASK BIT(1)
+#define SPIFLG_PARERR_MASK BIT(2)
+#define SPIFLG_DESYNC_MASK BIT(3)
+#define SPIFLG_BITERR_MASK BIT(4)
+#define SPIFLG_OVRRUN_MASK BIT(6)
+#define SPIFLG_BUF_INIT_ACTIVE_MASK BIT(24)
+#define SPIFLG_ERROR_MASK (SPIFLG_DLEN_ERR_MASK \
+ | SPIFLG_TIMEOUT_MASK | SPIFLG_PARERR_MASK \
+ | SPIFLG_DESYNC_MASK | SPIFLG_BITERR_MASK \
+ | SPIFLG_OVRRUN_MASK)
+
+#define SPIINT_DMA_REQ_EN BIT(16)
+
+/* SPI Controller registers */
+#define SPIGCR0 0x00
+#define SPIGCR1 0x04
+#define SPIINT 0x08
+#define SPILVL 0x0c
+#define SPIFLG 0x10
+#define SPIPC0 0x14
+#define SPIDAT1 0x3c
+#define SPIBUF 0x40
+#define SPIDELAY 0x48
+#define SPIDEF 0x4c
+#define SPIFMT0 0x50
+
+#define DMA_MIN_BYTES 16
+
+/* SPI Controller driver's private data. */
+struct davinci_spi {
+ struct spi_bitbang bitbang;
+ struct clk *clk;
+
+ u8 version;
+ resource_size_t pbase;
+ void __iomem *base;
+ u32 irq;
+ struct completion done;
+
+ const void *tx;
+ void *rx;
+ int rcount;
+ int wcount;
+
+ struct dma_chan *dma_rx;
+ struct dma_chan *dma_tx;
+
+ struct davinci_spi_platform_data pdata;
+
+ void (*get_rx)(u32 rx_data, struct davinci_spi *);
+ u32 (*get_tx)(struct davinci_spi *);
+
+ u8 *bytes_per_word;
+
+ u8 prescaler_limit;
+};
+
+static struct davinci_spi_config davinci_spi_default_cfg;
+
+static void davinci_spi_rx_buf_u8(u32 data, struct davinci_spi *dspi)
+{
+ if (dspi->rx) {
+ u8 *rx = dspi->rx;
+ *rx++ = (u8)data;
+ dspi->rx = rx;
+ }
+}
+
+static void davinci_spi_rx_buf_u16(u32 data, struct davinci_spi *dspi)
+{
+ if (dspi->rx) {
+ u16 *rx = dspi->rx;
+ *rx++ = (u16)data;
+ dspi->rx = rx;
+ }
+}
+
+static u32 davinci_spi_tx_buf_u8(struct davinci_spi *dspi)
+{
+ u32 data = 0;
+
+ if (dspi->tx) {
+ const u8 *tx = dspi->tx;
+
+ data = *tx++;
+ dspi->tx = tx;
+ }
+ return data;
+}
+
+static u32 davinci_spi_tx_buf_u16(struct davinci_spi *dspi)
+{
+ u32 data = 0;
+
+ if (dspi->tx) {
+ const u16 *tx = dspi->tx;
+
+ data = *tx++;
+ dspi->tx = tx;
+ }
+ return data;
+}
+
+static inline void set_io_bits(void __iomem *addr, u32 bits)
+{
+ u32 v = ioread32(addr);
+
+ v |= bits;
+ iowrite32(v, addr);
+}
+
+static inline void clear_io_bits(void __iomem *addr, u32 bits)
+{
+ u32 v = ioread32(addr);
+
+ v &= ~bits;
+ iowrite32(v, addr);
+}
+
+/*
+ * Interface to control the chip select signal
+ */
+static void davinci_spi_chipselect(struct spi_device *spi, int value)
+{
+ struct davinci_spi *dspi;
+ struct davinci_spi_config *spicfg = spi->controller_data;
+ u8 chip_sel = spi->chip_select;
+ u16 spidat1 = CS_DEFAULT;
+
+ dspi = spi_master_get_devdata(spi->master);
+
+ /* program the delay between transfers if wdelay is non-zero */
+ if (spicfg && spicfg->wdelay)
+ spidat1 |= SPIDAT1_WDEL;
+
+ /*
+ * Board specific chip select logic decides the polarity and cs
+ * line for the controller
+ */
+ if (spi->cs_gpiod) {
+ if (value == BITBANG_CS_ACTIVE)
+ gpiod_set_value(spi->cs_gpiod, 1);
+ else
+ gpiod_set_value(spi->cs_gpiod, 0);
+ } else {
+ if (value == BITBANG_CS_ACTIVE) {
+ if (!(spi->mode & SPI_CS_WORD))
+ spidat1 |= SPIDAT1_CSHOLD_MASK;
+ spidat1 &= ~(0x1 << chip_sel);
+ }
+ }
+
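+ /* Only the upper half-word of SPIDAT1 (CSHOLD, WDEL and the CS bits) is written here */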
+ iowrite16(spidat1, dspi->base + SPIDAT1 + 2);
+}
+
+/**
+ * davinci_spi_get_prescale - Calculates the correct prescale value
+ * @dspi: the controller data
+ * @max_speed_hz: the maximum rate the SPI clock can run at
+ *
+ * This function calculates the prescale value that generates a clock rate
+ * less than or equal to the specified maximum.
+ *
+ * Returns: calculated prescale value for easy programming into SPI registers
+ * or a negative error number if a valid prescaler cannot be calculated.
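+ *
+ * For example, with a hypothetical 150 MHz functional clock and a 20 MHz
+ * max_speed_hz, DIV_ROUND_UP(150 MHz, 20 MHz) - 1 = 7 is returned; assuming
+ * the controller divides by (prescale + 1), as the subtraction in the code
+ * suggests, the resulting SPI clock is 150 MHz / 8 = 18.75 MHz.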
+ */
+static inline int davinci_spi_get_prescale(struct davinci_spi *dspi,
+ u32 max_speed_hz)
+{
+ int ret;
+
+ /* Subtract 1 to match what will be programmed into SPI register. */
+ ret = DIV_ROUND_UP(clk_get_rate(dspi->clk), max_speed_hz) - 1;
+
+ if (ret < dspi->prescaler_limit || ret > 255)
+ return -EINVAL;
+
+ return ret;
+}
+
+/**
+ * davinci_spi_setup_transfer - This function determines the transfer method
+ * @spi: spi device on which data transfer to be done
+ * @t: spi transfer in which transfer info is filled
+ *
+ * This function determines the data transfer method (8-bit or 16-bit
+ * transfer) and programs the clock prescaler and format bits in the
+ * SPIFMTn register according to the SPI slave device frequency.
+ */
+static int davinci_spi_setup_transfer(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+
+ struct davinci_spi *dspi;
+ struct davinci_spi_config *spicfg;
+ u8 bits_per_word = 0;
+ u32 hz = 0, spifmt = 0;
+ int prescale;
+
+ dspi = spi_master_get_devdata(spi->master);
+ spicfg = spi->controller_data;
+ if (!spicfg)
+ spicfg = &davinci_spi_default_cfg;
+
+ if (t) {
+ bits_per_word = t->bits_per_word;
+ hz = t->speed_hz;
+ }
+
+ /* if bits_per_word is not set then set it default */
+ if (!bits_per_word)
+ bits_per_word = spi->bits_per_word;
+
+ /*
+ * Assign function pointers for the appropriate transfer method:
+ * 8-bit or 16-bit transfer
+ */
+ if (bits_per_word <= 8) {
+ dspi->get_rx = davinci_spi_rx_buf_u8;
+ dspi->get_tx = davinci_spi_tx_buf_u8;
+ dspi->bytes_per_word[spi->chip_select] = 1;
+ } else {
+ dspi->get_rx = davinci_spi_rx_buf_u16;
+ dspi->get_tx = davinci_spi_tx_buf_u16;
+ dspi->bytes_per_word[spi->chip_select] = 2;
+ }
+
+ if (!hz)
+ hz = spi->max_speed_hz;
+
+ /* Set up SPIFMTn register, unique to this chipselect. */
+
+ prescale = davinci_spi_get_prescale(dspi, hz);
+ if (prescale < 0)
+ return prescale;
+
+ spifmt = (prescale << SPIFMT_PRESCALE_SHIFT) | (bits_per_word & 0x1f);
+
+ if (spi->mode & SPI_LSB_FIRST)
+ spifmt |= SPIFMT_SHIFTDIR_MASK;
+
+ if (spi->mode & SPI_CPOL)
+ spifmt |= SPIFMT_POLARITY_MASK;
+
+ if (!(spi->mode & SPI_CPHA))
+ spifmt |= SPIFMT_PHASE_MASK;
+
+ /*
+ * Assume wdelay is used only on SPI peripherals that have this field
+ * in the SPIFMTn register and when it is configured from the board file or DT.
+ */
+ if (spicfg->wdelay)
+ spifmt |= ((spicfg->wdelay << SPIFMT_WDELAY_SHIFT)
+ & SPIFMT_WDELAY_MASK);
+
+ /*
+ * Version 1 hardware supports two basic SPI modes:
+ * - Standard SPI mode uses 4 pins, with chipselect
+ * - 3 pin SPI is a 4 pin variant without CS (SPI_NO_CS)
+ * (distinct from SPI_3WIRE, with just one data wire;
+ * or similar variants without MOSI or without MISO)
+ *
+ * Version 2 hardware supports an optional handshaking signal,
+ * so it can support two more modes:
+ * - 5 pin SPI variant is standard SPI plus SPI_READY
+ * - 4 pin with enable is (SPI_READY | SPI_NO_CS)
+ */
+
+ if (dspi->version == SPI_VERSION_2) {
+
+ u32 delay = 0;
+
+ if (spicfg->odd_parity)
+ spifmt |= SPIFMT_ODD_PARITY_MASK;
+
+ if (spicfg->parity_enable)
+ spifmt |= SPIFMT_PARITYENA_MASK;
+
+ if (spicfg->timer_disable) {
+ spifmt |= SPIFMT_DISTIMER_MASK;
+ } else {
+ delay |= (spicfg->c2tdelay << SPIDELAY_C2TDELAY_SHIFT)
+ & SPIDELAY_C2TDELAY_MASK;
+ delay |= (spicfg->t2cdelay << SPIDELAY_T2CDELAY_SHIFT)
+ & SPIDELAY_T2CDELAY_MASK;
+ }
+
+ if (spi->mode & SPI_READY) {
+ spifmt |= SPIFMT_WAITENA_MASK;
+ delay |= (spicfg->t2edelay << SPIDELAY_T2EDELAY_SHIFT)
+ & SPIDELAY_T2EDELAY_MASK;
+ delay |= (spicfg->c2edelay << SPIDELAY_C2EDELAY_SHIFT)
+ & SPIDELAY_C2EDELAY_MASK;
+ }
+
+ iowrite32(delay, dspi->base + SPIDELAY);
+ }
+
+ iowrite32(spifmt, dspi->base + SPIFMT0);
+
+ return 0;
+}
+
+static int davinci_spi_of_setup(struct spi_device *spi)
+{
+ struct davinci_spi_config *spicfg = spi->controller_data;
+ struct device_node *np = spi->dev.of_node;
+ struct davinci_spi *dspi = spi_master_get_devdata(spi->master);
+ u32 prop;
+
+ if (spicfg == NULL && np) {
+ spicfg = kzalloc(sizeof(*spicfg), GFP_KERNEL);
+ if (!spicfg)
+ return -ENOMEM;
+ *spicfg = davinci_spi_default_cfg;
+ /* override with dt configured values */
+ if (!of_property_read_u32(np, "ti,spi-wdelay", &prop))
+ spicfg->wdelay = (u8)prop;
+ spi->controller_data = spicfg;
+
+ if (dspi->dma_rx && dspi->dma_tx)
+ spicfg->io_type = SPI_IO_TYPE_DMA;
+ }
+
+ return 0;
+}
+
+/**
+ * davinci_spi_setup - This function sets the default transfer method
+ * @spi: spi device on which data transfer to be done
+ *
+ * This function sets the default transfer method.
+ */
+static int davinci_spi_setup(struct spi_device *spi)
+{
+ struct davinci_spi *dspi;
+ struct device_node *np = spi->dev.of_node;
+ bool internal_cs = true;
+
+ dspi = spi_master_get_devdata(spi->master);
+
+ if (!(spi->mode & SPI_NO_CS)) {
+ if (np && spi->cs_gpiod)
+ internal_cs = false;
+
+ if (internal_cs)
+ set_io_bits(dspi->base + SPIPC0, 1 << spi->chip_select);
+ }
+
+ if (spi->mode & SPI_READY)
+ set_io_bits(dspi->base + SPIPC0, SPIPC0_SPIENA_MASK);
+
+ if (spi->mode & SPI_LOOP)
+ set_io_bits(dspi->base + SPIGCR1, SPIGCR1_LOOPBACK_MASK);
+ else
+ clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_LOOPBACK_MASK);
+
+ return davinci_spi_of_setup(spi);
+}
+
+static void davinci_spi_cleanup(struct spi_device *spi)
+{
+ struct davinci_spi_config *spicfg = spi->controller_data;
+
+ spi->controller_data = NULL;
+ if (spi->dev.of_node)
+ kfree(spicfg);
+}
+
+static bool davinci_spi_can_dma(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct davinci_spi_config *spicfg = spi->controller_data;
+ bool can_dma = false;
+
+ if (spicfg)
+ can_dma = (spicfg->io_type == SPI_IO_TYPE_DMA) &&
+ (xfer->len >= DMA_MIN_BYTES) &&
+ !is_vmalloc_addr(xfer->rx_buf) &&
+ !is_vmalloc_addr(xfer->tx_buf);
+
+ return can_dma;
+}
+
+static int davinci_spi_check_error(struct davinci_spi *dspi, int int_status)
+{
+ struct device *sdev = dspi->bitbang.master->dev.parent;
+
+ if (int_status & SPIFLG_TIMEOUT_MASK) {
+ dev_err(sdev, "SPI Time-out Error\n");
+ return -ETIMEDOUT;
+ }
+ if (int_status & SPIFLG_DESYNC_MASK) {
+ dev_err(sdev, "SPI Desynchronization Error\n");
+ return -EIO;
+ }
+ if (int_status & SPIFLG_BITERR_MASK) {
+ dev_err(sdev, "SPI Bit error\n");
+ return -EIO;
+ }
+
+ if (dspi->version == SPI_VERSION_2) {
+ if (int_status & SPIFLG_DLEN_ERR_MASK) {
+ dev_err(sdev, "SPI Data Length Error\n");
+ return -EIO;
+ }
+ if (int_status & SPIFLG_PARERR_MASK) {
+ dev_err(sdev, "SPI Parity Error\n");
+ return -EIO;
+ }
+ if (int_status & SPIFLG_OVRRUN_MASK) {
+ dev_err(sdev, "SPI Data Overrun error\n");
+ return -EIO;
+ }
+ if (int_status & SPIFLG_BUF_INIT_ACTIVE_MASK) {
+ dev_err(sdev, "SPI Buffer Init Active\n");
+ return -EBUSY;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * davinci_spi_process_events - check for and handle any SPI controller events
+ * @dspi: the controller data
+ *
+ * This function will check the SPIFLG register and handle any events that are
+ * detected there
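+ *
+ * At most one word is moved in each direction per call; the function is
+ * invoked repeatedly, either from the interrupt handler or from the polling
+ * loop in davinci_spi_bufs(), until the transfer counters reach zero.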
+ */
+static int davinci_spi_process_events(struct davinci_spi *dspi)
+{
+ u32 buf, status, errors = 0, spidat1;
+
+ buf = ioread32(dspi->base + SPIBUF);
+
+ if (dspi->rcount > 0 && !(buf & SPIBUF_RXEMPTY_MASK)) {
+ dspi->get_rx(buf & 0xFFFF, dspi);
+ dspi->rcount--;
+ }
+
+ status = ioread32(dspi->base + SPIFLG);
+
+ if (unlikely(status & SPIFLG_ERROR_MASK)) {
+ errors = status & SPIFLG_ERROR_MASK;
+ goto out;
+ }
+
+ if (dspi->wcount > 0 && !(buf & SPIBUF_TXFULL_MASK)) {
+ spidat1 = ioread32(dspi->base + SPIDAT1);
+ dspi->wcount--;
+ spidat1 &= ~0xFFFF;
+ spidat1 |= 0xFFFF & dspi->get_tx(dspi);
+ iowrite32(spidat1, dspi->base + SPIDAT1);
+ }
+
+out:
+ return errors;
+}
+
+static void davinci_spi_dma_rx_callback(void *data)
+{
+ struct davinci_spi *dspi = (struct davinci_spi *)data;
+
+ dspi->rcount = 0;
+
+ if (!dspi->wcount && !dspi->rcount)
+ complete(&dspi->done);
+}
+
+static void davinci_spi_dma_tx_callback(void *data)
+{
+ struct davinci_spi *dspi = (struct davinci_spi *)data;
+
+ dspi->wcount = 0;
+
+ if (!dspi->wcount && !dspi->rcount)
+ complete(&dspi->done);
+}
+
+/**
+ * davinci_spi_bufs - function which handles the data transfer
+ * @spi: spi device on which data transfer to be done
+ * @t: spi transfer in which transfer info is filled
+ *
+ * This function puts the data to be transferred into the data register
+ * of the SPI controller and then waits until completion is signalled,
+ * either by the IRQ handler or by polling the controller events.
+ */
+static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct davinci_spi *dspi;
+ int data_type, ret = -ENOMEM;
+ u32 tx_data, spidat1;
+ u32 errors = 0;
+ struct davinci_spi_config *spicfg;
+ struct davinci_spi_platform_data *pdata;
+
+ dspi = spi_master_get_devdata(spi->master);
+ pdata = &dspi->pdata;
+ spicfg = (struct davinci_spi_config *)spi->controller_data;
+ if (!spicfg)
+ spicfg = &davinci_spi_default_cfg;
+
+ /* convert len to words based on bits_per_word */
+ data_type = dspi->bytes_per_word[spi->chip_select];
+
+ dspi->tx = t->tx_buf;
+ dspi->rx = t->rx_buf;
+ dspi->wcount = t->len / data_type;
+ dspi->rcount = dspi->wcount;
+
+ spidat1 = ioread32(dspi->base + SPIDAT1);
+
+ clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK);
+ set_io_bits(dspi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
+
+ reinit_completion(&dspi->done);
+
+ if (!davinci_spi_can_dma(spi->master, spi, t)) {
+ if (spicfg->io_type != SPI_IO_TYPE_POLL)
+ set_io_bits(dspi->base + SPIINT, SPIINT_MASKINT);
+ /* start the transfer */
+ dspi->wcount--;
+ tx_data = dspi->get_tx(dspi);
+ spidat1 &= 0xFFFF0000;
+ spidat1 |= tx_data & 0xFFFF;
+ iowrite32(spidat1, dspi->base + SPIDAT1);
+ } else {
+ struct dma_slave_config dma_rx_conf = {
+ .direction = DMA_DEV_TO_MEM,
+ .src_addr = (unsigned long)dspi->pbase + SPIBUF,
+ .src_addr_width = data_type,
+ .src_maxburst = 1,
+ };
+ struct dma_slave_config dma_tx_conf = {
+ .direction = DMA_MEM_TO_DEV,
+ .dst_addr = (unsigned long)dspi->pbase + SPIDAT1,
+ .dst_addr_width = data_type,
+ .dst_maxburst = 1,
+ };
+ struct dma_async_tx_descriptor *rxdesc;
+ struct dma_async_tx_descriptor *txdesc;
+
+ dmaengine_slave_config(dspi->dma_rx, &dma_rx_conf);
+ dmaengine_slave_config(dspi->dma_tx, &dma_tx_conf);
+
+ rxdesc = dmaengine_prep_slave_sg(dspi->dma_rx,
+ t->rx_sg.sgl, t->rx_sg.nents, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!rxdesc)
+ goto err_desc;
+
+ if (!t->tx_buf) {
+ /* To avoid errors when doing rx-only transfers with
+ * many SG entries (> 20), use the rx buffer as the
+ * dummy tx buffer so that dma reloads are done at the
+ * same time for rx and tx.
+ */
+ t->tx_sg.sgl = t->rx_sg.sgl;
+ t->tx_sg.nents = t->rx_sg.nents;
+ }
+
+ txdesc = dmaengine_prep_slave_sg(dspi->dma_tx,
+ t->tx_sg.sgl, t->tx_sg.nents, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!txdesc)
+ goto err_desc;
+
+ rxdesc->callback = davinci_spi_dma_rx_callback;
+ rxdesc->callback_param = (void *)dspi;
+ txdesc->callback = davinci_spi_dma_tx_callback;
+ txdesc->callback_param = (void *)dspi;
+
+ if (pdata->cshold_bug)
+ iowrite16(spidat1 >> 16, dspi->base + SPIDAT1 + 2);
+
+ dmaengine_submit(rxdesc);
+ dmaengine_submit(txdesc);
+
+ dma_async_issue_pending(dspi->dma_rx);
+ dma_async_issue_pending(dspi->dma_tx);
+
+ set_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN);
+ }
+
+ /* Wait for the transfer to complete */
+ if (spicfg->io_type != SPI_IO_TYPE_POLL) {
+ if (wait_for_completion_timeout(&dspi->done, HZ) == 0)
+ errors = SPIFLG_TIMEOUT_MASK;
+ } else {
+ while (dspi->rcount > 0 || dspi->wcount > 0) {
+ errors = davinci_spi_process_events(dspi);
+ if (errors)
+ break;
+ cpu_relax();
+ }
+ }
+
+ clear_io_bits(dspi->base + SPIINT, SPIINT_MASKALL);
+ if (davinci_spi_can_dma(spi->master, spi, t))
+ clear_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN);
+
+ clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
+ set_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK);
+
+ /*
+ * Check for bit error, desync error, parity error, timeout error and
+ * receive overflow errors
+ */
+ if (errors) {
+ ret = davinci_spi_check_error(dspi, errors);
+ WARN(!ret, "%s: error reported but no error found!\n",
+ dev_name(&spi->dev));
+ return ret;
+ }
+
+ if (dspi->rcount != 0 || dspi->wcount != 0) {
+ dev_err(&spi->dev, "SPI data transfer error\n");
+ return -EIO;
+ }
+
+ return t->len;
+
+err_desc:
+ return ret;
+}
+
+/**
+ * dummy_thread_fn - dummy thread function
+ * @irq: IRQ number for this SPI Master
+ * @data: structure for SPI Master controller davinci_spi
+ *
+ * This is to satisfy the request_threaded_irq() API so that the irq
+ * handler is called in interrupt context.
+ */
+static irqreturn_t dummy_thread_fn(s32 irq, void *data)
+{
+ return IRQ_HANDLED;
+}
+
+/**
+ * davinci_spi_irq - Interrupt handler for SPI Master Controller
+ * @irq: IRQ number for this SPI Master
+ * @data: structure for SPI Master controller davinci_spi
+ *
+ * The ISR determines whether the interrupt was raised for a READ or a WRITE
+ * command and acts accordingly. It checks the remaining transfer length and,
+ * if it is non-zero, dispatches the next transfer command. Once the transfer
+ * length reaches zero it signals COMPLETION so that the davinci_spi_bufs
+ * function can go ahead.
+ */
+static irqreturn_t davinci_spi_irq(s32 irq, void *data)
+{
+ struct davinci_spi *dspi = data;
+ int status;
+
+ status = davinci_spi_process_events(dspi);
+ if (unlikely(status != 0))
+ clear_io_bits(dspi->base + SPIINT, SPIINT_MASKINT);
+
+ if ((!dspi->rcount && !dspi->wcount) || status)
+ complete(&dspi->done);
+
+ return IRQ_HANDLED;
+}
+
+static int davinci_spi_request_dma(struct davinci_spi *dspi)
+{
+ struct device *sdev = dspi->bitbang.master->dev.parent;
+
+ dspi->dma_rx = dma_request_chan(sdev, "rx");
+ if (IS_ERR(dspi->dma_rx))
+ return PTR_ERR(dspi->dma_rx);
+
+ dspi->dma_tx = dma_request_chan(sdev, "tx");
+ if (IS_ERR(dspi->dma_tx)) {
+ dma_release_channel(dspi->dma_rx);
+ return PTR_ERR(dspi->dma_tx);
+ }
+
+ return 0;
+}
+
+#if defined(CONFIG_OF)
+
+/* OF SPI data structure */
+struct davinci_spi_of_data {
+ u8 version;
+ u8 prescaler_limit;
+};
+
+static const struct davinci_spi_of_data dm6441_spi_data = {
+ .version = SPI_VERSION_1,
+ .prescaler_limit = 2,
+};
+
+static const struct davinci_spi_of_data da830_spi_data = {
+ .version = SPI_VERSION_2,
+ .prescaler_limit = 2,
+};
+
+static const struct davinci_spi_of_data keystone_spi_data = {
+ .version = SPI_VERSION_1,
+ .prescaler_limit = 0,
+};
+
+static const struct of_device_id davinci_spi_of_match[] = {
+ {
+ .compatible = "ti,dm6441-spi",
+ .data = &dm6441_spi_data,
+ },
+ {
+ .compatible = "ti,da830-spi",
+ .data = &da830_spi_data,
+ },
+ {
+ .compatible = "ti,keystone-spi",
+ .data = &keystone_spi_data,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, davinci_spi_of_match);
+
+/**
+ * spi_davinci_get_pdata - Get platform data from DTS binding
+ * @pdev: ptr to platform data
+ * @dspi: ptr to driver data
+ *
+ * Parses and populates pdata in dspi from device tree bindings.
+ *
+ * NOTE: Not all platform data params are supported currently.
+ */
+static int spi_davinci_get_pdata(struct platform_device *pdev,
+ struct davinci_spi *dspi)
+{
+ struct device_node *node = pdev->dev.of_node;
+ const struct davinci_spi_of_data *spi_data;
+ struct davinci_spi_platform_data *pdata;
+ unsigned int num_cs, intr_line = 0;
+
+ pdata = &dspi->pdata;
+
+ spi_data = device_get_match_data(&pdev->dev);
+
+ pdata->version = spi_data->version;
+ pdata->prescaler_limit = spi_data->prescaler_limit;
+ /*
+ * Default num_cs is 1 and all chip selects are internal to the chip,
+ * indicated by chip_sel being NULL or cs_gpios being NULL or set to
+ * -ENOENT. num-cs includes internal as well as GPIO chip selects, but
+ * GPIO based CS is not supported yet in the DT bindings.
+ */
+ num_cs = 1;
+ of_property_read_u32(node, "num-cs", &num_cs);
+ pdata->num_chipselect = num_cs;
+ of_property_read_u32(node, "ti,davinci-spi-intr-line", &intr_line);
+ pdata->intr_line = intr_line;
+ return 0;
+}
+#else
+static int spi_davinci_get_pdata(struct platform_device *pdev,
+ struct davinci_spi *dspi)
+{
+ return -ENODEV;
+}
+#endif
+
+/**
+ * davinci_spi_probe - probe function for SPI Master Controller
+ * @pdev: platform_device structure which contains platform specific data
+ *
+ * According to the Linux device model this function is invoked by the core
+ * with a platform_device struct which contains the device specific info.
+ * This function maps the SPI controller's memory, registers the IRQ,
+ * resets the SPI controller and sets its registers to default values.
+ * It then invokes spi_bitbang_start() so that transfers queued by client
+ * drivers can be processed.
+ */
+static int davinci_spi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct davinci_spi *dspi;
+ struct davinci_spi_platform_data *pdata;
+ struct resource *r;
+ int ret = 0;
+ u32 spipc0;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct davinci_spi));
+ if (master == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ platform_set_drvdata(pdev, master);
+
+ dspi = spi_master_get_devdata(master);
+
+ if (dev_get_platdata(&pdev->dev)) {
+ pdata = dev_get_platdata(&pdev->dev);
+ dspi->pdata = *pdata;
+ } else {
+ /* update dspi pdata with that from the DT */
+ ret = spi_davinci_get_pdata(pdev, dspi);
+ if (ret < 0)
+ goto free_master;
+ }
+
+ /* pdata in dspi is now updated and point pdata to that */
+ pdata = &dspi->pdata;
+
+ dspi->bytes_per_word = devm_kcalloc(&pdev->dev,
+ pdata->num_chipselect,
+ sizeof(*dspi->bytes_per_word),
+ GFP_KERNEL);
+ if (dspi->bytes_per_word == NULL) {
+ ret = -ENOMEM;
+ goto free_master;
+ }
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (r == NULL) {
+ ret = -ENOENT;
+ goto free_master;
+ }
+
+ dspi->pbase = r->start;
+
+ dspi->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(dspi->base)) {
+ ret = PTR_ERR(dspi->base);
+ goto free_master;
+ }
+
+ init_completion(&dspi->done);
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret == 0)
+ ret = -EINVAL;
+ if (ret < 0)
+ goto free_master;
+ dspi->irq = ret;
+
+ ret = devm_request_threaded_irq(&pdev->dev, dspi->irq, davinci_spi_irq,
+ dummy_thread_fn, 0, dev_name(&pdev->dev), dspi);
+ if (ret)
+ goto free_master;
+
+ dspi->bitbang.master = master;
+
+ dspi->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(dspi->clk)) {
+ ret = -ENODEV;
+ goto free_master;
+ }
+ ret = clk_prepare_enable(dspi->clk);
+ if (ret)
+ goto free_master;
+
+ master->use_gpio_descriptors = true;
+ master->dev.of_node = pdev->dev.of_node;
+ master->bus_num = pdev->id;
+ master->num_chipselect = pdata->num_chipselect;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 16);
+ master->flags = SPI_MASTER_MUST_RX | SPI_MASTER_GPIO_SS;
+ master->setup = davinci_spi_setup;
+ master->cleanup = davinci_spi_cleanup;
+ master->can_dma = davinci_spi_can_dma;
+
+ dspi->bitbang.chipselect = davinci_spi_chipselect;
+ dspi->bitbang.setup_transfer = davinci_spi_setup_transfer;
+ dspi->prescaler_limit = pdata->prescaler_limit;
+ dspi->version = pdata->version;
+
+ dspi->bitbang.flags = SPI_NO_CS | SPI_LSB_FIRST | SPI_LOOP | SPI_CS_WORD;
+ if (dspi->version == SPI_VERSION_2)
+ dspi->bitbang.flags |= SPI_READY;
+
+ dspi->bitbang.txrx_bufs = davinci_spi_bufs;
+
+ ret = davinci_spi_request_dma(dspi);
+ if (ret == -EPROBE_DEFER) {
+ goto free_clk;
+ } else if (ret) {
+ dev_info(&pdev->dev, "DMA is not supported (%d)\n", ret);
+ dspi->dma_rx = NULL;
+ dspi->dma_tx = NULL;
+ }
+
+ dspi->get_rx = davinci_spi_rx_buf_u8;
+ dspi->get_tx = davinci_spi_tx_buf_u8;
+
+ /* Reset In/OUT SPI module */
+ iowrite32(0, dspi->base + SPIGCR0);
+ udelay(100);
+ iowrite32(1, dspi->base + SPIGCR0);
+
+ /* Set up SPIPC0. CS and ENA init is done in davinci_spi_setup */
+ spipc0 = SPIPC0_DIFUN_MASK | SPIPC0_DOFUN_MASK | SPIPC0_CLKFUN_MASK;
+ iowrite32(spipc0, dspi->base + SPIPC0);
+
+ if (pdata->intr_line)
+ iowrite32(SPI_INTLVL_1, dspi->base + SPILVL);
+ else
+ iowrite32(SPI_INTLVL_0, dspi->base + SPILVL);
+
+ iowrite32(CS_DEFAULT, dspi->base + SPIDEF);
+
+ /* master mode default */
+ set_io_bits(dspi->base + SPIGCR1, SPIGCR1_CLKMOD_MASK);
+ set_io_bits(dspi->base + SPIGCR1, SPIGCR1_MASTER_MASK);
+ set_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK);
+
+ ret = spi_bitbang_start(&dspi->bitbang);
+ if (ret)
+ goto free_dma;
+
+ dev_info(&pdev->dev, "Controller at 0x%p\n", dspi->base);
+
+ return ret;
+
+free_dma:
+ if (dspi->dma_rx) {
+ dma_release_channel(dspi->dma_rx);
+ dma_release_channel(dspi->dma_tx);
+ }
+free_clk:
+ clk_disable_unprepare(dspi->clk);
+free_master:
+ spi_master_put(master);
+err:
+ return ret;
+}
+
+/**
+ * davinci_spi_remove - remove function for SPI Master Controller
+ * @pdev: platform_device structure which contains platform specific data
+ *
+ * This function reverses the actions of davinci_spi_probe(): it frees the
+ * IRQ and the SPI controller's memory region, and calls spi_bitbang_stop()
+ * to tear down what spi_bitbang_start() set up.
+ */
+static int davinci_spi_remove(struct platform_device *pdev)
+{
+ struct davinci_spi *dspi;
+ struct spi_master *master;
+
+ master = platform_get_drvdata(pdev);
+ dspi = spi_master_get_devdata(master);
+
+ spi_bitbang_stop(&dspi->bitbang);
+
+ clk_disable_unprepare(dspi->clk);
+
+ if (dspi->dma_rx) {
+ dma_release_channel(dspi->dma_rx);
+ dma_release_channel(dspi->dma_tx);
+ }
+
+ spi_master_put(master);
+ return 0;
+}
+
+static struct platform_driver davinci_spi_driver = {
+ .driver = {
+ .name = "spi_davinci",
+ .of_match_table = of_match_ptr(davinci_spi_of_match),
+ },
+ .probe = davinci_spi_probe,
+ .remove = davinci_spi_remove,
+};
+module_platform_driver(davinci_spi_driver);
+
+MODULE_DESCRIPTION("TI DaVinci SPI Master Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-dln2.c b/drivers/spi/spi-dln2.c
new file mode 100644
index 000000000..0a1fb2bc9
--- /dev/null
+++ b/drivers/spi/spi-dln2.c
@@ -0,0 +1,883 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Driver for the Diolan DLN-2 USB-SPI adapter
+ *
+ * Copyright (c) 2014 Intel Corporation
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/mfd/dln2.h>
+#include <linux/spi/spi.h>
+#include <linux/pm_runtime.h>
+#include <asm/unaligned.h>
+
+#define DLN2_SPI_MODULE_ID 0x02
+#define DLN2_SPI_CMD(cmd) DLN2_CMD(cmd, DLN2_SPI_MODULE_ID)
+
+/* SPI commands */
+#define DLN2_SPI_GET_PORT_COUNT DLN2_SPI_CMD(0x00)
+#define DLN2_SPI_ENABLE DLN2_SPI_CMD(0x11)
+#define DLN2_SPI_DISABLE DLN2_SPI_CMD(0x12)
+#define DLN2_SPI_IS_ENABLED DLN2_SPI_CMD(0x13)
+#define DLN2_SPI_SET_MODE DLN2_SPI_CMD(0x14)
+#define DLN2_SPI_GET_MODE DLN2_SPI_CMD(0x15)
+#define DLN2_SPI_SET_FRAME_SIZE DLN2_SPI_CMD(0x16)
+#define DLN2_SPI_GET_FRAME_SIZE DLN2_SPI_CMD(0x17)
+#define DLN2_SPI_SET_FREQUENCY DLN2_SPI_CMD(0x18)
+#define DLN2_SPI_GET_FREQUENCY DLN2_SPI_CMD(0x19)
+#define DLN2_SPI_READ_WRITE DLN2_SPI_CMD(0x1A)
+#define DLN2_SPI_READ DLN2_SPI_CMD(0x1B)
+#define DLN2_SPI_WRITE DLN2_SPI_CMD(0x1C)
+#define DLN2_SPI_SET_DELAY_BETWEEN_SS DLN2_SPI_CMD(0x20)
+#define DLN2_SPI_GET_DELAY_BETWEEN_SS DLN2_SPI_CMD(0x21)
+#define DLN2_SPI_SET_DELAY_AFTER_SS DLN2_SPI_CMD(0x22)
+#define DLN2_SPI_GET_DELAY_AFTER_SS DLN2_SPI_CMD(0x23)
+#define DLN2_SPI_SET_DELAY_BETWEEN_FRAMES DLN2_SPI_CMD(0x24)
+#define DLN2_SPI_GET_DELAY_BETWEEN_FRAMES DLN2_SPI_CMD(0x25)
+#define DLN2_SPI_SET_SS DLN2_SPI_CMD(0x26)
+#define DLN2_SPI_GET_SS DLN2_SPI_CMD(0x27)
+#define DLN2_SPI_RELEASE_SS DLN2_SPI_CMD(0x28)
+#define DLN2_SPI_SS_VARIABLE_ENABLE DLN2_SPI_CMD(0x2B)
+#define DLN2_SPI_SS_VARIABLE_DISABLE DLN2_SPI_CMD(0x2C)
+#define DLN2_SPI_SS_VARIABLE_IS_ENABLED DLN2_SPI_CMD(0x2D)
+#define DLN2_SPI_SS_AAT_ENABLE DLN2_SPI_CMD(0x2E)
+#define DLN2_SPI_SS_AAT_DISABLE DLN2_SPI_CMD(0x2F)
+#define DLN2_SPI_SS_AAT_IS_ENABLED DLN2_SPI_CMD(0x30)
+#define DLN2_SPI_SS_BETWEEN_FRAMES_ENABLE DLN2_SPI_CMD(0x31)
+#define DLN2_SPI_SS_BETWEEN_FRAMES_DISABLE DLN2_SPI_CMD(0x32)
+#define DLN2_SPI_SS_BETWEEN_FRAMES_IS_ENABLED DLN2_SPI_CMD(0x33)
+#define DLN2_SPI_SET_CPHA DLN2_SPI_CMD(0x34)
+#define DLN2_SPI_GET_CPHA DLN2_SPI_CMD(0x35)
+#define DLN2_SPI_SET_CPOL DLN2_SPI_CMD(0x36)
+#define DLN2_SPI_GET_CPOL DLN2_SPI_CMD(0x37)
+#define DLN2_SPI_SS_MULTI_ENABLE DLN2_SPI_CMD(0x38)
+#define DLN2_SPI_SS_MULTI_DISABLE DLN2_SPI_CMD(0x39)
+#define DLN2_SPI_SS_MULTI_IS_ENABLED DLN2_SPI_CMD(0x3A)
+#define DLN2_SPI_GET_SUPPORTED_MODES DLN2_SPI_CMD(0x40)
+#define DLN2_SPI_GET_SUPPORTED_CPHA_VALUES DLN2_SPI_CMD(0x41)
+#define DLN2_SPI_GET_SUPPORTED_CPOL_VALUES DLN2_SPI_CMD(0x42)
+#define DLN2_SPI_GET_SUPPORTED_FRAME_SIZES DLN2_SPI_CMD(0x43)
+#define DLN2_SPI_GET_SS_COUNT DLN2_SPI_CMD(0x44)
+#define DLN2_SPI_GET_MIN_FREQUENCY DLN2_SPI_CMD(0x45)
+#define DLN2_SPI_GET_MAX_FREQUENCY DLN2_SPI_CMD(0x46)
+#define DLN2_SPI_GET_MIN_DELAY_BETWEEN_SS DLN2_SPI_CMD(0x47)
+#define DLN2_SPI_GET_MAX_DELAY_BETWEEN_SS DLN2_SPI_CMD(0x48)
+#define DLN2_SPI_GET_MIN_DELAY_AFTER_SS DLN2_SPI_CMD(0x49)
+#define DLN2_SPI_GET_MAX_DELAY_AFTER_SS DLN2_SPI_CMD(0x4A)
+#define DLN2_SPI_GET_MIN_DELAY_BETWEEN_FRAMES DLN2_SPI_CMD(0x4B)
+#define DLN2_SPI_GET_MAX_DELAY_BETWEEN_FRAMES DLN2_SPI_CMD(0x4C)
+
+#define DLN2_SPI_MAX_XFER_SIZE 256
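+/* transfer buffer size: the maximum payload plus headroom for the per-command headers */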
+#define DLN2_SPI_BUF_SIZE (DLN2_SPI_MAX_XFER_SIZE + 16)
+#define DLN2_SPI_ATTR_LEAVE_SS_LOW BIT(0)
+#define DLN2_TRANSFERS_WAIT_COMPLETE 1
+#define DLN2_TRANSFERS_CANCEL 0
+#define DLN2_RPM_AUTOSUSPEND_TIMEOUT 2000
+
+struct dln2_spi {
+ struct platform_device *pdev;
+ struct spi_master *master;
+ u8 port;
+
+ /*
+ * This buffer is used mainly for read/write operations. Since it is
+ * quite large, it cannot live on the stack. No locking is needed
+ * because all SPI communication is serialized by the SPI core.
+ */
+ void *buf;
+
+ u8 bpw;
+ u32 speed;
+ u16 mode;
+ u8 cs;
+};
+
+/*
+ * Enable/Disable SPI module. The disable command will wait for transfers to
+ * complete first.
+ */
+static int dln2_spi_enable(struct dln2_spi *dln2, bool enable)
+{
+ u16 cmd;
+ struct {
+ u8 port;
+ u8 wait_for_completion;
+ } tx;
+ unsigned len = sizeof(tx);
+
+ tx.port = dln2->port;
+
+ if (enable) {
+ cmd = DLN2_SPI_ENABLE;
+ len -= sizeof(tx.wait_for_completion);
+ } else {
+ tx.wait_for_completion = DLN2_TRANSFERS_WAIT_COMPLETE;
+ cmd = DLN2_SPI_DISABLE;
+ }
+
+ return dln2_transfer_tx(dln2->pdev, cmd, &tx, len);
+}
+
+/*
+ * Select/unselect multiple CS lines. The selected lines will be automatically
+ * toggled LOW/HIGH by the board firmware during transfers, provided they're
+ * enabled first.
+ *
+ * Ex: cs_mask = 0x03 -> CS0 & CS1 will be selected and the next WR/RD operation
+ * will toggle the lines LOW/HIGH automatically.
+ */
+static int dln2_spi_cs_set(struct dln2_spi *dln2, u8 cs_mask)
+{
+ struct {
+ u8 port;
+ u8 cs;
+ } tx;
+
+ tx.port = dln2->port;
+
+ /*
+ * According to Diolan docs, "a slave device can be selected by changing
+ * the corresponding bit value to 0". The rest must be set to 1. Hence
+ * the bitwise NOT in front.
+ */
+ tx.cs = ~cs_mask;
+
+ return dln2_transfer_tx(dln2->pdev, DLN2_SPI_SET_SS, &tx, sizeof(tx));
+}
+
+/*
+ * Select one CS line. The other lines will be un-selected.
+ */
+static int dln2_spi_cs_set_one(struct dln2_spi *dln2, u8 cs)
+{
+ return dln2_spi_cs_set(dln2, BIT(cs));
+}
+
+/*
+ * Enable/disable CS lines for usage. The module has to be disabled first.
+ */
+static int dln2_spi_cs_enable(struct dln2_spi *dln2, u8 cs_mask, bool enable)
+{
+ struct {
+ u8 port;
+ u8 cs;
+ } tx;
+ u16 cmd;
+
+ tx.port = dln2->port;
+ tx.cs = cs_mask;
+ cmd = enable ? DLN2_SPI_SS_MULTI_ENABLE : DLN2_SPI_SS_MULTI_DISABLE;
+
+ return dln2_transfer_tx(dln2->pdev, cmd, &tx, sizeof(tx));
+}
+
+static int dln2_spi_cs_enable_all(struct dln2_spi *dln2, bool enable)
+{
+ u8 cs_mask = GENMASK(dln2->master->num_chipselect - 1, 0);
+
+ return dln2_spi_cs_enable(dln2, cs_mask, enable);
+}
+
+static int dln2_spi_get_cs_num(struct dln2_spi *dln2, u16 *cs_num)
+{
+ int ret;
+ struct {
+ u8 port;
+ } tx;
+ struct {
+ __le16 cs_count;
+ } rx;
+ unsigned rx_len = sizeof(rx);
+
+ tx.port = dln2->port;
+ ret = dln2_transfer(dln2->pdev, DLN2_SPI_GET_SS_COUNT, &tx, sizeof(tx),
+ &rx, &rx_len);
+ if (ret < 0)
+ return ret;
+ if (rx_len < sizeof(rx))
+ return -EPROTO;
+
+ *cs_num = le16_to_cpu(rx.cs_count);
+
+ dev_dbg(&dln2->pdev->dev, "cs_num = %d\n", *cs_num);
+
+ return 0;
+}
+
+static int dln2_spi_get_speed(struct dln2_spi *dln2, u16 cmd, u32 *freq)
+{
+ int ret;
+ struct {
+ u8 port;
+ } tx;
+ struct {
+ __le32 speed;
+ } rx;
+ unsigned rx_len = sizeof(rx);
+
+ tx.port = dln2->port;
+
+ ret = dln2_transfer(dln2->pdev, cmd, &tx, sizeof(tx), &rx, &rx_len);
+ if (ret < 0)
+ return ret;
+ if (rx_len < sizeof(rx))
+ return -EPROTO;
+
+ *freq = le32_to_cpu(rx.speed);
+
+ return 0;
+}
+
+/*
+ * Get bus min/max frequencies.
+ */
+static int dln2_spi_get_speed_range(struct dln2_spi *dln2, u32 *fmin, u32 *fmax)
+{
+ int ret;
+
+ ret = dln2_spi_get_speed(dln2, DLN2_SPI_GET_MIN_FREQUENCY, fmin);
+ if (ret < 0)
+ return ret;
+
+ ret = dln2_spi_get_speed(dln2, DLN2_SPI_GET_MAX_FREQUENCY, fmax);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(&dln2->pdev->dev, "freq_min = %d, freq_max = %d\n",
+ *fmin, *fmax);
+
+ return 0;
+}
+
+/*
+ * Set the bus speed. The module will automatically round down to the closest
+ * available frequency and returns it. The module has to be disabled first.
+ */
+static int dln2_spi_set_speed(struct dln2_spi *dln2, u32 speed)
+{
+ int ret;
+ struct {
+ u8 port;
+ __le32 speed;
+ } __packed tx;
+ struct {
+ __le32 speed;
+ } rx;
+ int rx_len = sizeof(rx);
+
+ tx.port = dln2->port;
+ tx.speed = cpu_to_le32(speed);
+
+ ret = dln2_transfer(dln2->pdev, DLN2_SPI_SET_FREQUENCY, &tx, sizeof(tx),
+ &rx, &rx_len);
+ if (ret < 0)
+ return ret;
+ if (rx_len < sizeof(rx))
+ return -EPROTO;
+
+ return 0;
+}
+
+/*
+ * Change CPOL & CPHA. The module has to be disabled first.
+ */
+static int dln2_spi_set_mode(struct dln2_spi *dln2, u8 mode)
+{
+ struct {
+ u8 port;
+ u8 mode;
+ } tx;
+
+ tx.port = dln2->port;
+ tx.mode = mode;
+
+ return dln2_transfer_tx(dln2->pdev, DLN2_SPI_SET_MODE, &tx, sizeof(tx));
+}
+
+/*
+ * Change frame size. The module has to be disabled first.
+ */
+static int dln2_spi_set_bpw(struct dln2_spi *dln2, u8 bpw)
+{
+ struct {
+ u8 port;
+ u8 bpw;
+ } tx;
+
+ tx.port = dln2->port;
+ tx.bpw = bpw;
+
+ return dln2_transfer_tx(dln2->pdev, DLN2_SPI_SET_FRAME_SIZE,
+ &tx, sizeof(tx));
+}
+
+static int dln2_spi_get_supported_frame_sizes(struct dln2_spi *dln2,
+ u32 *bpw_mask)
+{
+ int ret;
+ struct {
+ u8 port;
+ } tx;
+ struct {
+ u8 count;
+ u8 frame_sizes[36];
+ } *rx = dln2->buf;
+ unsigned rx_len = sizeof(*rx);
+ int i;
+
+ tx.port = dln2->port;
+
+ ret = dln2_transfer(dln2->pdev, DLN2_SPI_GET_SUPPORTED_FRAME_SIZES,
+ &tx, sizeof(tx), rx, &rx_len);
+ if (ret < 0)
+ return ret;
+ if (rx_len < sizeof(*rx))
+ return -EPROTO;
+ if (rx->count > ARRAY_SIZE(rx->frame_sizes))
+ return -EPROTO;
+
+ *bpw_mask = 0;
+ for (i = 0; i < rx->count; i++)
+ *bpw_mask |= BIT(rx->frame_sizes[i] - 1);
+
+ dev_dbg(&dln2->pdev->dev, "bpw_mask = 0x%X\n", *bpw_mask);
+
+ return 0;
+}
+
+/*
+ * Copy the data to the DLN2 buffer and change the byte order to LE, as
+ * required by the DLN2 module. The SPI core makes sure that the data length
+ * is a multiple of the word size.
+ */
+static int dln2_spi_copy_to_buf(u8 *dln2_buf, const u8 *src, u16 len, u8 bpw)
+{
+#ifdef __LITTLE_ENDIAN
+ memcpy(dln2_buf, src, len);
+#else
+ if (bpw <= 8) {
+ memcpy(dln2_buf, src, len);
+ } else if (bpw <= 16) {
+ __le16 *d = (__le16 *)dln2_buf;
+ u16 *s = (u16 *)src;
+
+ len = len / 2;
+ while (len--)
+ *d++ = cpu_to_le16p(s++);
+ } else {
+ __le32 *d = (__le32 *)dln2_buf;
+ u32 *s = (u32 *)src;
+
+ len = len / 4;
+ while (len--)
+ *d++ = cpu_to_le32p(s++);
+ }
+#endif
+
+ return 0;
+}
+
+/*
+ * Copy the data from the DLN2 buffer and convert it to CPU byte order, since
+ * the DLN2 buffer is LE ordered. The SPI core makes sure that the data length
+ * is a multiple of the word size. The RX dln2_buf is only 2-byte aligned so,
+ * on big-endian systems, we have to avoid unaligned accesses in the 32-bit case.
+ */
+static int dln2_spi_copy_from_buf(u8 *dest, const u8 *dln2_buf, u16 len, u8 bpw)
+{
+#ifdef __LITTLE_ENDIAN
+ memcpy(dest, dln2_buf, len);
+#else
+ if (bpw <= 8) {
+ memcpy(dest, dln2_buf, len);
+ } else if (bpw <= 16) {
+ u16 *d = (u16 *)dest;
+ __le16 *s = (__le16 *)dln2_buf;
+
+ len = len / 2;
+ while (len--)
+ *d++ = le16_to_cpup(s++);
+ } else {
+ u32 *d = (u32 *)dest;
+ __le32 *s = (__le32 *)dln2_buf;
+
+ len = len / 4;
+ while (len--)
+ *d++ = get_unaligned_le32(s++);
+ }
+#endif
+
+ return 0;
+}
+
+/*
+ * Perform one write operation.
+ */
+static int dln2_spi_write_one(struct dln2_spi *dln2, const u8 *data,
+ u16 data_len, u8 attr)
+{
+ struct {
+ u8 port;
+ __le16 size;
+ u8 attr;
+ u8 buf[DLN2_SPI_MAX_XFER_SIZE];
+ } __packed *tx = dln2->buf;
+ unsigned tx_len;
+
+ BUILD_BUG_ON(sizeof(*tx) > DLN2_SPI_BUF_SIZE);
+
+ if (data_len > DLN2_SPI_MAX_XFER_SIZE)
+ return -EINVAL;
+
+ tx->port = dln2->port;
+ tx->size = cpu_to_le16(data_len);
+ tx->attr = attr;
+
+ dln2_spi_copy_to_buf(tx->buf, data, data_len, dln2->bpw);
+
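+ /*
+ * Send only the used part of the buffer: sizeof(*tx) accounts for the
+ * maximum-sized payload, so trim the length down to the header plus the
+ * data_len bytes copied above.
+ */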
+ tx_len = sizeof(*tx) + data_len - DLN2_SPI_MAX_XFER_SIZE;
+ return dln2_transfer_tx(dln2->pdev, DLN2_SPI_WRITE, tx, tx_len);
+}
+
+/*
+ * Perform one read operation.
+ */
+static int dln2_spi_read_one(struct dln2_spi *dln2, u8 *data,
+ u16 data_len, u8 attr)
+{
+ int ret;
+ struct {
+ u8 port;
+ __le16 size;
+ u8 attr;
+ } __packed tx;
+ struct {
+ __le16 size;
+ u8 buf[DLN2_SPI_MAX_XFER_SIZE];
+ } __packed *rx = dln2->buf;
+ unsigned rx_len = sizeof(*rx);
+
+ BUILD_BUG_ON(sizeof(*rx) > DLN2_SPI_BUF_SIZE);
+
+ if (data_len > DLN2_SPI_MAX_XFER_SIZE)
+ return -EINVAL;
+
+ tx.port = dln2->port;
+ tx.size = cpu_to_le16(data_len);
+ tx.attr = attr;
+
+ ret = dln2_transfer(dln2->pdev, DLN2_SPI_READ, &tx, sizeof(tx),
+ rx, &rx_len);
+ if (ret < 0)
+ return ret;
+ if (rx_len < sizeof(rx->size) + data_len)
+ return -EPROTO;
+ if (le16_to_cpu(rx->size) != data_len)
+ return -EPROTO;
+
+ dln2_spi_copy_from_buf(data, rx->buf, data_len, dln2->bpw);
+
+ return 0;
+}
+
+/*
+ * Perform one write & read operation.
+ */
+static int dln2_spi_read_write_one(struct dln2_spi *dln2, const u8 *tx_data,
+ u8 *rx_data, u16 data_len, u8 attr)
+{
+ int ret;
+ struct {
+ u8 port;
+ __le16 size;
+ u8 attr;
+ u8 buf[DLN2_SPI_MAX_XFER_SIZE];
+ } __packed *tx;
+ struct {
+ __le16 size;
+ u8 buf[DLN2_SPI_MAX_XFER_SIZE];
+ } __packed *rx;
+ unsigned tx_len, rx_len;
+
+ BUILD_BUG_ON(sizeof(*tx) > DLN2_SPI_BUF_SIZE ||
+ sizeof(*rx) > DLN2_SPI_BUF_SIZE);
+
+ if (data_len > DLN2_SPI_MAX_XFER_SIZE)
+ return -EINVAL;
+
+ /*
+ * Since this is a pseudo full-duplex communication, we're perfectly
+ * safe to use the same buffer for both tx and rx. When DLN2 sends the
+ * response back, with the rx data, we don't need the tx buffer anymore.
+ */
+ tx = dln2->buf;
+ rx = dln2->buf;
+
+ tx->port = dln2->port;
+ tx->size = cpu_to_le16(data_len);
+ tx->attr = attr;
+
+ dln2_spi_copy_to_buf(tx->buf, tx_data, data_len, dln2->bpw);
+
+ tx_len = sizeof(*tx) + data_len - DLN2_SPI_MAX_XFER_SIZE;
+ rx_len = sizeof(*rx);
+
+ ret = dln2_transfer(dln2->pdev, DLN2_SPI_READ_WRITE, tx, tx_len,
+ rx, &rx_len);
+ if (ret < 0)
+ return ret;
+ if (rx_len < sizeof(rx->size) + data_len)
+ return -EPROTO;
+ if (le16_to_cpu(rx->size) != data_len)
+ return -EPROTO;
+
+ dln2_spi_copy_from_buf(rx_data, rx->buf, data_len, dln2->bpw);
+
+ return 0;
+}
+
+/*
+ * Read/Write wrapper. It will automatically split an operation into multiple
+ * single ones due to device buffer constraints.
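+ * All chunks except the last are sent with the DLN2_SPI_ATTR_LEAVE_SS_LOW
+ * attribute so that chip select stays asserted across the whole transfer.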
+ */
+static int dln2_spi_rdwr(struct dln2_spi *dln2, const u8 *tx_data,
+ u8 *rx_data, u16 data_len, u8 attr)
+{
+ int ret;
+ u16 len;
+ u8 temp_attr;
+ u16 remaining = data_len;
+ u16 offset;
+
+ do {
+ if (remaining > DLN2_SPI_MAX_XFER_SIZE) {
+ len = DLN2_SPI_MAX_XFER_SIZE;
+ temp_attr = DLN2_SPI_ATTR_LEAVE_SS_LOW;
+ } else {
+ len = remaining;
+ temp_attr = attr;
+ }
+
+ offset = data_len - remaining;
+
+ if (tx_data && rx_data) {
+ ret = dln2_spi_read_write_one(dln2,
+ tx_data + offset,
+ rx_data + offset,
+ len, temp_attr);
+ } else if (tx_data) {
+ ret = dln2_spi_write_one(dln2,
+ tx_data + offset,
+ len, temp_attr);
+ } else if (rx_data) {
+ ret = dln2_spi_read_one(dln2,
+ rx_data + offset,
+ len, temp_attr);
+ } else {
+ return -EINVAL;
+ }
+
+ if (ret < 0)
+ return ret;
+
+ remaining -= len;
+ } while (remaining);
+
+ return 0;
+}
+
+static int dln2_spi_prepare_message(struct spi_master *master,
+ struct spi_message *message)
+{
+ int ret;
+ struct dln2_spi *dln2 = spi_master_get_devdata(master);
+ struct spi_device *spi = message->spi;
+
+ if (dln2->cs != spi->chip_select) {
+ ret = dln2_spi_cs_set_one(dln2, spi->chip_select);
+ if (ret < 0)
+ return ret;
+
+ dln2->cs = spi->chip_select;
+ }
+
+ return 0;
+}
+
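+/*
+ * Apply speed/mode/bpw changes lazily. The DLN2 module has to be disabled
+ * while its bus parameters are changed, so it is only cycled when something
+ * actually differs from the previous transfer.
+ */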
+static int dln2_spi_transfer_setup(struct dln2_spi *dln2, u32 speed,
+ u8 bpw, u8 mode)
+{
+ int ret;
+ bool bus_setup_change;
+
+ bus_setup_change = dln2->speed != speed || dln2->mode != mode ||
+ dln2->bpw != bpw;
+
+ if (!bus_setup_change)
+ return 0;
+
+ ret = dln2_spi_enable(dln2, false);
+ if (ret < 0)
+ return ret;
+
+ if (dln2->speed != speed) {
+ ret = dln2_spi_set_speed(dln2, speed);
+ if (ret < 0)
+ return ret;
+
+ dln2->speed = speed;
+ }
+
+ if (dln2->mode != mode) {
+ ret = dln2_spi_set_mode(dln2, mode & 0x3);
+ if (ret < 0)
+ return ret;
+
+ dln2->mode = mode;
+ }
+
+ if (dln2->bpw != bpw) {
+ ret = dln2_spi_set_bpw(dln2, bpw);
+ if (ret < 0)
+ return ret;
+
+ dln2->bpw = bpw;
+ }
+
+ return dln2_spi_enable(dln2, true);
+}
+
+static int dln2_spi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct dln2_spi *dln2 = spi_master_get_devdata(master);
+ int status;
+ u8 attr = 0;
+
+ status = dln2_spi_transfer_setup(dln2, xfer->speed_hz,
+ xfer->bits_per_word,
+ spi->mode);
+ if (status < 0) {
+ dev_err(&dln2->pdev->dev, "Cannot setup transfer\n");
+ return status;
+ }
+
+ if (!xfer->cs_change && !spi_transfer_is_last(master, xfer))
+ attr = DLN2_SPI_ATTR_LEAVE_SS_LOW;
+
+ status = dln2_spi_rdwr(dln2, xfer->tx_buf, xfer->rx_buf,
+ xfer->len, attr);
+ if (status < 0)
+ dev_err(&dln2->pdev->dev, "write/read failed!\n");
+
+ return status;
+}
+
+static int dln2_spi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct dln2_spi *dln2;
+ struct dln2_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*dln2));
+ if (!master)
+ return -ENOMEM;
+
+ device_set_node(&master->dev, dev_fwnode(dev));
+
+ platform_set_drvdata(pdev, master);
+
+ dln2 = spi_master_get_devdata(master);
+
+ dln2->buf = devm_kmalloc(&pdev->dev, DLN2_SPI_BUF_SIZE, GFP_KERNEL);
+ if (!dln2->buf) {
+ ret = -ENOMEM;
+ goto exit_free_master;
+ }
+
+ dln2->master = master;
+ dln2->pdev = pdev;
+ dln2->port = pdata->port;
+ /* cs/mode can never be 0xff, so the first transfer will set them */
+ dln2->cs = 0xff;
+ dln2->mode = 0xff;
+
+ /* disable SPI module before continuing with the setup */
+ ret = dln2_spi_enable(dln2, false);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to disable SPI module\n");
+ goto exit_free_master;
+ }
+
+ ret = dln2_spi_get_cs_num(dln2, &master->num_chipselect);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to get number of CS pins\n");
+ goto exit_free_master;
+ }
+
+ ret = dln2_spi_get_speed_range(dln2,
+ &master->min_speed_hz,
+ &master->max_speed_hz);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to read bus min/max freqs\n");
+ goto exit_free_master;
+ }
+
+ ret = dln2_spi_get_supported_frame_sizes(dln2,
+ &master->bits_per_word_mask);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to read supported frame sizes\n");
+ goto exit_free_master;
+ }
+
+ ret = dln2_spi_cs_enable_all(dln2, true);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to enable CS pins\n");
+ goto exit_free_master;
+ }
+
+ master->bus_num = -1;
+ master->mode_bits = SPI_CPOL | SPI_CPHA;
+ master->prepare_message = dln2_spi_prepare_message;
+ master->transfer_one = dln2_spi_transfer_one;
+ master->auto_runtime_pm = true;
+
+ /* enable SPI module, we're good to go */
+ ret = dln2_spi_enable(dln2, true);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to enable SPI module\n");
+ goto exit_free_master;
+ }
+
+ pm_runtime_set_autosuspend_delay(&pdev->dev,
+ DLN2_RPM_AUTOSUSPEND_TIMEOUT);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register master\n");
+ goto exit_register;
+ }
+
+ return ret;
+
+exit_register:
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+
+ if (dln2_spi_enable(dln2, false) < 0)
+ dev_err(&pdev->dev, "Failed to disable SPI module\n");
+exit_free_master:
+ spi_master_put(master);
+
+ return ret;
+}
+
+static int dln2_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct dln2_spi *dln2 = spi_master_get_devdata(master);
+
+ pm_runtime_disable(&pdev->dev);
+
+ if (dln2_spi_enable(dln2, false) < 0)
+ dev_err(&pdev->dev, "Failed to disable SPI module\n");
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int dln2_spi_suspend(struct device *dev)
+{
+ int ret;
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct dln2_spi *dln2 = spi_master_get_devdata(master);
+
+ ret = spi_master_suspend(master);
+ if (ret < 0)
+ return ret;
+
+ if (!pm_runtime_suspended(dev)) {
+ ret = dln2_spi_enable(dln2, false);
+ if (ret < 0)
+ return ret;
+ }
+
+ /*
+ * USB power may be cut off during sleep. Resetting the following
+ * parameters will force the board to be set up before first transfer.
+ */
+ dln2->cs = 0xff;
+ dln2->speed = 0;
+ dln2->bpw = 0;
+ dln2->mode = 0xff;
+
+ return 0;
+}
+
+static int dln2_spi_resume(struct device *dev)
+{
+ int ret;
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct dln2_spi *dln2 = spi_master_get_devdata(master);
+
+ if (!pm_runtime_suspended(dev)) {
+ ret = dln2_spi_cs_enable_all(dln2, true);
+ if (ret < 0)
+ return ret;
+
+ ret = dln2_spi_enable(dln2, true);
+ if (ret < 0)
+ return ret;
+ }
+
+ return spi_master_resume(master);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_PM
+static int dln2_spi_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct dln2_spi *dln2 = spi_master_get_devdata(master);
+
+ return dln2_spi_enable(dln2, false);
+}
+
+static int dln2_spi_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct dln2_spi *dln2 = spi_master_get_devdata(master);
+
+ return dln2_spi_enable(dln2, true);
+}
+#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops dln2_spi_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(dln2_spi_suspend, dln2_spi_resume)
+ SET_RUNTIME_PM_OPS(dln2_spi_runtime_suspend,
+ dln2_spi_runtime_resume, NULL)
+};
+
+static struct platform_driver spi_dln2_driver = {
+ .driver = {
+ .name = "dln2-spi",
+ .pm = &dln2_spi_pm,
+ },
+ .probe = dln2_spi_probe,
+ .remove = dln2_spi_remove,
+};
+module_platform_driver(spi_dln2_driver);
+
+MODULE_DESCRIPTION("Driver for the Diolan DLN2 SPI master interface");
+MODULE_AUTHOR("Laurentiu Palcu <laurentiu.palcu@intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:dln2-spi");
diff --git a/drivers/spi/spi-dw-bt1.c b/drivers/spi/spi-dw-bt1.c
new file mode 100644
index 000000000..3fb89dee5
--- /dev/null
+++ b/drivers/spi/spi-dw-bt1.c
@@ -0,0 +1,344 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+//
+// Authors:
+// Ramil Zaripov <Ramil.Zaripov@baikalelectronics.ru>
+// Serge Semin <Sergey.Semin@baikalelectronics.ru>
+//
+// Baikal-T1 DW APB SPI and System Boot SPI driver
+//
+
+#include <linux/clk.h>
+#include <linux/cpumask.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/mux/consumer.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+#include <linux/spi/spi-mem.h>
+#include <linux/spi/spi.h>
+
+#include "spi-dw.h"
+
+#define BT1_BOOT_DIRMAP 0
+#define BT1_BOOT_REGS 1
+
+struct dw_spi_bt1 {
+ struct dw_spi dws;
+ struct clk *clk;
+ struct mux_control *mux;
+
+#ifdef CONFIG_SPI_DW_BT1_DIRMAP
+ void __iomem *map;
+ resource_size_t map_len;
+#endif
+};
+#define to_dw_spi_bt1(_ctlr) \
+ container_of(spi_controller_get_devdata(_ctlr), struct dw_spi_bt1, dws)
+
+typedef int (*dw_spi_bt1_init_cb)(struct platform_device *pdev,
+ struct dw_spi_bt1 *dwsbt1);
+
+#ifdef CONFIG_SPI_DW_BT1_DIRMAP
+
+static int dw_spi_bt1_dirmap_create(struct spi_mem_dirmap_desc *desc)
+{
+ struct dw_spi_bt1 *dwsbt1 = to_dw_spi_bt1(desc->mem->spi->controller);
+
+ if (!dwsbt1->map ||
+ !dwsbt1->dws.mem_ops.supports_op(desc->mem, &desc->info.op_tmpl))
+ return -EOPNOTSUPP;
+
+ /*
+ * Make sure the requested region doesn't go out of the physically
+ * mapped flash memory bounds and the operation is read-only.
+ */
+ if (desc->info.offset + desc->info.length > dwsbt1->map_len ||
+ desc->info.op_tmpl.data.dir != SPI_MEM_DATA_IN)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+/*
+ * The directly mapped SPI memory region is only accessible in dword chunks.
+ * That's why we have to create a dedicated read-method to copy data from there
+ * to the passed buffer.
+ */
+static void dw_spi_bt1_dirmap_copy_from_map(void *to, void __iomem *from, size_t len)
+{
+ size_t shift, chunk;
+ u32 data;
+
+ /*
+ * We split the copying up into the next three stages: unaligned head,
+ * aligned body, unaligned tail.
+ */
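+ /*
+ * Illustrative example (hypothetical numbers): with 'from' ending in
+ * ...0x2 and len = 9, the head reads the dword at (from - 2) and copies
+ * its two upper bytes, the body copies one full dword, and the tail
+ * reads one more dword and copies the remaining three bytes.
+ */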
+ shift = (size_t)from & 0x3;
+ if (shift) {
+ chunk = min_t(size_t, 4 - shift, len);
+ data = readl_relaxed(from - shift);
+ memcpy(to, (char *)&data + shift, chunk);
+ from += chunk;
+ to += chunk;
+ len -= chunk;
+ }
+
+ while (len >= 4) {
+ data = readl_relaxed(from);
+ memcpy(to, &data, 4);
+ from += 4;
+ to += 4;
+ len -= 4;
+ }
+
+ if (len) {
+ data = readl_relaxed(from);
+ memcpy(to, &data, len);
+ }
+}
+
+static ssize_t dw_spi_bt1_dirmap_read(struct spi_mem_dirmap_desc *desc,
+ u64 offs, size_t len, void *buf)
+{
+ struct dw_spi_bt1 *dwsbt1 = to_dw_spi_bt1(desc->mem->spi->controller);
+ struct dw_spi *dws = &dwsbt1->dws;
+ struct spi_mem *mem = desc->mem;
+ struct dw_spi_cfg cfg;
+ int ret;
+
+ /*
+ * Make sure the requested operation length is valid. Truncate the
+ * length if it's greater than the length of the MMIO region.
+ */
+ if (offs >= dwsbt1->map_len || !len)
+ return 0;
+
+ len = min_t(size_t, len, dwsbt1->map_len - offs);
+
+ /* Collect the controller configuration required by the operation */
+ cfg.tmode = DW_SPI_CTRLR0_TMOD_EPROMREAD;
+ cfg.dfs = 8;
+ cfg.ndf = 4;
+ cfg.freq = mem->spi->max_speed_hz;
+
+ /* Make sure the corresponding CS is de-asserted on transmission */
+ dw_spi_set_cs(mem->spi, false);
+
+ dw_spi_enable_chip(dws, 0);
+
+ dw_spi_update_config(dws, mem->spi, &cfg);
+
+ dw_spi_umask_intr(dws, DW_SPI_INT_RXFI);
+
+ dw_spi_enable_chip(dws, 1);
+
+ /*
+ * Enable the transparent mode of the System Boot Controller.
+ * The SPI core IO should have been locked before calling this method
+ * so no one touches the controller's registers during the dirmap
+ * operation.
+ */
+ ret = mux_control_select(dwsbt1->mux, BT1_BOOT_DIRMAP);
+ if (ret)
+ return ret;
+
+ dw_spi_bt1_dirmap_copy_from_map(buf, dwsbt1->map + offs, len);
+
+ mux_control_deselect(dwsbt1->mux);
+
+ dw_spi_set_cs(mem->spi, true);
+
+ ret = dw_spi_check_status(dws, true);
+
+ return ret ?: len;
+}
+
+#endif /* CONFIG_SPI_DW_BT1_DIRMAP */
+
+static int dw_spi_bt1_std_init(struct platform_device *pdev,
+ struct dw_spi_bt1 *dwsbt1)
+{
+ struct dw_spi *dws = &dwsbt1->dws;
+
+ dws->irq = platform_get_irq(pdev, 0);
+ if (dws->irq < 0)
+ return dws->irq;
+
+ dws->num_cs = 4;
+
+ /*
+ * Baikal-T1 Normal SPI Controllers don't always keep up with the full
+ * SPI bus speed, especially when it comes to concurrent access to the
+ * APB bus resources. Thus we have no choice but to constrain the SPI
+ * bus frequency for the memory operations, which need to read/write
+ * data as fast as possible.
+ */
+ dws->max_mem_freq = 20000000U;
+
+ dw_spi_dma_setup_generic(dws);
+
+ return 0;
+}
+
+static int dw_spi_bt1_sys_init(struct platform_device *pdev,
+ struct dw_spi_bt1 *dwsbt1)
+{
+ struct resource *mem __maybe_unused;
+ struct dw_spi *dws = &dwsbt1->dws;
+
+ /*
+ * Baikal-T1 System Boot Controller is equipped with a mux, which
+ * switches between the directly mapped SPI flash access mode and
+ * IO access to the DW APB SSI registers. Note the mux controller
+ * must be set up so that the registers stay accessible by default
+ * (in the idle state).
+ */
+ dwsbt1->mux = devm_mux_control_get(&pdev->dev, NULL);
+ if (IS_ERR(dwsbt1->mux))
+ return PTR_ERR(dwsbt1->mux);
+
+ /*
+ * Directly mapped SPI flash memory is a 16MB MMIO region, which can be
+ * used to access a peripheral memory device just by reading/writing
+ * data from/to it. Note the system APB bus will stall during each IO
+ * from/to the dirmap region until the operation is finished. So don't
+ * use it concurrently with time-critical tasks (like the SPI memory
+ * operations implemented in the DW APB SSI driver).
+ */
+#ifdef CONFIG_SPI_DW_BT1_DIRMAP
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (mem) {
+ dwsbt1->map = devm_ioremap_resource(&pdev->dev, mem);
+ if (!IS_ERR(dwsbt1->map)) {
+ dwsbt1->map_len = resource_size(mem);
+ dws->mem_ops.dirmap_create = dw_spi_bt1_dirmap_create;
+ dws->mem_ops.dirmap_read = dw_spi_bt1_dirmap_read;
+ } else {
+ dwsbt1->map = NULL;
+ }
+ }
+#endif /* CONFIG_SPI_DW_BT1_DIRMAP */
+
+ /*
+ * There is no IRQ, no DMA and just one CS available on the System Boot
+ * SPI controller.
+ */
+ dws->irq = IRQ_NOTCONNECTED;
+ dws->num_cs = 1;
+
+ /*
+ * Baikal-T1 System Boot SPI Controller doesn't keep up with the full
+ * SPI bus speed due to the relatively slow APB bus and races for its
+ * resources from different CPUs. The situation is worsened by the
+ * small FIFO depth (just 8 words). It works better in single-CPU mode,
+ * but still tends not to be fast enough at low CPU frequencies.
+ */
+ if (num_possible_cpus() > 1)
+ dws->max_mem_freq = 10000000U;
+ else
+ dws->max_mem_freq = 20000000U;
+
+ return 0;
+}
+
+static int dw_spi_bt1_probe(struct platform_device *pdev)
+{
+ dw_spi_bt1_init_cb init_func;
+ struct dw_spi_bt1 *dwsbt1;
+ struct resource *mem;
+ struct dw_spi *dws;
+ int ret;
+
+ dwsbt1 = devm_kzalloc(&pdev->dev, sizeof(struct dw_spi_bt1), GFP_KERNEL);
+ if (!dwsbt1)
+ return -ENOMEM;
+
+ dws = &dwsbt1->dws;
+
+ dws->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &mem);
+ if (IS_ERR(dws->regs))
+ return PTR_ERR(dws->regs);
+
+ dws->paddr = mem->start;
+
+ dwsbt1->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(dwsbt1->clk))
+ return PTR_ERR(dwsbt1->clk);
+
+ ret = clk_prepare_enable(dwsbt1->clk);
+ if (ret)
+ return ret;
+
+ dws->bus_num = pdev->id;
+ dws->reg_io_width = 4;
+ dws->max_freq = clk_get_rate(dwsbt1->clk);
+ if (!dws->max_freq) {
+ ret = -EINVAL;
+ goto err_disable_clk;
+ }
+
+ init_func = device_get_match_data(&pdev->dev);
+ ret = init_func(pdev, dwsbt1);
+ if (ret)
+ goto err_disable_clk;
+
+ pm_runtime_enable(&pdev->dev);
+
+ ret = dw_spi_add_host(&pdev->dev, dws);
+ if (ret) {
+ pm_runtime_disable(&pdev->dev);
+ goto err_disable_clk;
+ }
+
+ platform_set_drvdata(pdev, dwsbt1);
+
+ return 0;
+
+err_disable_clk:
+ clk_disable_unprepare(dwsbt1->clk);
+
+ return ret;
+}
+
+static int dw_spi_bt1_remove(struct platform_device *pdev)
+{
+ struct dw_spi_bt1 *dwsbt1 = platform_get_drvdata(pdev);
+
+ dw_spi_remove_host(&dwsbt1->dws);
+
+ pm_runtime_disable(&pdev->dev);
+
+ clk_disable_unprepare(dwsbt1->clk);
+
+ return 0;
+}
+
+static const struct of_device_id dw_spi_bt1_of_match[] = {
+ { .compatible = "baikal,bt1-ssi", .data = dw_spi_bt1_std_init},
+ { .compatible = "baikal,bt1-sys-ssi", .data = dw_spi_bt1_sys_init},
+ { }
+};
+MODULE_DEVICE_TABLE(of, dw_spi_bt1_of_match);
+
+static struct platform_driver dw_spi_bt1_driver = {
+ .probe = dw_spi_bt1_probe,
+ .remove = dw_spi_bt1_remove,
+ .driver = {
+ .name = "bt1-sys-ssi",
+ .of_match_table = dw_spi_bt1_of_match,
+ },
+};
+module_platform_driver(dw_spi_bt1_driver);
+
+MODULE_AUTHOR("Serge Semin <Sergey.Semin@baikalelectronics.ru>");
+MODULE_DESCRIPTION("Baikal-T1 System Boot SPI Controller driver");
+MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(SPI_DW_CORE);
diff --git a/drivers/spi/spi-dw-core.c b/drivers/spi/spi-dw-core.c
new file mode 100644
index 000000000..4976e3b89
--- /dev/null
+++ b/drivers/spi/spi-dw-core.c
@@ -0,0 +1,1017 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Designware SPI core controller driver (refer to pxa2xx_spi.c)
+ *
+ * Copyright (c) 2009, Intel Corporation.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/preempt.h>
+#include <linux/highmem.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+#include <linux/string.h>
+#include <linux/of.h>
+
+#include "spi-dw.h"
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+#endif
+
+/* Slave spi_device related */
+struct dw_spi_chip_data {
+ u32 cr0;
+ u32 rx_sample_dly; /* RX sample delay */
+};
+
+#ifdef CONFIG_DEBUG_FS
+
+#define DW_SPI_DBGFS_REG(_name, _off) \
+{ \
+ .name = _name, \
+ .offset = _off, \
+}
+
+static const struct debugfs_reg32 dw_spi_dbgfs_regs[] = {
+ DW_SPI_DBGFS_REG("CTRLR0", DW_SPI_CTRLR0),
+ DW_SPI_DBGFS_REG("CTRLR1", DW_SPI_CTRLR1),
+ DW_SPI_DBGFS_REG("SSIENR", DW_SPI_SSIENR),
+ DW_SPI_DBGFS_REG("SER", DW_SPI_SER),
+ DW_SPI_DBGFS_REG("BAUDR", DW_SPI_BAUDR),
+ DW_SPI_DBGFS_REG("TXFTLR", DW_SPI_TXFTLR),
+ DW_SPI_DBGFS_REG("RXFTLR", DW_SPI_RXFTLR),
+ DW_SPI_DBGFS_REG("TXFLR", DW_SPI_TXFLR),
+ DW_SPI_DBGFS_REG("RXFLR", DW_SPI_RXFLR),
+ DW_SPI_DBGFS_REG("SR", DW_SPI_SR),
+ DW_SPI_DBGFS_REG("IMR", DW_SPI_IMR),
+ DW_SPI_DBGFS_REG("ISR", DW_SPI_ISR),
+ DW_SPI_DBGFS_REG("DMACR", DW_SPI_DMACR),
+ DW_SPI_DBGFS_REG("DMATDLR", DW_SPI_DMATDLR),
+ DW_SPI_DBGFS_REG("DMARDLR", DW_SPI_DMARDLR),
+ DW_SPI_DBGFS_REG("RX_SAMPLE_DLY", DW_SPI_RX_SAMPLE_DLY),
+};
+
+static int dw_spi_debugfs_init(struct dw_spi *dws)
+{
+ char name[32];
+
+ snprintf(name, 32, "dw_spi%d", dws->master->bus_num);
+ dws->debugfs = debugfs_create_dir(name, NULL);
+ if (!dws->debugfs)
+ return -ENOMEM;
+
+ dws->regset.regs = dw_spi_dbgfs_regs;
+ dws->regset.nregs = ARRAY_SIZE(dw_spi_dbgfs_regs);
+ dws->regset.base = dws->regs;
+ debugfs_create_regset32("registers", 0400, dws->debugfs, &dws->regset);
+
+ return 0;
+}
+
+static void dw_spi_debugfs_remove(struct dw_spi *dws)
+{
+ debugfs_remove_recursive(dws->debugfs);
+}
+
+#else
+static inline int dw_spi_debugfs_init(struct dw_spi *dws)
+{
+ return 0;
+}
+
+static inline void dw_spi_debugfs_remove(struct dw_spi *dws)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
+void dw_spi_set_cs(struct spi_device *spi, bool enable)
+{
+ struct dw_spi *dws = spi_controller_get_devdata(spi->controller);
+ bool cs_high = !!(spi->mode & SPI_CS_HIGH);
+
+ /*
+ * The DW SPI controller requires a native CS to be set in order to
+ * proceed with the data transfer. So in order to activate the SPI
+ * communications we must set a corresponding bit in the Slave
+ * Enable register no matter whether the SPI core is configured to
+ * support active-high or active-low CS level.
+ */
+ if (cs_high == enable)
+ dw_writel(dws, DW_SPI_SER, BIT(spi->chip_select));
+ else
+ dw_writel(dws, DW_SPI_SER, 0);
+}
+EXPORT_SYMBOL_NS_GPL(dw_spi_set_cs, SPI_DW_CORE);
+
+/* Return the max entries we can fill into tx fifo */
+static inline u32 dw_spi_tx_max(struct dw_spi *dws)
+{
+ u32 tx_room, rxtx_gap;
+
+ tx_room = dws->fifo_len - dw_readl(dws, DW_SPI_TXFLR);
+
+ /*
+ * Another concern is the tx/rx mismatch: we thought of using
+ * (dws->fifo_len - rxflr - txflr) as the maximum value for tx, but
+ * that doesn't cover the data which has left the tx/rx FIFOs and is
+ * still inside the shift registers. So the limit is tracked from the
+ * software point of view instead.
+ */
+ rxtx_gap = dws->fifo_len - (dws->rx_len - dws->tx_len);
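+ /*
+ * Worked example (illustrative numbers): with fifo_len = 16 and 10
+ * frames already written but not yet read back (rx_len - tx_len = 10),
+ * rxtx_gap = 6, so at most 6 more frames may be pushed to the Tx FIFO
+ * without risking an Rx FIFO overflow.
+ */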
+
+ return min3((u32)dws->tx_len, tx_room, rxtx_gap);
+}
+
+/* Return the max entries we should read out of rx fifo */
+static inline u32 dw_spi_rx_max(struct dw_spi *dws)
+{
+ return min_t(u32, dws->rx_len, dw_readl(dws, DW_SPI_RXFLR));
+}
+
+static void dw_writer(struct dw_spi *dws)
+{
+ u32 max = dw_spi_tx_max(dws);
+ u32 txw = 0;
+
+ while (max--) {
+ if (dws->tx) {
+ if (dws->n_bytes == 1)
+ txw = *(u8 *)(dws->tx);
+ else if (dws->n_bytes == 2)
+ txw = *(u16 *)(dws->tx);
+ else
+ txw = *(u32 *)(dws->tx);
+
+ dws->tx += dws->n_bytes;
+ }
+ dw_write_io_reg(dws, DW_SPI_DR, txw);
+ --dws->tx_len;
+ }
+}
+
+static void dw_reader(struct dw_spi *dws)
+{
+ u32 max = dw_spi_rx_max(dws);
+ u32 rxw;
+
+ while (max--) {
+ rxw = dw_read_io_reg(dws, DW_SPI_DR);
+ if (dws->rx) {
+ if (dws->n_bytes == 1)
+ *(u8 *)(dws->rx) = rxw;
+ else if (dws->n_bytes == 2)
+ *(u16 *)(dws->rx) = rxw;
+ else
+ *(u32 *)(dws->rx) = rxw;
+
+ dws->rx += dws->n_bytes;
+ }
+ --dws->rx_len;
+ }
+}
+
+int dw_spi_check_status(struct dw_spi *dws, bool raw)
+{
+ u32 irq_status;
+ int ret = 0;
+
+ if (raw)
+ irq_status = dw_readl(dws, DW_SPI_RISR);
+ else
+ irq_status = dw_readl(dws, DW_SPI_ISR);
+
+ if (irq_status & DW_SPI_INT_RXOI) {
+ dev_err(&dws->master->dev, "RX FIFO overflow detected\n");
+ ret = -EIO;
+ }
+
+ if (irq_status & DW_SPI_INT_RXUI) {
+ dev_err(&dws->master->dev, "RX FIFO underflow detected\n");
+ ret = -EIO;
+ }
+
+ if (irq_status & DW_SPI_INT_TXOI) {
+ dev_err(&dws->master->dev, "TX FIFO overflow detected\n");
+ ret = -EIO;
+ }
+
+ /* Generically handle the erroneous situation */
+ if (ret) {
+ dw_spi_reset_chip(dws);
+ if (dws->master->cur_msg)
+ dws->master->cur_msg->status = ret;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(dw_spi_check_status, SPI_DW_CORE);
+
+static irqreturn_t dw_spi_transfer_handler(struct dw_spi *dws)
+{
+ u16 irq_status = dw_readl(dws, DW_SPI_ISR);
+
+ if (dw_spi_check_status(dws, false)) {
+ spi_finalize_current_transfer(dws->master);
+ return IRQ_HANDLED;
+ }
+
+ /*
+ * Read data from the Rx FIFO every time we get a chance to execute
+ * this method. If there is nothing left to receive, terminate the
+ * procedure. Otherwise adjust the Rx FIFO Threshold level if it's the
+ * final stage of the transfer. By doing so we'll get the next IRQ
+ * right when the leftover incoming data is received.
+ */
+ dw_reader(dws);
+ if (!dws->rx_len) {
+ dw_spi_mask_intr(dws, 0xff);
+ spi_finalize_current_transfer(dws->master);
+ } else if (dws->rx_len <= dw_readl(dws, DW_SPI_RXFTLR)) {
+ dw_writel(dws, DW_SPI_RXFTLR, dws->rx_len - 1);
+ }
+
+ /*
+ * Send data out if Tx FIFO Empty IRQ is received. The IRQ will be
+ * disabled after the data transmission is finished so as not to
+ * have the TXE IRQ flooding at the final stage of the transfer.
+ */
+ if (irq_status & DW_SPI_INT_TXEI) {
+ dw_writer(dws);
+ if (!dws->tx_len)
+ dw_spi_mask_intr(dws, DW_SPI_INT_TXEI);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t dw_spi_irq(int irq, void *dev_id)
+{
+ struct spi_controller *master = dev_id;
+ struct dw_spi *dws = spi_controller_get_devdata(master);
+ u16 irq_status = dw_readl(dws, DW_SPI_ISR) & DW_SPI_INT_MASK;
+
+ if (!irq_status)
+ return IRQ_NONE;
+
+ if (!master->cur_msg) {
+ dw_spi_mask_intr(dws, 0xff);
+ return IRQ_HANDLED;
+ }
+
+ return dws->transfer_handler(dws);
+}
+
+static u32 dw_spi_prepare_cr0(struct dw_spi *dws, struct spi_device *spi)
+{
+ u32 cr0 = 0;
+
+ if (dw_spi_ip_is(dws, PSSI)) {
+ /* CTRLR0[ 5: 4] Frame Format */
+ cr0 |= FIELD_PREP(DW_PSSI_CTRLR0_FRF_MASK, DW_SPI_CTRLR0_FRF_MOTO_SPI);
+
+ /*
+ * SPI mode (SCPOL|SCPH)
+ * CTRLR0[ 6] Serial Clock Phase
+ * CTRLR0[ 7] Serial Clock Polarity
+ */
+ if (spi->mode & SPI_CPOL)
+ cr0 |= DW_PSSI_CTRLR0_SCPOL;
+ if (spi->mode & SPI_CPHA)
+ cr0 |= DW_PSSI_CTRLR0_SCPHA;
+
+ /* CTRLR0[11] Shift Register Loop */
+ if (spi->mode & SPI_LOOP)
+ cr0 |= DW_PSSI_CTRLR0_SRL;
+ } else {
+ /* CTRLR0[ 7: 6] Frame Format */
+ cr0 |= FIELD_PREP(DW_HSSI_CTRLR0_FRF_MASK, DW_SPI_CTRLR0_FRF_MOTO_SPI);
+
+ /*
+ * SPI mode (SCPOL|SCPH)
+ * CTRLR0[ 8] Serial Clock Phase
+ * CTRLR0[ 9] Serial Clock Polarity
+ */
+ if (spi->mode & SPI_CPOL)
+ cr0 |= DW_HSSI_CTRLR0_SCPOL;
+ if (spi->mode & SPI_CPHA)
+ cr0 |= DW_HSSI_CTRLR0_SCPHA;
+
+ /* CTRLR0[13] Shift Register Loop */
+ if (spi->mode & SPI_LOOP)
+ cr0 |= DW_HSSI_CTRLR0_SRL;
+
+ /* CTRLR0[31] MST */
+ if (dw_spi_ver_is_ge(dws, HSSI, 102A))
+ cr0 |= DW_HSSI_CTRLR0_MST;
+ }
+
+ return cr0;
+}
+
+void dw_spi_update_config(struct dw_spi *dws, struct spi_device *spi,
+ struct dw_spi_cfg *cfg)
+{
+ struct dw_spi_chip_data *chip = spi_get_ctldata(spi);
+ u32 cr0 = chip->cr0;
+ u32 speed_hz;
+ u16 clk_div;
+
+ /* CTRLR0[ 4/3: 0] or CTRLR0[ 20: 16] Data Frame Size */
+ cr0 |= (cfg->dfs - 1) << dws->dfs_offset;
+
+ if (dw_spi_ip_is(dws, PSSI))
+ /* CTRLR0[ 9:8] Transfer Mode */
+ cr0 |= FIELD_PREP(DW_PSSI_CTRLR0_TMOD_MASK, cfg->tmode);
+ else
+ /* CTRLR0[11:10] Transfer Mode */
+ cr0 |= FIELD_PREP(DW_HSSI_CTRLR0_TMOD_MASK, cfg->tmode);
+
+ dw_writel(dws, DW_SPI_CTRLR0, cr0);
+
+ if (cfg->tmode == DW_SPI_CTRLR0_TMOD_EPROMREAD ||
+ cfg->tmode == DW_SPI_CTRLR0_TMOD_RO)
+ dw_writel(dws, DW_SPI_CTRLR1, cfg->ndf ? cfg->ndf - 1 : 0);
+
+ /* Note DW APB SSI clock divider doesn't support odd numbers */
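+ /*
+ * E.g. (illustrative numbers): with max_freq = 100 MHz and a requested
+ * cfg->freq of 15 MHz, DIV_ROUND_UP() yields 7, which is rounded up to
+ * the even divider 8, giving an effective speed of 12.5 MHz.
+ */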
+ clk_div = (DIV_ROUND_UP(dws->max_freq, cfg->freq) + 1) & 0xfffe;
+ speed_hz = dws->max_freq / clk_div;
+
+ if (dws->current_freq != speed_hz) {
+ dw_spi_set_clk(dws, clk_div);
+ dws->current_freq = speed_hz;
+ }
+
+ /* Update RX sample delay if required */
+ if (dws->cur_rx_sample_dly != chip->rx_sample_dly) {
+ dw_writel(dws, DW_SPI_RX_SAMPLE_DLY, chip->rx_sample_dly);
+ dws->cur_rx_sample_dly = chip->rx_sample_dly;
+ }
+}
+EXPORT_SYMBOL_NS_GPL(dw_spi_update_config, SPI_DW_CORE);
+
+static void dw_spi_irq_setup(struct dw_spi *dws)
+{
+ u16 level;
+ u8 imask;
+
+ /*
+ * Initially the Tx and Rx data lengths match. The Rx FIFO Threshold level
+ * will be adjusted at the final stage of the IRQ-based SPI transfer
+ * execution so as not to lose the leftover of the incoming data.
+ */
+ level = min_t(unsigned int, dws->fifo_len / 2, dws->tx_len);
+ dw_writel(dws, DW_SPI_TXFTLR, level);
+ dw_writel(dws, DW_SPI_RXFTLR, level - 1);
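+ /*
+ * E.g. (illustrative numbers): with fifo_len = 16 and tx_len >= 8,
+ * level = 8, so the Tx-empty IRQ fires when the Tx FIFO drops to 8
+ * entries or fewer and the Rx-full IRQ fires once 8 entries have been
+ * received.
+ */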
+
+ dws->transfer_handler = dw_spi_transfer_handler;
+
+ imask = DW_SPI_INT_TXEI | DW_SPI_INT_TXOI |
+ DW_SPI_INT_RXUI | DW_SPI_INT_RXOI | DW_SPI_INT_RXFI;
+ dw_spi_umask_intr(dws, imask);
+}
+
+/*
+ * The iterative procedure of the poll-based transfer is simple: write as much
+ * as possible to the Tx FIFO, wait until the pending incoming data is ready
+ * to be read, read it from the Rx FIFO, and check whether the performed
+ * procedure has been successful.
+ *
+ * Note that, just like the IRQ-based transfer, this method won't work well
+ * for SPI devices connected to the controller with native CS due to the
+ * automatic CS assertion/de-assertion.
+ */
+static int dw_spi_poll_transfer(struct dw_spi *dws,
+ struct spi_transfer *transfer)
+{
+ struct spi_delay delay;
+ u16 nbits;
+ int ret;
+
+ delay.unit = SPI_DELAY_UNIT_SCK;
+ nbits = dws->n_bytes * BITS_PER_BYTE;
+
+ do {
+ dw_writer(dws);
+
+ delay.value = nbits * (dws->rx_len - dws->tx_len);
+ spi_delay_exec(&delay, transfer);
+
+ dw_reader(dws);
+
+ ret = dw_spi_check_status(dws, true);
+ if (ret)
+ return ret;
+ } while (dws->rx_len);
+
+ return 0;
+}
+
+static int dw_spi_transfer_one(struct spi_controller *master,
+ struct spi_device *spi,
+ struct spi_transfer *transfer)
+{
+ struct dw_spi *dws = spi_controller_get_devdata(master);
+ struct dw_spi_cfg cfg = {
+ .tmode = DW_SPI_CTRLR0_TMOD_TR,
+ .dfs = transfer->bits_per_word,
+ .freq = transfer->speed_hz,
+ };
+ int ret;
+
+ dws->dma_mapped = 0;
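+ /*
+ * E.g. a 20-bit word needs DIV_ROUND_UP(20, 8) = 3 bytes, which is
+ * rounded up to a 4-byte (u32) FIFO access per data frame.
+ */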
+ dws->n_bytes =
+ roundup_pow_of_two(DIV_ROUND_UP(transfer->bits_per_word,
+ BITS_PER_BYTE));
+
+ dws->tx = (void *)transfer->tx_buf;
+ dws->tx_len = transfer->len / dws->n_bytes;
+ dws->rx = transfer->rx_buf;
+ dws->rx_len = dws->tx_len;
+
+ /* Ensure the data above is visible for all CPUs */
+ smp_mb();
+
+ dw_spi_enable_chip(dws, 0);
+
+ dw_spi_update_config(dws, spi, &cfg);
+
+ transfer->effective_speed_hz = dws->current_freq;
+
+ /* Check if current transfer is a DMA transaction */
+ if (master->can_dma && master->can_dma(master, spi, transfer))
+ dws->dma_mapped = master->cur_msg_mapped;
+
+ /* For poll mode just disable all interrupts */
+ dw_spi_mask_intr(dws, 0xff);
+
+ if (dws->dma_mapped) {
+ ret = dws->dma_ops->dma_setup(dws, transfer);
+ if (ret)
+ return ret;
+ }
+
+ dw_spi_enable_chip(dws, 1);
+
+ if (dws->dma_mapped)
+ return dws->dma_ops->dma_transfer(dws, transfer);
+ else if (dws->irq == IRQ_NOTCONNECTED)
+ return dw_spi_poll_transfer(dws, transfer);
+
+ dw_spi_irq_setup(dws);
+
+ return 1;
+}
+
+static void dw_spi_handle_err(struct spi_controller *master,
+ struct spi_message *msg)
+{
+ struct dw_spi *dws = spi_controller_get_devdata(master);
+
+ if (dws->dma_mapped)
+ dws->dma_ops->dma_stop(dws);
+
+ dw_spi_reset_chip(dws);
+}
+
+static int dw_spi_adjust_mem_op_size(struct spi_mem *mem, struct spi_mem_op *op)
+{
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ op->data.nbytes = clamp_val(op->data.nbytes, 0, DW_SPI_NDF_MASK + 1);
+
+ return 0;
+}
+
+static bool dw_spi_supports_mem_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ if (op->data.buswidth > 1 || op->addr.buswidth > 1 ||
+ op->dummy.buswidth > 1 || op->cmd.buswidth > 1)
+ return false;
+
+ return spi_mem_default_supports_op(mem, op);
+}
+
+static int dw_spi_init_mem_buf(struct dw_spi *dws, const struct spi_mem_op *op)
+{
+ unsigned int i, j, len;
+ u8 *out;
+
+ /*
+ * Calculate the total length of the EEPROM command transfer and
+ * either use the pre-allocated buffer or create a temporary one.
+ */
+ len = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;
+ if (op->data.dir == SPI_MEM_DATA_OUT)
+ len += op->data.nbytes;
+
+ if (len <= DW_SPI_BUF_SIZE) {
+ out = dws->buf;
+ } else {
+ out = kzalloc(len, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+ }
+
+ /*
+ * Collect the operation code, address and dummy bytes into a single
+ * buffer. If the operation also carries outbound data, copy it into the
+ * same buffer in order to speed the data transmission up.
+ */
+ for (i = 0; i < op->cmd.nbytes; ++i)
+ out[i] = DW_SPI_GET_BYTE(op->cmd.opcode, op->cmd.nbytes - i - 1);
+ for (j = 0; j < op->addr.nbytes; ++i, ++j)
+ out[i] = DW_SPI_GET_BYTE(op->addr.val, op->addr.nbytes - j - 1);
+ for (j = 0; j < op->dummy.nbytes; ++i, ++j)
+ out[i] = 0x0;
+
+ if (op->data.dir == SPI_MEM_DATA_OUT)
+ memcpy(&out[i], op->data.buf.out, op->data.nbytes);
+
+ dws->n_bytes = 1;
+ dws->tx = out;
+ dws->tx_len = len;
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ dws->rx = op->data.buf.in;
+ dws->rx_len = op->data.nbytes;
+ } else {
+ dws->rx = NULL;
+ dws->rx_len = 0;
+ }
+
+ return 0;
+}
+
+static void dw_spi_free_mem_buf(struct dw_spi *dws)
+{
+ if (dws->tx != dws->buf)
+ kfree(dws->tx);
+}
+
+static int dw_spi_write_then_read(struct dw_spi *dws, struct spi_device *spi)
+{
+ u32 room, entries, sts;
+ unsigned int len;
+ u8 *buf;
+
+ /*
+ * At the initial stage we just pre-fill the Tx FIFO with no rush,
+ * since native CS hasn't been enabled yet and the automatic data
+ * transmission won't start until we do that.
+ */
+ len = min(dws->fifo_len, dws->tx_len);
+ buf = dws->tx;
+ while (len--)
+ dw_write_io_reg(dws, DW_SPI_DR, *buf++);
+
+ /*
+ * After setting any bit in the SER register the transmission will
+ * start automatically. We have to keep up with that procedure,
+ * otherwise the CS will be de-asserted and the memory operation will
+ * be terminated prematurely.
+ */
+ len = dws->tx_len - ((void *)buf - dws->tx);
+ dw_spi_set_cs(spi, false);
+ while (len) {
+ entries = readl_relaxed(dws->regs + DW_SPI_TXFLR);
+ if (!entries) {
+ dev_err(&dws->master->dev, "CS de-assertion on Tx\n");
+ return -EIO;
+ }
+ room = min(dws->fifo_len - entries, len);
+ for (; room; --room, --len)
+ dw_write_io_reg(dws, DW_SPI_DR, *buf++);
+ }
+
+ /*
+ * Data fetching will start automatically if the EEPROM-read mode is
+ * activated. We have to keep up with the incoming data pace to
+ * prevent an Rx FIFO overflow from causing inbound data loss.
+ */
+ len = dws->rx_len;
+ buf = dws->rx;
+ while (len) {
+ entries = readl_relaxed(dws->regs + DW_SPI_RXFLR);
+ if (!entries) {
+ sts = readl_relaxed(dws->regs + DW_SPI_RISR);
+ if (sts & DW_SPI_INT_RXOI) {
+ dev_err(&dws->master->dev, "FIFO overflow on Rx\n");
+ return -EIO;
+ }
+ continue;
+ }
+ entries = min(entries, len);
+ for (; entries; --entries, --len)
+ *buf++ = dw_read_io_reg(dws, DW_SPI_DR);
+ }
+
+ return 0;
+}
+
+static inline bool dw_spi_ctlr_busy(struct dw_spi *dws)
+{
+ return dw_readl(dws, DW_SPI_SR) & DW_SPI_SR_BUSY;
+}
+
+static int dw_spi_wait_mem_op_done(struct dw_spi *dws)
+{
+ int retry = DW_SPI_WAIT_RETRIES;
+ struct spi_delay delay;
+ unsigned long ns, us;
+ u32 nents;
+
+ nents = dw_readl(dws, DW_SPI_TXFLR);
+ ns = NSEC_PER_SEC / dws->current_freq * nents;
+ ns *= dws->n_bytes * BITS_PER_BYTE;
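+ /*
+ * E.g. (illustrative numbers): 4 byte-wide entries left at 25 MHz give
+ * 40 ns * 4 * 8 = 1280 ns, so the delay falls back to the microsecond
+ * unit (2 us per retry).
+ */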
+ if (ns <= NSEC_PER_USEC) {
+ delay.unit = SPI_DELAY_UNIT_NSECS;
+ delay.value = ns;
+ } else {
+ us = DIV_ROUND_UP(ns, NSEC_PER_USEC);
+ delay.unit = SPI_DELAY_UNIT_USECS;
+ delay.value = clamp_val(us, 0, USHRT_MAX);
+ }
+
+ while (dw_spi_ctlr_busy(dws) && retry--)
+ spi_delay_exec(&delay, NULL);
+
+ if (retry < 0) {
+ dev_err(&dws->master->dev, "Mem op hanged up\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void dw_spi_stop_mem_op(struct dw_spi *dws, struct spi_device *spi)
+{
+ dw_spi_enable_chip(dws, 0);
+ dw_spi_set_cs(spi, true);
+ dw_spi_enable_chip(dws, 1);
+}
+
+/*
+ * The SPI memory operation implementation below is the best choice for
+ * devices selected by the native chip-select lane. It's specifically
+ * developed to work around the problem of the automatic chip-select
+ * lane toggling when there is no data in the Tx FIFO buffer. Luckily the
+ * current SPI-mem core calls the exec_op() callback only if the GPIO-based
+ * CS is unavailable.
+ */
+static int dw_spi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ struct dw_spi *dws = spi_controller_get_devdata(mem->spi->controller);
+ struct dw_spi_cfg cfg;
+ unsigned long flags;
+ int ret;
+
+ /*
+ * Collect the outbound data into a single buffer to speed the
+ * transmission up at least on the initial stage.
+ */
+ ret = dw_spi_init_mem_buf(dws, op);
+ if (ret)
+ return ret;
+
+ /*
+ * DW SPI EEPROM-read mode is required only for the SPI memory Data-IN
+ * operation. Transmit-only mode is suitable for the rest of them.
+ */
+ cfg.dfs = 8;
+ cfg.freq = clamp(mem->spi->max_speed_hz, 0U, dws->max_mem_freq);
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ cfg.tmode = DW_SPI_CTRLR0_TMOD_EPROMREAD;
+ cfg.ndf = op->data.nbytes;
+ } else {
+ cfg.tmode = DW_SPI_CTRLR0_TMOD_TO;
+ }
+
+ dw_spi_enable_chip(dws, 0);
+
+ dw_spi_update_config(dws, mem->spi, &cfg);
+
+ dw_spi_mask_intr(dws, 0xff);
+
+ dw_spi_enable_chip(dws, 1);
+
+ /*
+ * The DW APB SSI controller has some very nasty peculiarities. First, in
+ * its original form (without any vendor-specific modifications) it doesn't
+ * provide a direct way to set and clear the native chip-select signal.
+ * Instead the controller asserts the CS lane if the Tx FIFO isn't empty
+ * and a transmission is going on, and automatically de-asserts it back to
+ * the high level when the Tx FIFO has nothing left to be pushed out. Due
+ * to that, multi-tasking or heavy IRQ activity might be fatal, since
+ * preemption of the transfer procedure may drain the Tx FIFO and cause a
+ * sudden CS de-assertion in the middle of the transfer, which will most
+ * likely result in data loss. Secondly, the EEPROM-read and Read-only DW
+ * SPI transfer modes imply the incoming data being automatically pulled
+ * into the Rx FIFO. So if the driver software is late in fetching the data
+ * from the FIFO before it overflows, new incoming data will be lost. In
+ * order to make sure the executed memory operations are CS-atomic and to
+ * prevent the Rx FIFO overflow, we have to disable the local interrupts so
+ * as to block any preemption during the subsequent IO operations.
+ *
+ * Note: in some circumstances disabling IRQs may not help to prevent the
+ * problems described above. The CS de-assertion and Rx FIFO overflow may
+ * still happen due to a relatively slow system bus or a CPU not working
+ * fast enough, so the write-then-read algorithm implemented here just
+ * won't keep up with the SPI bus data transfer. Such a situation is highly
+ * platform-specific and is supposed to be fixed by manually restricting
+ * the SPI bus frequency using the dws->max_mem_freq parameter.
+ */
+ local_irq_save(flags);
+ preempt_disable();
+
+ ret = dw_spi_write_then_read(dws, mem->spi);
+
+ local_irq_restore(flags);
+ preempt_enable();
+
+ /*
+ * Wait for the operation to finish and check the controller status,
+ * but only if no run-time error has been detected so far. Otherwise
+ * waiting would be pointless, and skipping the status check prevents an
+ * additional error message, since any HW error flag being set would be
+ * due to an error already detected on the data transfer.
+ */
+ if (!ret) {
+ ret = dw_spi_wait_mem_op_done(dws);
+ if (!ret)
+ ret = dw_spi_check_status(dws, true);
+ }
+
+ dw_spi_stop_mem_op(dws, mem->spi);
+
+ dw_spi_free_mem_buf(dws);
+
+ return ret;
+}
+
+/*
+ * Initialize the default memory operations if a glue layer hasn't specified
+ * custom ones. Direct mapping operations will be preserved anyway since DW SPI
+ * controller doesn't have an embedded dirmap interface. Note the memory
+ * operations implemented in this driver are the best choice only for the DW APB
+ * SSI controller with standard native CS functionality. If a hardware vendor
+ * has fixed the automatic CS assertion/de-assertion peculiarity, then it will
+ * be safer to use the normal SPI-messages-based transfers implementation.
+ */
+static void dw_spi_init_mem_ops(struct dw_spi *dws)
+{
+ if (!dws->mem_ops.exec_op && !(dws->caps & DW_SPI_CAP_CS_OVERRIDE) &&
+ !dws->set_cs) {
+ dws->mem_ops.adjust_op_size = dw_spi_adjust_mem_op_size;
+ dws->mem_ops.supports_op = dw_spi_supports_mem_op;
+ dws->mem_ops.exec_op = dw_spi_exec_mem_op;
+ if (!dws->max_mem_freq)
+ dws->max_mem_freq = dws->max_freq;
+ }
+}
+
+/* This may be called twice for each spi dev */
+static int dw_spi_setup(struct spi_device *spi)
+{
+ struct dw_spi *dws = spi_controller_get_devdata(spi->controller);
+ struct dw_spi_chip_data *chip;
+
+ /* Only alloc on first setup */
+ chip = spi_get_ctldata(spi);
+ if (!chip) {
+ struct dw_spi *dws = spi_controller_get_devdata(spi->controller);
+ u32 rx_sample_dly_ns;
+
+ chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+ spi_set_ctldata(spi, chip);
+ /* Get specific / default rx-sample-delay */
+ if (device_property_read_u32(&spi->dev,
+ "rx-sample-delay-ns",
+ &rx_sample_dly_ns) != 0)
+ /* Use default controller value */
+ rx_sample_dly_ns = dws->def_rx_sample_dly_ns;
+ chip->rx_sample_dly = DIV_ROUND_CLOSEST(rx_sample_dly_ns,
+ NSEC_PER_SEC /
+ dws->max_freq);
+ }
+
+ /*
+ * Update CR0 data each time the setup callback is invoked since
+ * the device parameters could have been changed, for instance, by
+ * the MMC SPI driver or something else.
+ */
+ chip->cr0 = dw_spi_prepare_cr0(dws, spi);
+
+ return 0;
+}
+
+static void dw_spi_cleanup(struct spi_device *spi)
+{
+ struct dw_spi_chip_data *chip = spi_get_ctldata(spi);
+
+ kfree(chip);
+ spi_set_ctldata(spi, NULL);
+}
+
+/* Restart the controller, disable all interrupts, clean rx fifo */
+static void dw_spi_hw_init(struct device *dev, struct dw_spi *dws)
+{
+ dw_spi_reset_chip(dws);
+
+ /*
+ * Retrieve the Synopsys component version if it hasn't been specified
+ * by the platform. The CoreKit version ID is encoded as a 3-char ASCII
+ * code enclosed with '*' (typical for most of the Synopsys IP-cores).
+ */
+ if (!dws->ver) {
+ dws->ver = dw_readl(dws, DW_SPI_VERSION);
+
+ dev_dbg(dev, "Synopsys DWC%sSSI v%c.%c%c\n",
+ dw_spi_ip_is(dws, PSSI) ? " APB " : " ",
+ DW_SPI_GET_BYTE(dws->ver, 3), DW_SPI_GET_BYTE(dws->ver, 2),
+ DW_SPI_GET_BYTE(dws->ver, 1));
+ }
+
+ /*
+ * Try to detect the FIFO depth if it hasn't been set by the interface
+ * driver; per the HW spec the depth can be from 2 to 256.
+ */
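+ /*
+ * E.g. (illustrative): on a controller with a 16-entry FIFO the TXFTLR
+ * readback check typically succeeds for values 1..15 and first fails at
+ * fifo == 16, so the loop breaks there and fifo_len is set to 16.
+ */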
+ if (!dws->fifo_len) {
+ u32 fifo;
+
+ for (fifo = 1; fifo < 256; fifo++) {
+ dw_writel(dws, DW_SPI_TXFTLR, fifo);
+ if (fifo != dw_readl(dws, DW_SPI_TXFTLR))
+ break;
+ }
+ dw_writel(dws, DW_SPI_TXFTLR, 0);
+
+ dws->fifo_len = (fifo == 1) ? 0 : fifo;
+ dev_dbg(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len);
+ }
+
+ /*
+ * Detect the CTRLR0.DFS field size and offset by testing whether the
+ * lowest bits are writable. Note the DWC SSI controller also has the
+ * extended DFS, but with zero offset.
+ */
+ if (dw_spi_ip_is(dws, PSSI)) {
+ u32 cr0, tmp = dw_readl(dws, DW_SPI_CTRLR0);
+
+ dw_spi_enable_chip(dws, 0);
+ dw_writel(dws, DW_SPI_CTRLR0, 0xffffffff);
+ cr0 = dw_readl(dws, DW_SPI_CTRLR0);
+ dw_writel(dws, DW_SPI_CTRLR0, tmp);
+ dw_spi_enable_chip(dws, 1);
+
+ if (!(cr0 & DW_PSSI_CTRLR0_DFS_MASK)) {
+ dws->caps |= DW_SPI_CAP_DFS32;
+ dws->dfs_offset = __bf_shf(DW_PSSI_CTRLR0_DFS32_MASK);
+ dev_dbg(dev, "Detected 32-bits max data frame size\n");
+ }
+ } else {
+ dws->caps |= DW_SPI_CAP_DFS32;
+ }
+
+ /* Enable the HW fixup for explicit CS deselect on Amazon's Alpine chips */
+ if (dws->caps & DW_SPI_CAP_CS_OVERRIDE)
+ dw_writel(dws, DW_SPI_CS_OVERRIDE, 0xF);
+}
+
+int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
+{
+ struct spi_controller *master;
+ int ret;
+
+ if (!dws)
+ return -EINVAL;
+
+ master = spi_alloc_master(dev, 0);
+ if (!master)
+ return -ENOMEM;
+
+ device_set_node(&master->dev, dev_fwnode(dev));
+
+ dws->master = master;
+ dws->dma_addr = (dma_addr_t)(dws->paddr + DW_SPI_DR);
+
+ spi_controller_set_devdata(master, dws);
+
+ /* Basic HW init */
+ dw_spi_hw_init(dev, dws);
+
+ ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED, dev_name(dev),
+ master);
+ if (ret < 0 && ret != -ENOTCONN) {
+ dev_err(dev, "can not get IRQ\n");
+ goto err_free_master;
+ }
+
+ dw_spi_init_mem_ops(dws);
+
+ master->use_gpio_descriptors = true;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP;
+ if (dws->caps & DW_SPI_CAP_DFS32)
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
+ else
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
+ master->bus_num = dws->bus_num;
+ master->num_chipselect = dws->num_cs;
+ master->setup = dw_spi_setup;
+ master->cleanup = dw_spi_cleanup;
+ if (dws->set_cs)
+ master->set_cs = dws->set_cs;
+ else
+ master->set_cs = dw_spi_set_cs;
+ master->transfer_one = dw_spi_transfer_one;
+ master->handle_err = dw_spi_handle_err;
+ if (dws->mem_ops.exec_op)
+ master->mem_ops = &dws->mem_ops;
+ master->max_speed_hz = dws->max_freq;
+ master->flags = SPI_MASTER_GPIO_SS;
+ master->auto_runtime_pm = true;
+
+ /* Get default rx sample delay */
+ device_property_read_u32(dev, "rx-sample-delay-ns",
+ &dws->def_rx_sample_dly_ns);
+
+ if (dws->dma_ops && dws->dma_ops->dma_init) {
+ ret = dws->dma_ops->dma_init(dev, dws);
+ if (ret == -EPROBE_DEFER) {
+ goto err_free_irq;
+ } else if (ret) {
+ dev_warn(dev, "DMA init failed\n");
+ } else {
+ master->can_dma = dws->dma_ops->can_dma;
+ master->flags |= SPI_CONTROLLER_MUST_TX;
+ }
+ }
+
+ ret = spi_register_controller(master);
+ if (ret) {
+ dev_err_probe(dev, ret, "problem registering spi master\n");
+ goto err_dma_exit;
+ }
+
+ dw_spi_debugfs_init(dws);
+ return 0;
+
+err_dma_exit:
+ if (dws->dma_ops && dws->dma_ops->dma_exit)
+ dws->dma_ops->dma_exit(dws);
+ dw_spi_enable_chip(dws, 0);
+err_free_irq:
+ free_irq(dws->irq, master);
+err_free_master:
+ spi_controller_put(master);
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(dw_spi_add_host, SPI_DW_CORE);
+
+void dw_spi_remove_host(struct dw_spi *dws)
+{
+ dw_spi_debugfs_remove(dws);
+
+ spi_unregister_controller(dws->master);
+
+ if (dws->dma_ops && dws->dma_ops->dma_exit)
+ dws->dma_ops->dma_exit(dws);
+
+ dw_spi_shutdown_chip(dws);
+
+ free_irq(dws->irq, dws->master);
+}
+EXPORT_SYMBOL_NS_GPL(dw_spi_remove_host, SPI_DW_CORE);
+
+int dw_spi_suspend_host(struct dw_spi *dws)
+{
+ int ret;
+
+ ret = spi_controller_suspend(dws->master);
+ if (ret)
+ return ret;
+
+ dw_spi_shutdown_chip(dws);
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(dw_spi_suspend_host, SPI_DW_CORE);
+
+int dw_spi_resume_host(struct dw_spi *dws)
+{
+ dw_spi_hw_init(&dws->master->dev, dws);
+ return spi_controller_resume(dws->master);
+}
+EXPORT_SYMBOL_NS_GPL(dw_spi_resume_host, SPI_DW_CORE);
+
+MODULE_AUTHOR("Feng Tang <feng.tang@intel.com>");
+MODULE_DESCRIPTION("Driver for DesignWare SPI controller core");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-dw-dma.c b/drivers/spi/spi-dw-dma.c
new file mode 100644
index 000000000..ababb910b
--- /dev/null
+++ b/drivers/spi/spi-dw-dma.c
@@ -0,0 +1,671 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Special handling for DW DMA core
+ *
+ * Copyright (c) 2009, 2014 Intel Corporation.
+ */
+
+#include <linux/completion.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/irqreturn.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_data/dma-dw.h>
+#include <linux/spi/spi.h>
+#include <linux/types.h>
+
+#include "spi-dw.h"
+
+#define DW_SPI_RX_BUSY 0
+#define DW_SPI_RX_BURST_LEVEL 16
+#define DW_SPI_TX_BUSY 1
+#define DW_SPI_TX_BURST_LEVEL 16
+
+static bool dw_spi_dma_chan_filter(struct dma_chan *chan, void *param)
+{
+ struct dw_dma_slave *s = param;
+
+ if (s->dma_dev != chan->device->dev)
+ return false;
+
+ chan->private = s;
+ return true;
+}
+
+static void dw_spi_dma_maxburst_init(struct dw_spi *dws)
+{
+ struct dma_slave_caps caps;
+ u32 max_burst, def_burst;
+ int ret;
+
+ def_burst = dws->fifo_len / 2;
+
+ ret = dma_get_slave_caps(dws->rxchan, &caps);
+ if (!ret && caps.max_burst)
+ max_burst = caps.max_burst;
+ else
+ max_burst = DW_SPI_RX_BURST_LEVEL;
+
+ dws->rxburst = min(max_burst, def_burst);
+ dw_writel(dws, DW_SPI_DMARDLR, dws->rxburst - 1);
+
+ ret = dma_get_slave_caps(dws->txchan, &caps);
+ if (!ret && caps.max_burst)
+ max_burst = caps.max_burst;
+ else
+ max_burst = DW_SPI_TX_BURST_LEVEL;
+
+ /*
+ * Having an Rx DMA channel serviced with higher priority than a Tx DMA
+ * channel might not be enough to provide a well-balanced DMA-based
+ * SPI transfer interface. There might still be moments when the Tx DMA
+ * channel is occasionally handled faster than the Rx DMA channel.
+ * That in its turn will eventually cause the SPI Rx FIFO to overflow if
+ * the SPI bus speed is high enough to fill the SPI Rx FIFO before it is
+ * cleared by the Rx DMA channel. In order to fix the problem, the Tx
+ * DMA activity is intentionally slowed down by limiting the effective
+ * SPI Tx FIFO depth to a value twice the size of the Tx burst length.
+ */
+ dws->txburst = min(max_burst, def_burst);
+ dw_writel(dws, DW_SPI_DMATDLR, dws->txburst);
+}
+
+static void dw_spi_dma_sg_burst_init(struct dw_spi *dws)
+{
+ struct dma_slave_caps tx = {0}, rx = {0};
+
+ dma_get_slave_caps(dws->txchan, &tx);
+ dma_get_slave_caps(dws->rxchan, &rx);
+
+ if (tx.max_sg_burst > 0 && rx.max_sg_burst > 0)
+ dws->dma_sg_burst = min(tx.max_sg_burst, rx.max_sg_burst);
+ else if (tx.max_sg_burst > 0)
+ dws->dma_sg_burst = tx.max_sg_burst;
+ else if (rx.max_sg_burst > 0)
+ dws->dma_sg_burst = rx.max_sg_burst;
+ else
+ dws->dma_sg_burst = 0;
+}
+
+static int dw_spi_dma_init_mfld(struct device *dev, struct dw_spi *dws)
+{
+ struct dw_dma_slave dma_tx = { .dst_id = 1 }, *tx = &dma_tx;
+ struct dw_dma_slave dma_rx = { .src_id = 0 }, *rx = &dma_rx;
+ struct pci_dev *dma_dev;
+ dma_cap_mask_t mask;
+
+ /*
+ * Get the PCI device for the DMA controller; currently it can only
+ * be the DMA controller of Medfield.
+ */
+ dma_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0827, NULL);
+ if (!dma_dev)
+ return -ENODEV;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ /* 1. Init rx channel */
+ rx->dma_dev = &dma_dev->dev;
+ dws->rxchan = dma_request_channel(mask, dw_spi_dma_chan_filter, rx);
+ if (!dws->rxchan)
+ goto err_exit;
+
+ /* 2. Init tx channel */
+ tx->dma_dev = &dma_dev->dev;
+ dws->txchan = dma_request_channel(mask, dw_spi_dma_chan_filter, tx);
+ if (!dws->txchan)
+ goto free_rxchan;
+
+ dws->master->dma_rx = dws->rxchan;
+ dws->master->dma_tx = dws->txchan;
+
+ init_completion(&dws->dma_completion);
+
+ dw_spi_dma_maxburst_init(dws);
+
+ dw_spi_dma_sg_burst_init(dws);
+
+ pci_dev_put(dma_dev);
+
+ return 0;
+
+free_rxchan:
+ dma_release_channel(dws->rxchan);
+ dws->rxchan = NULL;
+err_exit:
+ pci_dev_put(dma_dev);
+ return -EBUSY;
+}
+
+static int dw_spi_dma_init_generic(struct device *dev, struct dw_spi *dws)
+{
+ int ret;
+
+ dws->rxchan = dma_request_chan(dev, "rx");
+ if (IS_ERR(dws->rxchan)) {
+ ret = PTR_ERR(dws->rxchan);
+ dws->rxchan = NULL;
+ goto err_exit;
+ }
+
+ dws->txchan = dma_request_chan(dev, "tx");
+ if (IS_ERR(dws->txchan)) {
+ ret = PTR_ERR(dws->txchan);
+ dws->txchan = NULL;
+ goto free_rxchan;
+ }
+
+ dws->master->dma_rx = dws->rxchan;
+ dws->master->dma_tx = dws->txchan;
+
+ init_completion(&dws->dma_completion);
+
+ dw_spi_dma_maxburst_init(dws);
+
+ dw_spi_dma_sg_burst_init(dws);
+
+ return 0;
+
+free_rxchan:
+ dma_release_channel(dws->rxchan);
+ dws->rxchan = NULL;
+err_exit:
+ return ret;
+}
+
+static void dw_spi_dma_exit(struct dw_spi *dws)
+{
+ if (dws->txchan) {
+ dmaengine_terminate_sync(dws->txchan);
+ dma_release_channel(dws->txchan);
+ }
+
+ if (dws->rxchan) {
+ dmaengine_terminate_sync(dws->rxchan);
+ dma_release_channel(dws->rxchan);
+ }
+}
+
+static irqreturn_t dw_spi_dma_transfer_handler(struct dw_spi *dws)
+{
+ dw_spi_check_status(dws, false);
+
+ complete(&dws->dma_completion);
+
+ return IRQ_HANDLED;
+}
+
+static bool dw_spi_can_dma(struct spi_controller *master,
+ struct spi_device *spi, struct spi_transfer *xfer)
+{
+ struct dw_spi *dws = spi_controller_get_devdata(master);
+
+ return xfer->len > dws->fifo_len;
+}
+
+static enum dma_slave_buswidth dw_spi_dma_convert_width(u8 n_bytes)
+{
+ if (n_bytes == 1)
+ return DMA_SLAVE_BUSWIDTH_1_BYTE;
+ else if (n_bytes == 2)
+ return DMA_SLAVE_BUSWIDTH_2_BYTES;
+
+ return DMA_SLAVE_BUSWIDTH_UNDEFINED;
+}
+
+static int dw_spi_dma_wait(struct dw_spi *dws, unsigned int len, u32 speed)
+{
+ unsigned long long ms;
+
+ ms = len * MSEC_PER_SEC * BITS_PER_BYTE;
+ do_div(ms, speed);
+ ms += ms + 200;
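+ /*
+ * E.g. (illustrative numbers): a 1 KiB transfer at 1 MHz takes about
+ * 8 ms on the wire, so the timeout becomes 2 * 8 + 200 = 216 ms.
+ */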
+
+ if (ms > UINT_MAX)
+ ms = UINT_MAX;
+
+ ms = wait_for_completion_timeout(&dws->dma_completion,
+ msecs_to_jiffies(ms));
+
+ if (ms == 0) {
+ dev_err(&dws->master->cur_msg->spi->dev,
+ "DMA transaction timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static inline bool dw_spi_dma_tx_busy(struct dw_spi *dws)
+{
+ return !(dw_readl(dws, DW_SPI_SR) & DW_SPI_SR_TF_EMPT);
+}
+
+static int dw_spi_dma_wait_tx_done(struct dw_spi *dws,
+ struct spi_transfer *xfer)
+{
+ int retry = DW_SPI_WAIT_RETRIES;
+ struct spi_delay delay;
+ u32 nents;
+
+ nents = dw_readl(dws, DW_SPI_TXFLR);
+ delay.unit = SPI_DELAY_UNIT_SCK;
+ delay.value = nents * dws->n_bytes * BITS_PER_BYTE;
+
+ while (dw_spi_dma_tx_busy(dws) && retry--)
+ spi_delay_exec(&delay, xfer);
+
+ if (retry < 0) {
+ dev_err(&dws->master->dev, "Tx hanged up\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/*
+ * dws->dma_chan_busy is set before the DMA transfer starts; the callback for
+ * the Tx channel will clear the corresponding bit.
+ */
+static void dw_spi_dma_tx_done(void *arg)
+{
+ struct dw_spi *dws = arg;
+
+ clear_bit(DW_SPI_TX_BUSY, &dws->dma_chan_busy);
+ if (test_bit(DW_SPI_RX_BUSY, &dws->dma_chan_busy))
+ return;
+
+ complete(&dws->dma_completion);
+}
+
+static int dw_spi_dma_config_tx(struct dw_spi *dws)
+{
+ struct dma_slave_config txconf;
+
+ memset(&txconf, 0, sizeof(txconf));
+ txconf.direction = DMA_MEM_TO_DEV;
+ txconf.dst_addr = dws->dma_addr;
+ txconf.dst_maxburst = dws->txburst;
+ txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ txconf.dst_addr_width = dw_spi_dma_convert_width(dws->n_bytes);
+ txconf.device_fc = false;
+
+ return dmaengine_slave_config(dws->txchan, &txconf);
+}
+
+static int dw_spi_dma_submit_tx(struct dw_spi *dws, struct scatterlist *sgl,
+ unsigned int nents)
+{
+ struct dma_async_tx_descriptor *txdesc;
+ dma_cookie_t cookie;
+ int ret;
+
+ txdesc = dmaengine_prep_slave_sg(dws->txchan, sgl, nents,
+ DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!txdesc)
+ return -ENOMEM;
+
+ txdesc->callback = dw_spi_dma_tx_done;
+ txdesc->callback_param = dws;
+
+ cookie = dmaengine_submit(txdesc);
+ ret = dma_submit_error(cookie);
+ if (ret) {
+ dmaengine_terminate_sync(dws->txchan);
+ return ret;
+ }
+
+ set_bit(DW_SPI_TX_BUSY, &dws->dma_chan_busy);
+
+ return 0;
+}
+
+static inline bool dw_spi_dma_rx_busy(struct dw_spi *dws)
+{
+ return !!(dw_readl(dws, DW_SPI_SR) & DW_SPI_SR_RF_NOT_EMPT);
+}
+
+static int dw_spi_dma_wait_rx_done(struct dw_spi *dws)
+{
+ int retry = DW_SPI_WAIT_RETRIES;
+ struct spi_delay delay;
+ unsigned long ns, us;
+ u32 nents;
+
+ /*
+ * It's unlikely that the DMA engine is still doing the data fetching, but
+ * if it is, let's give it some reasonable time. The timeout calculation
+ * is based on the synchronous APB/SSI reference clock rate, on the
+ * number of data entries left in the Rx FIFO, times the number of clock
+ * periods normally needed for a single APB read/write transaction
+ * without PREADY signal utilized (which is true for the DW APB SSI
+ * controller).
+ */
+ nents = dw_readl(dws, DW_SPI_RXFLR);
+ ns = 4U * NSEC_PER_SEC / dws->max_freq * nents;
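+ /*
+ * E.g. (illustrative numbers): 8 entries left with a 100 MHz reference
+ * clock give 4 * 10 ns * 8 = 320 ns, which is handled with the
+ * nanosecond delay unit below.
+ */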
+ if (ns <= NSEC_PER_USEC) {
+ delay.unit = SPI_DELAY_UNIT_NSECS;
+ delay.value = ns;
+ } else {
+ us = DIV_ROUND_UP(ns, NSEC_PER_USEC);
+ delay.unit = SPI_DELAY_UNIT_USECS;
+ delay.value = clamp_val(us, 0, USHRT_MAX);
+ }
+
+ while (dw_spi_dma_rx_busy(dws) && retry--)
+ spi_delay_exec(&delay, NULL);
+
+ if (retry < 0) {
+ dev_err(&dws->master->dev, "Rx hanged up\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/*
+ * dws->dma_chan_busy is set before the DMA transfer starts; the callback for
+ * the Rx channel will clear the corresponding bit.
+ */
+static void dw_spi_dma_rx_done(void *arg)
+{
+ struct dw_spi *dws = arg;
+
+ clear_bit(DW_SPI_RX_BUSY, &dws->dma_chan_busy);
+ if (test_bit(DW_SPI_TX_BUSY, &dws->dma_chan_busy))
+ return;
+
+ complete(&dws->dma_completion);
+}
+
+static int dw_spi_dma_config_rx(struct dw_spi *dws)
+{
+ struct dma_slave_config rxconf;
+
+ memset(&rxconf, 0, sizeof(rxconf));
+ rxconf.direction = DMA_DEV_TO_MEM;
+ rxconf.src_addr = dws->dma_addr;
+ rxconf.src_maxburst = dws->rxburst;
+ rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ rxconf.src_addr_width = dw_spi_dma_convert_width(dws->n_bytes);
+ rxconf.device_fc = false;
+
+ return dmaengine_slave_config(dws->rxchan, &rxconf);
+}
+
+static int dw_spi_dma_submit_rx(struct dw_spi *dws, struct scatterlist *sgl,
+ unsigned int nents)
+{
+ struct dma_async_tx_descriptor *rxdesc;
+ dma_cookie_t cookie;
+ int ret;
+
+ rxdesc = dmaengine_prep_slave_sg(dws->rxchan, sgl, nents,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!rxdesc)
+ return -ENOMEM;
+
+ rxdesc->callback = dw_spi_dma_rx_done;
+ rxdesc->callback_param = dws;
+
+ cookie = dmaengine_submit(rxdesc);
+ ret = dma_submit_error(cookie);
+ if (ret) {
+ dmaengine_terminate_sync(dws->rxchan);
+ return ret;
+ }
+
+ set_bit(DW_SPI_RX_BUSY, &dws->dma_chan_busy);
+
+ return 0;
+}
+
+static int dw_spi_dma_setup(struct dw_spi *dws, struct spi_transfer *xfer)
+{
+ u16 imr, dma_ctrl;
+ int ret;
+
+ if (!xfer->tx_buf)
+ return -EINVAL;
+
+ /* Setup DMA channels */
+ ret = dw_spi_dma_config_tx(dws);
+ if (ret)
+ return ret;
+
+ if (xfer->rx_buf) {
+ ret = dw_spi_dma_config_rx(dws);
+ if (ret)
+ return ret;
+ }
+
+ /* Set the DMA handshaking interface */
+ dma_ctrl = DW_SPI_DMACR_TDMAE;
+ if (xfer->rx_buf)
+ dma_ctrl |= DW_SPI_DMACR_RDMAE;
+ dw_writel(dws, DW_SPI_DMACR, dma_ctrl);
+
+ /* Set the interrupt mask */
+ imr = DW_SPI_INT_TXOI;
+ if (xfer->rx_buf)
+ imr |= DW_SPI_INT_RXUI | DW_SPI_INT_RXOI;
+ dw_spi_umask_intr(dws, imr);
+
+ reinit_completion(&dws->dma_completion);
+
+ dws->transfer_handler = dw_spi_dma_transfer_handler;
+
+ return 0;
+}
+
+static int dw_spi_dma_transfer_all(struct dw_spi *dws,
+ struct spi_transfer *xfer)
+{
+ int ret;
+
+ /* Submit the DMA Tx transfer */
+ ret = dw_spi_dma_submit_tx(dws, xfer->tx_sg.sgl, xfer->tx_sg.nents);
+ if (ret)
+ goto err_clear_dmac;
+
+ /* Submit the DMA Rx transfer if required */
+ if (xfer->rx_buf) {
+ ret = dw_spi_dma_submit_rx(dws, xfer->rx_sg.sgl,
+ xfer->rx_sg.nents);
+ if (ret)
+ goto err_clear_dmac;
+
+ /* Rx must be started before Tx since inbound data arrives as soon as Tx starts */
+ dma_async_issue_pending(dws->rxchan);
+ }
+
+ dma_async_issue_pending(dws->txchan);
+
+ ret = dw_spi_dma_wait(dws, xfer->len, xfer->effective_speed_hz);
+
+err_clear_dmac:
+ dw_writel(dws, DW_SPI_DMACR, 0);
+
+ return ret;
+}
+
+/*
+ * If at least one of the requested DMA channels doesn't support the hardware
+ * accelerated SG list entries traversal, the DMA driver will most likely work
+ * that around by performing IRQ-based SG list entries resubmission. That
+ * might and will cause a problem if the DMA Tx channel is recharged and
+ * re-executed before the Rx DMA channel. Due to the non-deterministic
+ * IRQ-handler execution latency, the DMA Tx channel will start pushing data
+ * to the SPI bus before the Rx DMA channel is even reinitialized with the
+ * next inbound SG list entry. By doing so the DMA Tx channel will implicitly
+ * start filling the DW APB SSI Rx FIFO up, which, while the DMA Rx channel
+ * is being recharged and re-executed, will eventually overflow.
+ *
+ * In order to solve the problem we have to feed the DMA engine with SG list
+ * entries one-by-one. That keeps the DW APB SSI Tx and Rx FIFOs synchronized
+ * and prevents the Rx FIFO overflow. Since in general the tx_sg and rx_sg
+ * lists may have different numbers of entries of different lengths (though
+ * the total length should match), let's virtually split the SG lists into a
+ * set of DMA transfers, each of which has a length equal to the minimum of
+ * the current SG entries' lengths. An ASCII sketch of the implemented
+ * algorithm follows:
+ * xfer->len
+ * |___________|
+ * tx_sg list: |___|____|__|
+ * rx_sg list: |_|____|____|
+ * DMA transfers: |_|_|__|_|__|
+ *
+ * Note that for this workaround to solve the denoted problem, the DMA
+ * engine driver should properly initialize the max_sg_burst capability and set
+ * the DMA device max segment size parameter to the maximum data block size the
+ * DMA engine supports.
+ */
+
+static int dw_spi_dma_transfer_one(struct dw_spi *dws,
+ struct spi_transfer *xfer)
+{
+ struct scatterlist *tx_sg = NULL, *rx_sg = NULL, tx_tmp, rx_tmp;
+ unsigned int tx_len = 0, rx_len = 0;
+ unsigned int base, len;
+ int ret;
+
+ sg_init_table(&tx_tmp, 1);
+ sg_init_table(&rx_tmp, 1);
+
+ for (base = 0, len = 0; base < xfer->len; base += len) {
+ /* Fetch next Tx DMA data chunk */
+ if (!tx_len) {
+ tx_sg = !tx_sg ? &xfer->tx_sg.sgl[0] : sg_next(tx_sg);
+ sg_dma_address(&tx_tmp) = sg_dma_address(tx_sg);
+ tx_len = sg_dma_len(tx_sg);
+ }
+
+ /* Fetch next Rx DMA data chunk */
+ if (!rx_len) {
+ rx_sg = !rx_sg ? &xfer->rx_sg.sgl[0] : sg_next(rx_sg);
+ sg_dma_address(&rx_tmp) = sg_dma_address(rx_sg);
+ rx_len = sg_dma_len(rx_sg);
+ }
+
+ len = min(tx_len, rx_len);
+
+ sg_dma_len(&tx_tmp) = len;
+ sg_dma_len(&rx_tmp) = len;
+
+ /* Submit DMA Tx transfer */
+ ret = dw_spi_dma_submit_tx(dws, &tx_tmp, 1);
+ if (ret)
+ break;
+
+ /* Submit DMA Rx transfer */
+ ret = dw_spi_dma_submit_rx(dws, &rx_tmp, 1);
+ if (ret)
+ break;
+
+ /* Rx must be started before Tx since inbound data arrives as soon as Tx starts */
+ dma_async_issue_pending(dws->rxchan);
+
+ dma_async_issue_pending(dws->txchan);
+
+ /*
+ * Here we only need to wait for the DMA transfer to be
+ * finished since the SPI controller is kept enabled during the
+ * procedure this loop implements and there is no risk of losing
+ * the data left in the Tx/Rx FIFOs.
+ */
+ ret = dw_spi_dma_wait(dws, len, xfer->effective_speed_hz);
+ if (ret)
+ break;
+
+ reinit_completion(&dws->dma_completion);
+
+ sg_dma_address(&tx_tmp) += len;
+ sg_dma_address(&rx_tmp) += len;
+ tx_len -= len;
+ rx_len -= len;
+ }
+
+ dw_writel(dws, DW_SPI_DMACR, 0);
+
+ return ret;
+}
+
+static int dw_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer)
+{
+ unsigned int nents;
+ int ret;
+
+ nents = max(xfer->tx_sg.nents, xfer->rx_sg.nents);
+
+ /*
+ * Execute normal DMA-based transfer (which submits the Rx and Tx SG
+ * lists directly to the DMA engine at once) if either full hardware
+ * accelerated SG list traversal is supported by both channels, or the
+ * Tx-only SPI transfer is requested, or the DMA engine is capable of
+ * handling both SG lists on a hardware accelerated basis.
+ */
+ if (!dws->dma_sg_burst || !xfer->rx_buf || nents <= dws->dma_sg_burst)
+ ret = dw_spi_dma_transfer_all(dws, xfer);
+ else
+ ret = dw_spi_dma_transfer_one(dws, xfer);
+ if (ret)
+ return ret;
+
+ if (dws->master->cur_msg->status == -EINPROGRESS) {
+ ret = dw_spi_dma_wait_tx_done(dws, xfer);
+ if (ret)
+ return ret;
+ }
+
+ if (xfer->rx_buf && dws->master->cur_msg->status == -EINPROGRESS)
+ ret = dw_spi_dma_wait_rx_done(dws);
+
+ return ret;
+}
+
+static void dw_spi_dma_stop(struct dw_spi *dws)
+{
+ if (test_bit(DW_SPI_TX_BUSY, &dws->dma_chan_busy)) {
+ dmaengine_terminate_sync(dws->txchan);
+ clear_bit(DW_SPI_TX_BUSY, &dws->dma_chan_busy);
+ }
+ if (test_bit(DW_SPI_RX_BUSY, &dws->dma_chan_busy)) {
+ dmaengine_terminate_sync(dws->rxchan);
+ clear_bit(DW_SPI_RX_BUSY, &dws->dma_chan_busy);
+ }
+}
+
+static const struct dw_spi_dma_ops dw_spi_dma_mfld_ops = {
+ .dma_init = dw_spi_dma_init_mfld,
+ .dma_exit = dw_spi_dma_exit,
+ .dma_setup = dw_spi_dma_setup,
+ .can_dma = dw_spi_can_dma,
+ .dma_transfer = dw_spi_dma_transfer,
+ .dma_stop = dw_spi_dma_stop,
+};
+
+void dw_spi_dma_setup_mfld(struct dw_spi *dws)
+{
+ dws->dma_ops = &dw_spi_dma_mfld_ops;
+}
+EXPORT_SYMBOL_NS_GPL(dw_spi_dma_setup_mfld, SPI_DW_CORE);
+
+static const struct dw_spi_dma_ops dw_spi_dma_generic_ops = {
+ .dma_init = dw_spi_dma_init_generic,
+ .dma_exit = dw_spi_dma_exit,
+ .dma_setup = dw_spi_dma_setup,
+ .can_dma = dw_spi_can_dma,
+ .dma_transfer = dw_spi_dma_transfer,
+ .dma_stop = dw_spi_dma_stop,
+};
+
+void dw_spi_dma_setup_generic(struct dw_spi *dws)
+{
+ dws->dma_ops = &dw_spi_dma_generic_ops;
+}
+EXPORT_SYMBOL_NS_GPL(dw_spi_dma_setup_generic, SPI_DW_CORE);
diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c
new file mode 100644
index 000000000..8046e9138
--- /dev/null
+++ b/drivers/spi/spi-dw-mmio.c
@@ -0,0 +1,403 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Memory-mapped interface driver for DW SPI Core
+ *
+ * Copyright (c) 2010, Octasic semiconductor.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/scatterlist.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/acpi.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#include "spi-dw.h"
+
+#define DRIVER_NAME "dw_spi_mmio"
+
+struct dw_spi_mmio {
+ struct dw_spi dws;
+ struct clk *clk;
+ struct clk *pclk;
+ void *priv;
+ struct reset_control *rstc;
+};
+
+#define MSCC_CPU_SYSTEM_CTRL_GENERAL_CTRL 0x24
+#define OCELOT_IF_SI_OWNER_OFFSET 4
+#define JAGUAR2_IF_SI_OWNER_OFFSET 6
+#define MSCC_IF_SI_OWNER_MASK GENMASK(1, 0)
+#define MSCC_IF_SI_OWNER_SISL 0
+#define MSCC_IF_SI_OWNER_SIBM 1
+#define MSCC_IF_SI_OWNER_SIMC 2
+
+#define MSCC_SPI_MST_SW_MODE 0x14
+#define MSCC_SPI_MST_SW_MODE_SW_PIN_CTRL_MODE BIT(13)
+#define MSCC_SPI_MST_SW_MODE_SW_SPI_CS(x) (x << 5)
+
+#define SPARX5_FORCE_ENA 0xa4
+#define SPARX5_FORCE_VAL 0xa8
+
+struct dw_spi_mscc {
+ struct regmap *syscon;
+ void __iomem *spi_mst; /* Not sparx5 */
+};
+
+/*
+ * The DesignWare SPI controller (referred to as master in the documentation)
+ * automatically deasserts chip select when the Tx FIFO is empty. The chip
+ * selects then need to be either driven as GPIOs or, for the first four,
+ * driven via the SPI boot controller registers. The final chip select is an
+ * OR gate between the DesignWare SPI controller and the SPI boot controller.
+ */
+static void dw_spi_mscc_set_cs(struct spi_device *spi, bool enable)
+{
+ struct dw_spi *dws = spi_master_get_devdata(spi->master);
+ struct dw_spi_mmio *dwsmmio = container_of(dws, struct dw_spi_mmio, dws);
+ struct dw_spi_mscc *dwsmscc = dwsmmio->priv;
+ u32 cs = spi->chip_select;
+
+ if (cs < 4) {
+ u32 sw_mode = MSCC_SPI_MST_SW_MODE_SW_PIN_CTRL_MODE;
+
+ if (!enable)
+ sw_mode |= MSCC_SPI_MST_SW_MODE_SW_SPI_CS(BIT(cs));
+
+ writel(sw_mode, dwsmscc->spi_mst + MSCC_SPI_MST_SW_MODE);
+ }
+
+ dw_spi_set_cs(spi, enable);
+}
+
+static int dw_spi_mscc_init(struct platform_device *pdev,
+ struct dw_spi_mmio *dwsmmio,
+ const char *cpu_syscon, u32 if_si_owner_offset)
+{
+ struct dw_spi_mscc *dwsmscc;
+
+ dwsmscc = devm_kzalloc(&pdev->dev, sizeof(*dwsmscc), GFP_KERNEL);
+ if (!dwsmscc)
+ return -ENOMEM;
+
+ dwsmscc->spi_mst = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(dwsmscc->spi_mst)) {
+ dev_err(&pdev->dev, "SPI_MST region map failed\n");
+ return PTR_ERR(dwsmscc->spi_mst);
+ }
+
+ dwsmscc->syscon = syscon_regmap_lookup_by_compatible(cpu_syscon);
+ if (IS_ERR(dwsmscc->syscon))
+ return PTR_ERR(dwsmscc->syscon);
+
+ /* Deassert all CS */
+ writel(0, dwsmscc->spi_mst + MSCC_SPI_MST_SW_MODE);
+
+ /* Select the owner of the SI interface */
+ regmap_update_bits(dwsmscc->syscon, MSCC_CPU_SYSTEM_CTRL_GENERAL_CTRL,
+ MSCC_IF_SI_OWNER_MASK << if_si_owner_offset,
+ MSCC_IF_SI_OWNER_SIMC << if_si_owner_offset);
+
+ dwsmmio->dws.set_cs = dw_spi_mscc_set_cs;
+ dwsmmio->priv = dwsmscc;
+
+ return 0;
+}
+
+static int dw_spi_mscc_ocelot_init(struct platform_device *pdev,
+ struct dw_spi_mmio *dwsmmio)
+{
+ return dw_spi_mscc_init(pdev, dwsmmio, "mscc,ocelot-cpu-syscon",
+ OCELOT_IF_SI_OWNER_OFFSET);
+}
+
+static int dw_spi_mscc_jaguar2_init(struct platform_device *pdev,
+ struct dw_spi_mmio *dwsmmio)
+{
+ return dw_spi_mscc_init(pdev, dwsmmio, "mscc,jaguar2-cpu-syscon",
+ JAGUAR2_IF_SI_OWNER_OFFSET);
+}
+
+/*
+ * The DesignWare SPI controller (referred to as master in the
+ * documentation) automatically deasserts chip select when the Tx FIFO
+ * is empty. The chip selects then need to be driven by a CS override
+ * register. The enable argument is an active-low signal.
+ */
+static void dw_spi_sparx5_set_cs(struct spi_device *spi, bool enable)
+{
+ struct dw_spi *dws = spi_master_get_devdata(spi->master);
+ struct dw_spi_mmio *dwsmmio = container_of(dws, struct dw_spi_mmio, dws);
+ struct dw_spi_mscc *dwsmscc = dwsmmio->priv;
+ u8 cs = spi->chip_select;
+
+ if (!enable) {
+ /* CS override drive enable */
+ regmap_write(dwsmscc->syscon, SPARX5_FORCE_ENA, 1);
+ /* Now set CSx enabled */
+ regmap_write(dwsmscc->syscon, SPARX5_FORCE_VAL, ~BIT(cs));
+ /* Allow settle */
+ usleep_range(1, 5);
+ } else {
+ /* CS value */
+ regmap_write(dwsmscc->syscon, SPARX5_FORCE_VAL, ~0);
+ /* Allow settle */
+ usleep_range(1, 5);
+ /* CS override drive disable */
+ regmap_write(dwsmscc->syscon, SPARX5_FORCE_ENA, 0);
+ }
+
+ dw_spi_set_cs(spi, enable);
+}
+
+static int dw_spi_mscc_sparx5_init(struct platform_device *pdev,
+ struct dw_spi_mmio *dwsmmio)
+{
+ const char *syscon_name = "microchip,sparx5-cpu-syscon";
+ struct device *dev = &pdev->dev;
+ struct dw_spi_mscc *dwsmscc;
+
+ if (!IS_ENABLED(CONFIG_SPI_MUX)) {
+ dev_err(dev, "This driver needs CONFIG_SPI_MUX\n");
+ return -EOPNOTSUPP;
+ }
+
+ dwsmscc = devm_kzalloc(dev, sizeof(*dwsmscc), GFP_KERNEL);
+ if (!dwsmscc)
+ return -ENOMEM;
+
+ dwsmscc->syscon =
+ syscon_regmap_lookup_by_compatible(syscon_name);
+ if (IS_ERR(dwsmscc->syscon)) {
+ dev_err(dev, "No syscon map %s\n", syscon_name);
+ return PTR_ERR(dwsmscc->syscon);
+ }
+
+ dwsmmio->dws.set_cs = dw_spi_sparx5_set_cs;
+ dwsmmio->priv = dwsmscc;
+
+ return 0;
+}
+
+static int dw_spi_alpine_init(struct platform_device *pdev,
+ struct dw_spi_mmio *dwsmmio)
+{
+ dwsmmio->dws.caps = DW_SPI_CAP_CS_OVERRIDE;
+
+ return 0;
+}
+
+static int dw_spi_pssi_init(struct platform_device *pdev,
+ struct dw_spi_mmio *dwsmmio)
+{
+ dw_spi_dma_setup_generic(&dwsmmio->dws);
+
+ return 0;
+}
+
+static int dw_spi_hssi_init(struct platform_device *pdev,
+ struct dw_spi_mmio *dwsmmio)
+{
+ dwsmmio->dws.ip = DW_HSSI_ID;
+
+ dw_spi_dma_setup_generic(&dwsmmio->dws);
+
+ return 0;
+}
+
+static int dw_spi_intel_init(struct platform_device *pdev,
+ struct dw_spi_mmio *dwsmmio)
+{
+ dwsmmio->dws.ip = DW_HSSI_ID;
+
+ return 0;
+}
+
+/*
+ * DMA-based mem ops are not configured for this device and are not tested.
+ */
+static int dw_spi_mountevans_imc_init(struct platform_device *pdev,
+ struct dw_spi_mmio *dwsmmio)
+{
+ /*
+ * The Intel Mount Evans SoC's Integrated Management Complex DW
+	 * apb_ssi_v4.02a controller has an erratum where a full TX FIFO can
+ * result in data corruption. The suggested workaround is to never
+ * completely fill the FIFO. The TX FIFO has a size of 32 so the
+ * fifo_len is set to 31.
+ */
+ dwsmmio->dws.fifo_len = 31;
+
+ return 0;
+}
+
+static int dw_spi_canaan_k210_init(struct platform_device *pdev,
+ struct dw_spi_mmio *dwsmmio)
+{
+ /*
+ * The Canaan Kendryte K210 SoC DW apb_ssi v4 spi controller is
+ * documented to have a 32 word deep TX and RX FIFO, which
+ * spi_hw_init() detects. However, when the RX FIFO is filled up to
+ * 32 entries (RXFLR = 32), an RX FIFO overrun error occurs. Avoid this
+	 * problem by forcing fifo_len to 31.
+ */
+ dwsmmio->dws.fifo_len = 31;
+
+ return 0;
+}
+
+static int dw_spi_mmio_probe(struct platform_device *pdev)
+{
+ int (*init_func)(struct platform_device *pdev,
+ struct dw_spi_mmio *dwsmmio);
+ struct dw_spi_mmio *dwsmmio;
+ struct resource *mem;
+ struct dw_spi *dws;
+ int ret;
+ int num_cs;
+
+ dwsmmio = devm_kzalloc(&pdev->dev, sizeof(struct dw_spi_mmio),
+ GFP_KERNEL);
+ if (!dwsmmio)
+ return -ENOMEM;
+
+ dws = &dwsmmio->dws;
+
+ /* Get basic io resource and map it */
+ dws->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &mem);
+ if (IS_ERR(dws->regs))
+ return PTR_ERR(dws->regs);
+
+ dws->paddr = mem->start;
+
+ dws->irq = platform_get_irq(pdev, 0);
+ if (dws->irq < 0)
+ return dws->irq; /* -ENXIO */
+
+ dwsmmio->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(dwsmmio->clk))
+ return PTR_ERR(dwsmmio->clk);
+ ret = clk_prepare_enable(dwsmmio->clk);
+ if (ret)
+ return ret;
+
+ /* Optional clock needed to access the registers */
+ dwsmmio->pclk = devm_clk_get_optional(&pdev->dev, "pclk");
+ if (IS_ERR(dwsmmio->pclk)) {
+ ret = PTR_ERR(dwsmmio->pclk);
+ goto out_clk;
+ }
+ ret = clk_prepare_enable(dwsmmio->pclk);
+ if (ret)
+ goto out_clk;
+
+ /* find an optional reset controller */
+ dwsmmio->rstc = devm_reset_control_get_optional_exclusive(&pdev->dev, "spi");
+ if (IS_ERR(dwsmmio->rstc)) {
+ ret = PTR_ERR(dwsmmio->rstc);
+ goto out_clk;
+ }
+ reset_control_deassert(dwsmmio->rstc);
+
+ dws->bus_num = pdev->id;
+
+ dws->max_freq = clk_get_rate(dwsmmio->clk);
+
+ device_property_read_u32(&pdev->dev, "reg-io-width", &dws->reg_io_width);
+
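+	/* Default to 4 chip selects; the "num-cs" property overrides it */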
+ num_cs = 4;
+
+ device_property_read_u32(&pdev->dev, "num-cs", &num_cs);
+
+ dws->num_cs = num_cs;
+
+ init_func = device_get_match_data(&pdev->dev);
+ if (init_func) {
+ ret = init_func(pdev, dwsmmio);
+ if (ret)
+ goto out;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+
+ ret = dw_spi_add_host(&pdev->dev, dws);
+ if (ret)
+ goto out;
+
+ platform_set_drvdata(pdev, dwsmmio);
+ return 0;
+
+out:
+ pm_runtime_disable(&pdev->dev);
+ clk_disable_unprepare(dwsmmio->pclk);
+out_clk:
+ clk_disable_unprepare(dwsmmio->clk);
+ reset_control_assert(dwsmmio->rstc);
+
+ return ret;
+}
+
+static int dw_spi_mmio_remove(struct platform_device *pdev)
+{
+ struct dw_spi_mmio *dwsmmio = platform_get_drvdata(pdev);
+
+ dw_spi_remove_host(&dwsmmio->dws);
+ pm_runtime_disable(&pdev->dev);
+ clk_disable_unprepare(dwsmmio->pclk);
+ clk_disable_unprepare(dwsmmio->clk);
+ reset_control_assert(dwsmmio->rstc);
+
+ return 0;
+}
+
+static const struct of_device_id dw_spi_mmio_of_match[] = {
+ { .compatible = "snps,dw-apb-ssi", .data = dw_spi_pssi_init},
+ { .compatible = "mscc,ocelot-spi", .data = dw_spi_mscc_ocelot_init},
+ { .compatible = "mscc,jaguar2-spi", .data = dw_spi_mscc_jaguar2_init},
+ { .compatible = "amazon,alpine-dw-apb-ssi", .data = dw_spi_alpine_init},
+ { .compatible = "renesas,rzn1-spi", .data = dw_spi_pssi_init},
+ { .compatible = "snps,dwc-ssi-1.01a", .data = dw_spi_hssi_init},
+ { .compatible = "intel,keembay-ssi", .data = dw_spi_intel_init},
+ { .compatible = "intel,thunderbay-ssi", .data = dw_spi_intel_init},
+ {
+ .compatible = "intel,mountevans-imc-ssi",
+ .data = dw_spi_mountevans_imc_init,
+ },
+	{ .compatible = "microchip,sparx5-spi", .data = dw_spi_mscc_sparx5_init},
+	{ .compatible = "canaan,k210-spi", .data = dw_spi_canaan_k210_init},
+ { /* end of table */}
+};
+MODULE_DEVICE_TABLE(of, dw_spi_mmio_of_match);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id dw_spi_mmio_acpi_match[] = {
+ {"HISI0173", (kernel_ulong_t)dw_spi_pssi_init},
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, dw_spi_mmio_acpi_match);
+#endif
+
+static struct platform_driver dw_spi_mmio_driver = {
+ .probe = dw_spi_mmio_probe,
+ .remove = dw_spi_mmio_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = dw_spi_mmio_of_match,
+ .acpi_match_table = ACPI_PTR(dw_spi_mmio_acpi_match),
+ },
+};
+module_platform_driver(dw_spi_mmio_driver);
+
+MODULE_AUTHOR("Jean-Hugues Deschenes <jean-hugues.deschenes@octasic.com>");
+MODULE_DESCRIPTION("Memory-mapped I/O interface driver for DW SPI Core");
+MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(SPI_DW_CORE);
diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
new file mode 100644
index 000000000..7c8279d13
--- /dev/null
+++ b/drivers/spi/spi-dw-pci.c
@@ -0,0 +1,215 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PCI interface driver for DW SPI Core
+ *
+ * Copyright (c) 2009, 2014 Intel Corporation.
+ */
+
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/module.h>
+
+#include "spi-dw.h"
+
+#define DRIVER_NAME "dw_spi_pci"
+
+/* HW info for MRST Clk Control Unit, 32b reg per controller */
+#define MRST_SPI_CLK_BASE	100000000	/* 100 MHz */
+#define MRST_CLK_SPI_REG 0xff11d86c
+#define CLK_SPI_BDIV_OFFSET 0
+#define CLK_SPI_BDIV_MASK 0x00000007
+#define CLK_SPI_CDIV_OFFSET 9
+#define CLK_SPI_CDIV_MASK 0x00000e00
+#define CLK_SPI_DISABLE_OFFSET 8
+
+struct dw_spi_pci_desc {
+ int (*setup)(struct dw_spi *);
+ u16 num_cs;
+ u16 bus_num;
+ u32 max_freq;
+};
+
+static int dw_spi_pci_mid_init(struct dw_spi *dws)
+{
+ void __iomem *clk_reg;
+ u32 clk_cdiv;
+
+ clk_reg = ioremap(MRST_CLK_SPI_REG, 16);
+ if (!clk_reg)
+ return -ENOMEM;
+
+ /* Get SPI controller operating freq info */
+ clk_cdiv = readl(clk_reg + dws->bus_num * sizeof(u32));
+ clk_cdiv &= CLK_SPI_CDIV_MASK;
+ clk_cdiv >>= CLK_SPI_CDIV_OFFSET;
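+	/* max_freq = 100 MHz / (CDIV + 1); e.g. a CDIV field of 3 yields 25 MHz */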
+ dws->max_freq = MRST_SPI_CLK_BASE / (clk_cdiv + 1);
+
+ iounmap(clk_reg);
+
+ dw_spi_dma_setup_mfld(dws);
+
+ return 0;
+}
+
+static int dw_spi_pci_generic_init(struct dw_spi *dws)
+{
+ dw_spi_dma_setup_generic(dws);
+
+ return 0;
+}
+
+static struct dw_spi_pci_desc dw_spi_pci_mid_desc_1 = {
+ .setup = dw_spi_pci_mid_init,
+ .num_cs = 5,
+ .bus_num = 0,
+};
+
+static struct dw_spi_pci_desc dw_spi_pci_mid_desc_2 = {
+ .setup = dw_spi_pci_mid_init,
+ .num_cs = 2,
+ .bus_num = 1,
+};
+
+static struct dw_spi_pci_desc dw_spi_pci_ehl_desc = {
+ .setup = dw_spi_pci_generic_init,
+ .num_cs = 2,
+ .bus_num = -1,
+ .max_freq = 100000000,
+};
+
+static int dw_spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct dw_spi_pci_desc *desc = (struct dw_spi_pci_desc *)ent->driver_data;
+ struct dw_spi *dws;
+ int pci_bar = 0;
+ int ret;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ dws = devm_kzalloc(&pdev->dev, sizeof(*dws), GFP_KERNEL);
+ if (!dws)
+ return -ENOMEM;
+
+ /* Get basic io resource and map it */
+ dws->paddr = pci_resource_start(pdev, pci_bar);
+ pci_set_master(pdev);
+
+ ret = pcim_iomap_regions(pdev, 1 << pci_bar, pci_name(pdev));
+ if (ret)
+ return ret;
+
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (ret < 0)
+ return ret;
+
+ dws->regs = pcim_iomap_table(pdev)[pci_bar];
+ dws->irq = pci_irq_vector(pdev, 0);
+
+ /*
+	 * Platform-specific handling, like DMA setup,
+	 * clock rate, FIFO depth.
+ */
+ if (desc) {
+ dws->num_cs = desc->num_cs;
+ dws->bus_num = desc->bus_num;
+ dws->max_freq = desc->max_freq;
+
+ if (desc->setup) {
+ ret = desc->setup(dws);
+ if (ret)
+ goto err_free_irq_vectors;
+ }
+ } else {
+ ret = -ENODEV;
+ goto err_free_irq_vectors;
+ }
+
+ ret = dw_spi_add_host(&pdev->dev, dws);
+ if (ret)
+ goto err_free_irq_vectors;
+
+ /* PCI hook and SPI hook use the same drv data */
+ pci_set_drvdata(pdev, dws);
+
+ dev_info(&pdev->dev, "found PCI SPI controller(ID: %04x:%04x)\n",
+ pdev->vendor, pdev->device);
+
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 1000);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
+ pm_runtime_allow(&pdev->dev);
+
+ return 0;
+
+err_free_irq_vectors:
+ pci_free_irq_vectors(pdev);
+ return ret;
+}
+
+static void dw_spi_pci_remove(struct pci_dev *pdev)
+{
+ struct dw_spi *dws = pci_get_drvdata(pdev);
+
+ pm_runtime_forbid(&pdev->dev);
+ pm_runtime_get_noresume(&pdev->dev);
+
+ dw_spi_remove_host(dws);
+ pci_free_irq_vectors(pdev);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int dw_spi_pci_suspend(struct device *dev)
+{
+ struct dw_spi *dws = dev_get_drvdata(dev);
+
+ return dw_spi_suspend_host(dws);
+}
+
+static int dw_spi_pci_resume(struct device *dev)
+{
+ struct dw_spi *dws = dev_get_drvdata(dev);
+
+ return dw_spi_resume_host(dws);
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(dw_spi_pci_pm_ops, dw_spi_pci_suspend, dw_spi_pci_resume);
+
+static const struct pci_device_id dw_spi_pci_ids[] = {
+ /* Intel MID platform SPI controller 0 */
+ /*
+ * The access to the device 8086:0801 is disabled by HW, since it's
+	 * Access to the device 8086:0801 is disabled by hardware, since it's
+	 * exclusively used by the SCU to communicate with the MSIC.
+ /* Intel MID platform SPI controller 1 */
+ { PCI_VDEVICE(INTEL, 0x0800), (kernel_ulong_t)&dw_spi_pci_mid_desc_1},
+ /* Intel MID platform SPI controller 2 */
+ { PCI_VDEVICE(INTEL, 0x0812), (kernel_ulong_t)&dw_spi_pci_mid_desc_2},
+ /* Intel Elkhart Lake PSE SPI controllers */
+ { PCI_VDEVICE(INTEL, 0x4b84), (kernel_ulong_t)&dw_spi_pci_ehl_desc},
+ { PCI_VDEVICE(INTEL, 0x4b85), (kernel_ulong_t)&dw_spi_pci_ehl_desc},
+ { PCI_VDEVICE(INTEL, 0x4b86), (kernel_ulong_t)&dw_spi_pci_ehl_desc},
+ { PCI_VDEVICE(INTEL, 0x4b87), (kernel_ulong_t)&dw_spi_pci_ehl_desc},
+ {},
+};
+MODULE_DEVICE_TABLE(pci, dw_spi_pci_ids);
+
+static struct pci_driver dw_spi_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = dw_spi_pci_ids,
+ .probe = dw_spi_pci_probe,
+ .remove = dw_spi_pci_remove,
+ .driver = {
+ .pm = &dw_spi_pci_pm_ops,
+ },
+};
+module_pci_driver(dw_spi_pci_driver);
+
+MODULE_AUTHOR("Feng Tang <feng.tang@intel.com>");
+MODULE_DESCRIPTION("PCI interface driver for DW SPI Core");
+MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(SPI_DW_CORE);
diff --git a/drivers/spi/spi-dw.h b/drivers/spi/spi-dw.h
new file mode 100644
index 000000000..9e8eb2b52
--- /dev/null
+++ b/drivers/spi/spi-dw.h
@@ -0,0 +1,307 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __SPI_DW_H__
+#define __SPI_DW_H__
+
+#include <linux/bits.h>
+#include <linux/completion.h>
+#include <linux/debugfs.h>
+#include <linux/irqreturn.h>
+#include <linux/io.h>
+#include <linux/scatterlist.h>
+#include <linux/spi/spi-mem.h>
+#include <linux/bitfield.h>
+
+/* Synopsys DW SSI IP-core virtual IDs */
+#define DW_PSSI_ID 0
+#define DW_HSSI_ID 1
+
+/* Synopsys DW SSI component versions (FourCC sequence) */
+#define DW_HSSI_102A 0x3130322a
+
+/* DW SSI IP-core ID and version check helpers */
+#define dw_spi_ip_is(_dws, _ip) \
+ ((_dws)->ip == DW_ ## _ip ## _ID)
+
+#define __dw_spi_ver_cmp(_dws, _ip, _ver, _op) \
+ (dw_spi_ip_is(_dws, _ip) && (_dws)->ver _op DW_ ## _ip ## _ ## _ver)
+
+#define dw_spi_ver_is(_dws, _ip, _ver) __dw_spi_ver_cmp(_dws, _ip, _ver, ==)
+
+#define dw_spi_ver_is_ge(_dws, _ip, _ver) __dw_spi_ver_cmp(_dws, _ip, _ver, >=)
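+
+/*
+ * For example, dw_spi_ver_is_ge(dws, HSSI, 102A) expands to a check that the
+ * core is a DWC SSI (AHB) IP with a component version of at least 1.02a.
+ */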
+
+/* DW SPI controller capabilities */
+#define DW_SPI_CAP_CS_OVERRIDE BIT(0)
+#define DW_SPI_CAP_DFS32 BIT(1)
+
+/* Register offsets (Generic for both DWC APB SSI and DWC SSI IP-cores) */
+#define DW_SPI_CTRLR0 0x00
+#define DW_SPI_CTRLR1 0x04
+#define DW_SPI_SSIENR 0x08
+#define DW_SPI_MWCR 0x0c
+#define DW_SPI_SER 0x10
+#define DW_SPI_BAUDR 0x14
+#define DW_SPI_TXFTLR 0x18
+#define DW_SPI_RXFTLR 0x1c
+#define DW_SPI_TXFLR 0x20
+#define DW_SPI_RXFLR 0x24
+#define DW_SPI_SR 0x28
+#define DW_SPI_IMR 0x2c
+#define DW_SPI_ISR 0x30
+#define DW_SPI_RISR 0x34
+#define DW_SPI_TXOICR 0x38
+#define DW_SPI_RXOICR 0x3c
+#define DW_SPI_RXUICR 0x40
+#define DW_SPI_MSTICR 0x44
+#define DW_SPI_ICR 0x48
+#define DW_SPI_DMACR 0x4c
+#define DW_SPI_DMATDLR 0x50
+#define DW_SPI_DMARDLR 0x54
+#define DW_SPI_IDR 0x58
+#define DW_SPI_VERSION 0x5c
+#define DW_SPI_DR 0x60
+#define DW_SPI_RX_SAMPLE_DLY 0xf0
+#define DW_SPI_CS_OVERRIDE 0xf4
+
+/* Bit fields in CTRLR0 (DWC APB SSI) */
+#define DW_PSSI_CTRLR0_DFS_MASK GENMASK(3, 0)
+#define DW_PSSI_CTRLR0_DFS32_MASK GENMASK(20, 16)
+
+#define DW_PSSI_CTRLR0_FRF_MASK GENMASK(5, 4)
+#define DW_SPI_CTRLR0_FRF_MOTO_SPI 0x0
+#define DW_SPI_CTRLR0_FRF_TI_SSP 0x1
+#define DW_SPI_CTRLR0_FRF_NS_MICROWIRE 0x2
+#define DW_SPI_CTRLR0_FRF_RESV 0x3
+
+#define DW_PSSI_CTRLR0_MODE_MASK GENMASK(7, 6)
+#define DW_PSSI_CTRLR0_SCPHA BIT(6)
+#define DW_PSSI_CTRLR0_SCPOL BIT(7)
+
+#define DW_PSSI_CTRLR0_TMOD_MASK GENMASK(9, 8)
+#define DW_SPI_CTRLR0_TMOD_TR 0x0 /* xmit & recv */
+#define DW_SPI_CTRLR0_TMOD_TO 0x1 /* xmit only */
+#define DW_SPI_CTRLR0_TMOD_RO 0x2 /* recv only */
+#define DW_SPI_CTRLR0_TMOD_EPROMREAD 0x3 /* eeprom read mode */
+
+#define DW_PSSI_CTRLR0_SLV_OE BIT(10)
+#define DW_PSSI_CTRLR0_SRL BIT(11)
+#define DW_PSSI_CTRLR0_CFS BIT(12)
+
+/* Bit fields in CTRLR0 (DWC SSI with AHB interface) */
+#define DW_HSSI_CTRLR0_DFS_MASK GENMASK(4, 0)
+#define DW_HSSI_CTRLR0_FRF_MASK GENMASK(7, 6)
+#define DW_HSSI_CTRLR0_SCPHA BIT(8)
+#define DW_HSSI_CTRLR0_SCPOL BIT(9)
+#define DW_HSSI_CTRLR0_TMOD_MASK GENMASK(11, 10)
+#define DW_HSSI_CTRLR0_SRL BIT(13)
+#define DW_HSSI_CTRLR0_MST BIT(31)
+
+/* Bit fields in CTRLR1 */
+#define DW_SPI_NDF_MASK GENMASK(15, 0)
+
+/* Bit fields in SR, 7 bits */
+#define DW_SPI_SR_MASK GENMASK(6, 0)
+#define DW_SPI_SR_BUSY BIT(0)
+#define DW_SPI_SR_TF_NOT_FULL BIT(1)
+#define DW_SPI_SR_TF_EMPT BIT(2)
+#define DW_SPI_SR_RF_NOT_EMPT BIT(3)
+#define DW_SPI_SR_RF_FULL BIT(4)
+#define DW_SPI_SR_TX_ERR BIT(5)
+#define DW_SPI_SR_DCOL BIT(6)
+
+/* Bit fields in ISR, IMR, RISR, 7 bits */
+#define DW_SPI_INT_MASK GENMASK(5, 0)
+#define DW_SPI_INT_TXEI BIT(0)
+#define DW_SPI_INT_TXOI BIT(1)
+#define DW_SPI_INT_RXUI BIT(2)
+#define DW_SPI_INT_RXOI BIT(3)
+#define DW_SPI_INT_RXFI BIT(4)
+#define DW_SPI_INT_MSTI BIT(5)
+
+/* Bit fields in DMACR */
+#define DW_SPI_DMACR_RDMAE BIT(0)
+#define DW_SPI_DMACR_TDMAE BIT(1)
+
+/* Mem/DMA operations helpers */
+#define DW_SPI_WAIT_RETRIES 5
+#define DW_SPI_BUF_SIZE \
+ (sizeof_field(struct spi_mem_op, cmd.opcode) + \
+ sizeof_field(struct spi_mem_op, addr.val) + 256)
+#define DW_SPI_GET_BYTE(_val, _idx) \
+ ((_val) >> (BITS_PER_BYTE * (_idx)) & 0xff)
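+/* e.g. DW_SPI_GET_BYTE(0x11223344, 1) extracts byte 1 (from the LSB): 0x33 */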
+
+/* Slave spi_transfer/spi_mem_op related */
+struct dw_spi_cfg {
+ u8 tmode;
+ u8 dfs;
+ u32 ndf;
+ u32 freq;
+};
+
+struct dw_spi;
+struct dw_spi_dma_ops {
+ int (*dma_init)(struct device *dev, struct dw_spi *dws);
+ void (*dma_exit)(struct dw_spi *dws);
+ int (*dma_setup)(struct dw_spi *dws, struct spi_transfer *xfer);
+ bool (*can_dma)(struct spi_controller *master, struct spi_device *spi,
+ struct spi_transfer *xfer);
+ int (*dma_transfer)(struct dw_spi *dws, struct spi_transfer *xfer);
+ void (*dma_stop)(struct dw_spi *dws);
+};
+
+struct dw_spi {
+ struct spi_controller *master;
+
+ u32 ip; /* Synopsys DW SSI IP-core ID */
+ u32 ver; /* Synopsys component version */
+ u32 caps; /* DW SPI capabilities */
+
+ void __iomem *regs;
+ unsigned long paddr;
+ int irq;
+ u32 fifo_len; /* depth of the FIFO buffer */
+ unsigned int dfs_offset; /* CTRLR0 DFS field offset */
+ u32 max_mem_freq; /* max mem-ops bus freq */
+ u32 max_freq; /* max bus freq supported */
+
+ u32 reg_io_width; /* DR I/O width in bytes */
+ u16 bus_num;
+ u16 num_cs; /* supported slave numbers */
+ void (*set_cs)(struct spi_device *spi, bool enable);
+
+ /* Current message transfer state info */
+ void *tx;
+ unsigned int tx_len;
+ void *rx;
+ unsigned int rx_len;
+ u8 buf[DW_SPI_BUF_SIZE];
+ int dma_mapped;
+ u8 n_bytes; /* current is a 1/2 bytes op */
+ irqreturn_t (*transfer_handler)(struct dw_spi *dws);
+ u32 current_freq; /* frequency in hz */
+ u32 cur_rx_sample_dly;
+ u32 def_rx_sample_dly_ns;
+
+ /* Custom memory operations */
+ struct spi_controller_mem_ops mem_ops;
+
+ /* DMA info */
+ struct dma_chan *txchan;
+ u32 txburst;
+ struct dma_chan *rxchan;
+ u32 rxburst;
+ u32 dma_sg_burst;
+ unsigned long dma_chan_busy;
+ dma_addr_t dma_addr; /* phy address of the Data register */
+ const struct dw_spi_dma_ops *dma_ops;
+ struct completion dma_completion;
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs;
+ struct debugfs_regset32 regset;
+#endif
+};
+
+static inline u32 dw_readl(struct dw_spi *dws, u32 offset)
+{
+ return __raw_readl(dws->regs + offset);
+}
+
+static inline void dw_writel(struct dw_spi *dws, u32 offset, u32 val)
+{
+ __raw_writel(val, dws->regs + offset);
+}
+
+static inline u32 dw_read_io_reg(struct dw_spi *dws, u32 offset)
+{
+ switch (dws->reg_io_width) {
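+	/* A reg_io_width of 2 selects 16-bit accesses; anything else uses 32-bit */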
+ case 2:
+ return readw_relaxed(dws->regs + offset);
+ case 4:
+ default:
+ return readl_relaxed(dws->regs + offset);
+ }
+}
+
+static inline void dw_write_io_reg(struct dw_spi *dws, u32 offset, u32 val)
+{
+ switch (dws->reg_io_width) {
+ case 2:
+ writew_relaxed(val, dws->regs + offset);
+ break;
+ case 4:
+ default:
+ writel_relaxed(val, dws->regs + offset);
+ break;
+ }
+}
+
+static inline void dw_spi_enable_chip(struct dw_spi *dws, int enable)
+{
+ dw_writel(dws, DW_SPI_SSIENR, (enable ? 1 : 0));
+}
+
+static inline void dw_spi_set_clk(struct dw_spi *dws, u16 div)
+{
+ dw_writel(dws, DW_SPI_BAUDR, div);
+}
+
+/* Disable IRQ bits */
+static inline void dw_spi_mask_intr(struct dw_spi *dws, u32 mask)
+{
+ u32 new_mask;
+
+ new_mask = dw_readl(dws, DW_SPI_IMR) & ~mask;
+ dw_writel(dws, DW_SPI_IMR, new_mask);
+}
+
+/* Enable IRQ bits */
+static inline void dw_spi_umask_intr(struct dw_spi *dws, u32 mask)
+{
+ u32 new_mask;
+
+ new_mask = dw_readl(dws, DW_SPI_IMR) | mask;
+ dw_writel(dws, DW_SPI_IMR, new_mask);
+}
+
+/*
+ * This disables the SPI controller and its interrupts, clears the interrupt
+ * status and the chip selects, then re-enables the controller. The transmit
+ * and receive FIFO buffers are cleared when the device is disabled.
+ */
+static inline void dw_spi_reset_chip(struct dw_spi *dws)
+{
+ dw_spi_enable_chip(dws, 0);
+ dw_spi_mask_intr(dws, 0xff);
+ dw_readl(dws, DW_SPI_ICR);
+ dw_writel(dws, DW_SPI_SER, 0);
+ dw_spi_enable_chip(dws, 1);
+}
+
+static inline void dw_spi_shutdown_chip(struct dw_spi *dws)
+{
+ dw_spi_enable_chip(dws, 0);
+ dw_spi_set_clk(dws, 0);
+}
+
+extern void dw_spi_set_cs(struct spi_device *spi, bool enable);
+extern void dw_spi_update_config(struct dw_spi *dws, struct spi_device *spi,
+ struct dw_spi_cfg *cfg);
+extern int dw_spi_check_status(struct dw_spi *dws, bool raw);
+extern int dw_spi_add_host(struct device *dev, struct dw_spi *dws);
+extern void dw_spi_remove_host(struct dw_spi *dws);
+extern int dw_spi_suspend_host(struct dw_spi *dws);
+extern int dw_spi_resume_host(struct dw_spi *dws);
+
+#ifdef CONFIG_SPI_DW_DMA
+
+extern void dw_spi_dma_setup_mfld(struct dw_spi *dws);
+extern void dw_spi_dma_setup_generic(struct dw_spi *dws);
+
+#else
+
+static inline void dw_spi_dma_setup_mfld(struct dw_spi *dws) {}
+static inline void dw_spi_dma_setup_generic(struct dw_spi *dws) {}
+
+#endif /* !CONFIG_SPI_DW_DMA */
+
+#endif /* __SPI_DW_H__ */
diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c
new file mode 100644
index 000000000..5896a7b2f
--- /dev/null
+++ b/drivers/spi/spi-ep93xx.c
@@ -0,0 +1,770 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Driver for Cirrus Logic EP93xx SPI controller.
+ *
+ * Copyright (C) 2010-2011 Mika Westerberg
+ *
+ * Explicit FIFO handling code was inspired by amba-pl022 driver.
+ *
+ * Chip select support using GPIOs other than the built-in ones by H. Hartley Sweeten.
+ *
+ * For more information about the SPI controller see documentation on Cirrus
+ * Logic web site:
+ * https://www.cirrus.com/en/pubs/manual/EP93xx_Users_Guide_UM1.pdf
+ */
+
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/scatterlist.h>
+#include <linux/spi/spi.h>
+
+#include <linux/platform_data/dma-ep93xx.h>
+#include <linux/platform_data/spi-ep93xx.h>
+
+#define SSPCR0 0x0000
+#define SSPCR0_SPO BIT(6)
+#define SSPCR0_SPH BIT(7)
+#define SSPCR0_SCR_SHIFT 8
+
+#define SSPCR1 0x0004
+#define SSPCR1_RIE BIT(0)
+#define SSPCR1_TIE BIT(1)
+#define SSPCR1_RORIE BIT(2)
+#define SSPCR1_LBM BIT(3)
+#define SSPCR1_SSE BIT(4)
+#define SSPCR1_MS BIT(5)
+#define SSPCR1_SOD BIT(6)
+
+#define SSPDR 0x0008
+
+#define SSPSR 0x000c
+#define SSPSR_TFE BIT(0)
+#define SSPSR_TNF BIT(1)
+#define SSPSR_RNE BIT(2)
+#define SSPSR_RFF BIT(3)
+#define SSPSR_BSY BIT(4)
+#define SSPCPSR 0x0010
+
+#define SSPIIR 0x0014
+#define SSPIIR_RIS BIT(0)
+#define SSPIIR_TIS BIT(1)
+#define SSPIIR_RORIS BIT(2)
+#define SSPICR SSPIIR
+
+/* timeout in milliseconds */
+#define SPI_TIMEOUT 5
+/* maximum depth of RX/TX FIFO */
+#define SPI_FIFO_SIZE 8
+
+/**
+ * struct ep93xx_spi - EP93xx SPI controller structure
+ * @clk: clock for the controller
+ * @mmio: pointer to ioremap()'d registers
+ * @sspdr_phys: physical address of the SSPDR register
+ * @tx: current byte in transfer to transmit
+ * @rx: current byte in transfer to receive
+ * @fifo_level: how full is FIFO (%0..%SPI_FIFO_SIZE - %1). Receiving one
+ * frame decreases this level and sending one frame increases it.
+ * @dma_rx: RX DMA channel
+ * @dma_tx: TX DMA channel
+ * @dma_rx_data: RX parameters passed to the DMA engine
+ * @dma_tx_data: TX parameters passed to the DMA engine
+ * @rx_sgt: sg table for RX transfers
+ * @tx_sgt: sg table for TX transfers
+ * @zeropage: dummy page used as RX buffer when only TX buffer is passed in by
+ * the client
+ */
+struct ep93xx_spi {
+ struct clk *clk;
+ void __iomem *mmio;
+ unsigned long sspdr_phys;
+ size_t tx;
+ size_t rx;
+ size_t fifo_level;
+ struct dma_chan *dma_rx;
+ struct dma_chan *dma_tx;
+ struct ep93xx_dma_data dma_rx_data;
+ struct ep93xx_dma_data dma_tx_data;
+ struct sg_table rx_sgt;
+ struct sg_table tx_sgt;
+ void *zeropage;
+};
+
+/* converts bits per word to CR0.DSS value */
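+/* e.g. 8 bits per word maps to a DSS value of 7 */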
+#define bits_per_word_to_dss(bpw) ((bpw) - 1)
+
+/**
+ * ep93xx_spi_calc_divisors() - calculates SPI clock divisors
+ * @master: SPI master
+ * @rate: desired SPI output clock rate
+ * @div_cpsr: pointer to return the cpsr (pre-scaler) divider
+ * @div_scr: pointer to return the scr divider
+ */
+static int ep93xx_spi_calc_divisors(struct spi_master *master,
+ u32 rate, u8 *div_cpsr, u8 *div_scr)
+{
+ struct ep93xx_spi *espi = spi_master_get_devdata(master);
+ unsigned long spi_clk_rate = clk_get_rate(espi->clk);
+ int cpsr, scr;
+
+ /*
+	 * Make sure that the requested rate is within the range supported
+	 * by the controller.
+ */
+ rate = clamp(rate, master->min_speed_hz, master->max_speed_hz);
+
+ /*
+	 * Calculate divisors so that we can get the speed according to the
+	 * following formula:
+	 *	rate = spi_clock_rate / (cpsr * (1 + scr))
+	 *
+	 * cpsr must be an even number starting from 2, and scr can be any
+	 * number between 0 and 255.
+ */
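+	/*
+	 * For example, with a 50 MHz spi_clk_rate and a requested rate of
+	 * 1 MHz the loop below picks cpsr = 2 and scr = 24, giving
+	 * 50 MHz / (2 * 25) = 1 MHz exactly.
+	 */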
+ for (cpsr = 2; cpsr <= 254; cpsr += 2) {
+ for (scr = 0; scr <= 255; scr++) {
+ if ((spi_clk_rate / (cpsr * (scr + 1))) <= rate) {
+ *div_scr = (u8)scr;
+ *div_cpsr = (u8)cpsr;
+ return 0;
+ }
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int ep93xx_spi_chip_setup(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct ep93xx_spi *espi = spi_master_get_devdata(master);
+ u8 dss = bits_per_word_to_dss(xfer->bits_per_word);
+ u8 div_cpsr = 0;
+ u8 div_scr = 0;
+ u16 cr0;
+ int err;
+
+ err = ep93xx_spi_calc_divisors(master, xfer->speed_hz,
+ &div_cpsr, &div_scr);
+ if (err)
+ return err;
+
+ cr0 = div_scr << SSPCR0_SCR_SHIFT;
+ if (spi->mode & SPI_CPOL)
+ cr0 |= SSPCR0_SPO;
+ if (spi->mode & SPI_CPHA)
+ cr0 |= SSPCR0_SPH;
+ cr0 |= dss;
+
+ dev_dbg(&master->dev, "setup: mode %d, cpsr %d, scr %d, dss %d\n",
+ spi->mode, div_cpsr, div_scr, dss);
+ dev_dbg(&master->dev, "setup: cr0 %#x\n", cr0);
+
+ writel(div_cpsr, espi->mmio + SSPCPSR);
+ writel(cr0, espi->mmio + SSPCR0);
+
+ return 0;
+}
+
+static void ep93xx_do_write(struct spi_master *master)
+{
+ struct ep93xx_spi *espi = spi_master_get_devdata(master);
+ struct spi_transfer *xfer = master->cur_msg->state;
+ u32 val = 0;
+
+ if (xfer->bits_per_word > 8) {
+ if (xfer->tx_buf)
+ val = ((u16 *)xfer->tx_buf)[espi->tx];
+ espi->tx += 2;
+ } else {
+ if (xfer->tx_buf)
+ val = ((u8 *)xfer->tx_buf)[espi->tx];
+ espi->tx += 1;
+ }
+ writel(val, espi->mmio + SSPDR);
+}
+
+static void ep93xx_do_read(struct spi_master *master)
+{
+ struct ep93xx_spi *espi = spi_master_get_devdata(master);
+ struct spi_transfer *xfer = master->cur_msg->state;
+ u32 val;
+
+ val = readl(espi->mmio + SSPDR);
+ if (xfer->bits_per_word > 8) {
+ if (xfer->rx_buf)
+ ((u16 *)xfer->rx_buf)[espi->rx] = val;
+ espi->rx += 2;
+ } else {
+ if (xfer->rx_buf)
+ ((u8 *)xfer->rx_buf)[espi->rx] = val;
+ espi->rx += 1;
+ }
+}
+
+/**
+ * ep93xx_spi_read_write() - perform next RX/TX transfer
+ * @master: SPI master
+ *
+ * This function transfers the next bytes (or half-words) to/from the RX/TX
+ * FIFOs. If called several times, the whole transfer will be completed.
+ * Returns %-EINPROGRESS when the current transfer is not yet completed,
+ * otherwise %0.
+ *
+ * When this function finishes, the RX FIFO should be empty and the TX FIFO
+ * should be full.
+ */
+static int ep93xx_spi_read_write(struct spi_master *master)
+{
+ struct ep93xx_spi *espi = spi_master_get_devdata(master);
+ struct spi_transfer *xfer = master->cur_msg->state;
+
+ /* read as long as RX FIFO has frames in it */
+ while ((readl(espi->mmio + SSPSR) & SSPSR_RNE)) {
+ ep93xx_do_read(master);
+ espi->fifo_level--;
+ }
+
+ /* write as long as TX FIFO has room */
+ while (espi->fifo_level < SPI_FIFO_SIZE && espi->tx < xfer->len) {
+ ep93xx_do_write(master);
+ espi->fifo_level++;
+ }
+
+ if (espi->rx == xfer->len)
+ return 0;
+
+ return -EINPROGRESS;
+}
+
+static enum dma_transfer_direction
+ep93xx_dma_data_to_trans_dir(enum dma_data_direction dir)
+{
+ switch (dir) {
+ case DMA_TO_DEVICE:
+ return DMA_MEM_TO_DEV;
+ case DMA_FROM_DEVICE:
+ return DMA_DEV_TO_MEM;
+ default:
+ return DMA_TRANS_NONE;
+ }
+}
+
+/**
+ * ep93xx_spi_dma_prepare() - prepares a DMA transfer
+ * @master: SPI master
+ * @dir: DMA transfer direction
+ *
+ * Function configures the DMA, maps the buffer and prepares the DMA
+ * descriptor. Returns a valid DMA descriptor in case of success and ERR_PTR
+ * in case of failure.
+ */
+static struct dma_async_tx_descriptor *
+ep93xx_spi_dma_prepare(struct spi_master *master,
+ enum dma_data_direction dir)
+{
+ struct ep93xx_spi *espi = spi_master_get_devdata(master);
+ struct spi_transfer *xfer = master->cur_msg->state;
+ struct dma_async_tx_descriptor *txd;
+ enum dma_slave_buswidth buswidth;
+ struct dma_slave_config conf;
+ struct scatterlist *sg;
+ struct sg_table *sgt;
+ struct dma_chan *chan;
+ const void *buf, *pbuf;
+ size_t len = xfer->len;
+ int i, ret, nents;
+
+ if (xfer->bits_per_word > 8)
+ buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ else
+ buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
+
+ memset(&conf, 0, sizeof(conf));
+ conf.direction = ep93xx_dma_data_to_trans_dir(dir);
+
+ if (dir == DMA_FROM_DEVICE) {
+ chan = espi->dma_rx;
+ buf = xfer->rx_buf;
+ sgt = &espi->rx_sgt;
+
+ conf.src_addr = espi->sspdr_phys;
+ conf.src_addr_width = buswidth;
+ } else {
+ chan = espi->dma_tx;
+ buf = xfer->tx_buf;
+ sgt = &espi->tx_sgt;
+
+ conf.dst_addr = espi->sspdr_phys;
+ conf.dst_addr_width = buswidth;
+ }
+
+ ret = dmaengine_slave_config(chan, &conf);
+ if (ret)
+ return ERR_PTR(ret);
+
+ /*
+ * We need to split the transfer into PAGE_SIZE'd chunks. This is
+ * because we are using @espi->zeropage to provide a zero RX buffer
+ * for the TX transfers and we have only allocated one page for that.
+ *
+ * For performance reasons we allocate a new sg_table only when
+ * needed. Otherwise we will re-use the current one. Eventually the
+ * last sg_table is released in ep93xx_spi_release_dma().
+ */
+
+ nents = DIV_ROUND_UP(len, PAGE_SIZE);
+ if (nents != sgt->nents) {
+ sg_free_table(sgt);
+
+ ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ pbuf = buf;
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ size_t bytes = min_t(size_t, len, PAGE_SIZE);
+
+ if (buf) {
+ sg_set_page(sg, virt_to_page(pbuf), bytes,
+ offset_in_page(pbuf));
+ } else {
+ sg_set_page(sg, virt_to_page(espi->zeropage),
+ bytes, 0);
+ }
+
+ pbuf += bytes;
+ len -= bytes;
+ }
+
+ if (WARN_ON(len)) {
+ dev_warn(&master->dev, "len = %zu expected 0!\n", len);
+ return ERR_PTR(-EINVAL);
+ }
+
+ nents = dma_map_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
+ if (!nents)
+ return ERR_PTR(-ENOMEM);
+
+ txd = dmaengine_prep_slave_sg(chan, sgt->sgl, nents, conf.direction,
+ DMA_CTRL_ACK);
+ if (!txd) {
+ dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
+ return ERR_PTR(-ENOMEM);
+ }
+ return txd;
+}
+
+/**
+ * ep93xx_spi_dma_finish() - finishes with a DMA transfer
+ * @master: SPI master
+ * @dir: DMA transfer direction
+ *
+ * Function finishes with the DMA transfer. After this, the DMA buffer is
+ * unmapped.
+ */
+static void ep93xx_spi_dma_finish(struct spi_master *master,
+ enum dma_data_direction dir)
+{
+ struct ep93xx_spi *espi = spi_master_get_devdata(master);
+ struct dma_chan *chan;
+ struct sg_table *sgt;
+
+ if (dir == DMA_FROM_DEVICE) {
+ chan = espi->dma_rx;
+ sgt = &espi->rx_sgt;
+ } else {
+ chan = espi->dma_tx;
+ sgt = &espi->tx_sgt;
+ }
+
+ dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
+}
+
+static void ep93xx_spi_dma_callback(void *callback_param)
+{
+ struct spi_master *master = callback_param;
+
+ ep93xx_spi_dma_finish(master, DMA_TO_DEVICE);
+ ep93xx_spi_dma_finish(master, DMA_FROM_DEVICE);
+
+ spi_finalize_current_transfer(master);
+}
+
+static int ep93xx_spi_dma_transfer(struct spi_master *master)
+{
+ struct ep93xx_spi *espi = spi_master_get_devdata(master);
+ struct dma_async_tx_descriptor *rxd, *txd;
+
+ rxd = ep93xx_spi_dma_prepare(master, DMA_FROM_DEVICE);
+ if (IS_ERR(rxd)) {
+ dev_err(&master->dev, "DMA RX failed: %ld\n", PTR_ERR(rxd));
+ return PTR_ERR(rxd);
+ }
+
+ txd = ep93xx_spi_dma_prepare(master, DMA_TO_DEVICE);
+ if (IS_ERR(txd)) {
+ ep93xx_spi_dma_finish(master, DMA_FROM_DEVICE);
+ dev_err(&master->dev, "DMA TX failed: %ld\n", PTR_ERR(txd));
+ return PTR_ERR(txd);
+ }
+
+ /* We are ready when RX is done */
+ rxd->callback = ep93xx_spi_dma_callback;
+ rxd->callback_param = master;
+
+ /* Now submit both descriptors and start DMA */
+ dmaengine_submit(rxd);
+ dmaengine_submit(txd);
+
+ dma_async_issue_pending(espi->dma_rx);
+ dma_async_issue_pending(espi->dma_tx);
+
+ /* signal that we need to wait for completion */
+ return 1;
+}
+
+static irqreturn_t ep93xx_spi_interrupt(int irq, void *dev_id)
+{
+ struct spi_master *master = dev_id;
+ struct ep93xx_spi *espi = spi_master_get_devdata(master);
+ u32 val;
+
+ /*
+	 * If we got a ROR (receive overrun) interrupt, we know that something
+	 * is wrong. Just abort the message.
+ */
+ if (readl(espi->mmio + SSPIIR) & SSPIIR_RORIS) {
+ /* clear the overrun interrupt */
+ writel(0, espi->mmio + SSPICR);
+ dev_warn(&master->dev,
+ "receive overrun, aborting the message\n");
+ master->cur_msg->status = -EIO;
+ } else {
+ /*
+ * Interrupt is either RX (RIS) or TX (TIS). For both cases we
+ * simply execute next data transfer.
+ */
+ if (ep93xx_spi_read_write(master)) {
+ /*
+			 * In the normal case, there is still some processing
+			 * left for the current transfer. Wait for the next
+			 * interrupt then.
+ */
+ return IRQ_HANDLED;
+ }
+ }
+
+ /*
+	 * The current transfer is finished, either with an error or with
+	 * success. In any case we disable interrupts and notify the worker
+	 * to handle any post-processing of the message.
+ */
+ val = readl(espi->mmio + SSPCR1);
+ val &= ~(SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
+ writel(val, espi->mmio + SSPCR1);
+
+ spi_finalize_current_transfer(master);
+
+ return IRQ_HANDLED;
+}
+
+static int ep93xx_spi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct ep93xx_spi *espi = spi_master_get_devdata(master);
+ u32 val;
+ int ret;
+
+ ret = ep93xx_spi_chip_setup(master, spi, xfer);
+ if (ret) {
+ dev_err(&master->dev, "failed to setup chip for transfer\n");
+ return ret;
+ }
+
+ master->cur_msg->state = xfer;
+ espi->rx = 0;
+ espi->tx = 0;
+
+ /*
+	 * There is no point in setting up DMA for transfers which will
+	 * fit into the FIFO and can be transferred with a single interrupt.
+	 * So in these cases we use PIO and don't bother with DMA.
+ */
+ if (espi->dma_rx && xfer->len > SPI_FIFO_SIZE)
+ return ep93xx_spi_dma_transfer(master);
+
+ /* Using PIO so prime the TX FIFO and enable interrupts */
+ ep93xx_spi_read_write(master);
+
+ val = readl(espi->mmio + SSPCR1);
+ val |= (SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
+ writel(val, espi->mmio + SSPCR1);
+
+ /* signal that we need to wait for completion */
+ return 1;
+}
+
+static int ep93xx_spi_prepare_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct ep93xx_spi *espi = spi_master_get_devdata(master);
+ unsigned long timeout;
+
+ /*
+ * Just to be sure: flush any data from RX FIFO.
+ */
+ timeout = jiffies + msecs_to_jiffies(SPI_TIMEOUT);
+ while (readl(espi->mmio + SSPSR) & SSPSR_RNE) {
+ if (time_after(jiffies, timeout)) {
+ dev_warn(&master->dev,
+ "timeout while flushing RX FIFO\n");
+ return -ETIMEDOUT;
+ }
+ readl(espi->mmio + SSPDR);
+ }
+
+ /*
+ * We explicitly handle FIFO level. This way we don't have to check TX
+	 * FIFO status using the %SSPSR_TNF bit, which may cause RX FIFO overruns.
+ */
+ espi->fifo_level = 0;
+
+ return 0;
+}
+
+static int ep93xx_spi_prepare_hardware(struct spi_master *master)
+{
+ struct ep93xx_spi *espi = spi_master_get_devdata(master);
+ u32 val;
+ int ret;
+
+ ret = clk_prepare_enable(espi->clk);
+ if (ret)
+ return ret;
+
+ val = readl(espi->mmio + SSPCR1);
+ val |= SSPCR1_SSE;
+ writel(val, espi->mmio + SSPCR1);
+
+ return 0;
+}
+
+static int ep93xx_spi_unprepare_hardware(struct spi_master *master)
+{
+ struct ep93xx_spi *espi = spi_master_get_devdata(master);
+ u32 val;
+
+ val = readl(espi->mmio + SSPCR1);
+ val &= ~SSPCR1_SSE;
+ writel(val, espi->mmio + SSPCR1);
+
+ clk_disable_unprepare(espi->clk);
+
+ return 0;
+}
+
+static bool ep93xx_spi_dma_filter(struct dma_chan *chan, void *filter_param)
+{
+ if (ep93xx_dma_chan_is_m2p(chan))
+ return false;
+
+ chan->private = filter_param;
+ return true;
+}
+
+static int ep93xx_spi_setup_dma(struct ep93xx_spi *espi)
+{
+ dma_cap_mask_t mask;
+ int ret;
+
+ espi->zeropage = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!espi->zeropage)
+ return -ENOMEM;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ espi->dma_rx_data.port = EP93XX_DMA_SSP;
+ espi->dma_rx_data.direction = DMA_DEV_TO_MEM;
+ espi->dma_rx_data.name = "ep93xx-spi-rx";
+
+ espi->dma_rx = dma_request_channel(mask, ep93xx_spi_dma_filter,
+ &espi->dma_rx_data);
+ if (!espi->dma_rx) {
+ ret = -ENODEV;
+ goto fail_free_page;
+ }
+
+ espi->dma_tx_data.port = EP93XX_DMA_SSP;
+ espi->dma_tx_data.direction = DMA_MEM_TO_DEV;
+ espi->dma_tx_data.name = "ep93xx-spi-tx";
+
+ espi->dma_tx = dma_request_channel(mask, ep93xx_spi_dma_filter,
+ &espi->dma_tx_data);
+ if (!espi->dma_tx) {
+ ret = -ENODEV;
+ goto fail_release_rx;
+ }
+
+ return 0;
+
+fail_release_rx:
+ dma_release_channel(espi->dma_rx);
+ espi->dma_rx = NULL;
+fail_free_page:
+ free_page((unsigned long)espi->zeropage);
+
+ return ret;
+}
+
+static void ep93xx_spi_release_dma(struct ep93xx_spi *espi)
+{
+ if (espi->dma_rx) {
+ dma_release_channel(espi->dma_rx);
+ sg_free_table(&espi->rx_sgt);
+ }
+ if (espi->dma_tx) {
+ dma_release_channel(espi->dma_tx);
+ sg_free_table(&espi->tx_sgt);
+ }
+
+ if (espi->zeropage)
+ free_page((unsigned long)espi->zeropage);
+}
+
+static int ep93xx_spi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct ep93xx_spi_info *info;
+ struct ep93xx_spi *espi;
+ struct resource *res;
+ int irq;
+ int error;
+
+ info = dev_get_platdata(&pdev->dev);
+ if (!info) {
+ dev_err(&pdev->dev, "missing platform data\n");
+ return -EINVAL;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return -EBUSY;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "unable to get iomem resource\n");
+ return -ENODEV;
+ }
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*espi));
+ if (!master)
+ return -ENOMEM;
+
+ master->use_gpio_descriptors = true;
+ master->prepare_transfer_hardware = ep93xx_spi_prepare_hardware;
+ master->unprepare_transfer_hardware = ep93xx_spi_unprepare_hardware;
+ master->prepare_message = ep93xx_spi_prepare_message;
+ master->transfer_one = ep93xx_spi_transfer_one;
+ master->bus_num = pdev->id;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
+ /*
+ * The SPI core will count the number of GPIO descriptors to figure
+ * out the number of chip selects available on the platform.
+ */
+ master->num_chipselect = 0;
+
+ platform_set_drvdata(pdev, master);
+
+ espi = spi_master_get_devdata(master);
+
+ espi->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(espi->clk)) {
+ dev_err(&pdev->dev, "unable to get spi clock\n");
+ error = PTR_ERR(espi->clk);
+ goto fail_release_master;
+ }
+
+ /*
+ * Calculate maximum and minimum supported clock rates
+ * for the controller.
+ */
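+	/* cpsr = 2, scr = 0 gives clk/2; cpsr = 254, scr = 255 gives clk/(254*256) */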
+ master->max_speed_hz = clk_get_rate(espi->clk) / 2;
+ master->min_speed_hz = clk_get_rate(espi->clk) / (254 * 256);
+
+ espi->sspdr_phys = res->start + SSPDR;
+
+ espi->mmio = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(espi->mmio)) {
+ error = PTR_ERR(espi->mmio);
+ goto fail_release_master;
+ }
+
+ error = devm_request_irq(&pdev->dev, irq, ep93xx_spi_interrupt,
+ 0, "ep93xx-spi", master);
+ if (error) {
+ dev_err(&pdev->dev, "failed to request irq\n");
+ goto fail_release_master;
+ }
+
+ if (info->use_dma && ep93xx_spi_setup_dma(espi))
+ dev_warn(&pdev->dev, "DMA setup failed. Falling back to PIO\n");
+
+ /* make sure that the hardware is disabled */
+ writel(0, espi->mmio + SSPCR1);
+
+ error = devm_spi_register_master(&pdev->dev, master);
+ if (error) {
+ dev_err(&pdev->dev, "failed to register SPI master\n");
+ goto fail_free_dma;
+ }
+
+ dev_info(&pdev->dev, "EP93xx SPI Controller at 0x%08lx irq %d\n",
+ (unsigned long)res->start, irq);
+
+ return 0;
+
+fail_free_dma:
+ ep93xx_spi_release_dma(espi);
+fail_release_master:
+ spi_master_put(master);
+
+ return error;
+}
+
+static int ep93xx_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct ep93xx_spi *espi = spi_master_get_devdata(master);
+
+ ep93xx_spi_release_dma(espi);
+
+ return 0;
+}
+
+static struct platform_driver ep93xx_spi_driver = {
+ .driver = {
+ .name = "ep93xx-spi",
+ },
+ .probe = ep93xx_spi_probe,
+ .remove = ep93xx_spi_remove,
+};
+module_platform_driver(ep93xx_spi_driver);
+
+MODULE_DESCRIPTION("EP93xx SPI Controller driver");
+MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:ep93xx-spi");
diff --git a/drivers/spi/spi-falcon.c b/drivers/spi/spi-falcon.c
new file mode 100644
index 000000000..a7d4dffac
--- /dev/null
+++ b/drivers/spi/spi-falcon.c
@@ -0,0 +1,432 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2012 Thomas Langer <thomas.langer@lantiq.com>
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+
+#include <lantiq_soc.h>
+
+#define DRV_NAME "sflash-falcon"
+
+#define FALCON_SPI_XFER_BEGIN (1 << 0)
+#define FALCON_SPI_XFER_END (1 << 1)
+
+/* Bus Read Configuration Register0 */
+#define BUSRCON0 0x00000010
+/* Bus Write Configuration Register0 */
+#define BUSWCON0 0x00000018
+/* Serial Flash Configuration Register */
+#define SFCON 0x00000080
+/* Serial Flash Time Register */
+#define SFTIME 0x00000084
+/* Serial Flash Status Register */
+#define SFSTAT 0x00000088
+/* Serial Flash Command Register */
+#define SFCMD 0x0000008C
+/* Serial Flash Address Register */
+#define SFADDR 0x00000090
+/* Serial Flash Data Register */
+#define SFDATA 0x00000094
+/* Serial Flash I/O Control Register */
+#define SFIO 0x00000098
+/* EBU Clock Control Register */
+#define EBUCC 0x000000C4
+
+/* Dummy Phase Length */
+#define SFCMD_DUMLEN_OFFSET 16
+#define SFCMD_DUMLEN_MASK 0x000F0000
+/* Chip Select */
+#define SFCMD_CS_OFFSET 24
+#define SFCMD_CS_MASK 0x07000000
+/* field offset */
+#define SFCMD_ALEN_OFFSET 20
+#define SFCMD_ALEN_MASK 0x00700000
+/* SCK Rise-edge Position */
+#define SFTIME_SCKR_POS_OFFSET 8
+#define SFTIME_SCKR_POS_MASK 0x00000F00
+/* SCK Period */
+#define SFTIME_SCK_PER_OFFSET 0
+#define SFTIME_SCK_PER_MASK 0x0000000F
+/* SCK Fall-edge Position */
+#define SFTIME_SCKF_POS_OFFSET 12
+#define SFTIME_SCKF_POS_MASK 0x0000F000
+/* Device Size */
+#define SFCON_DEV_SIZE_A23_0 0x03000000
+#define SFCON_DEV_SIZE_MASK 0x0F000000
+/* Read Data Position */
+#define SFTIME_RD_POS_MASK 0x000F0000
+/* Data Output */
+#define SFIO_UNUSED_WD_MASK 0x0000000F
+/* Command Opcode mask */
+#define SFCMD_OPC_MASK 0x000000FF
+/* dlen bytes of data to write */
+#define SFCMD_DIR_WRITE 0x00000100
+/* Data Length offset */
+#define SFCMD_DLEN_OFFSET 9
+/* Command Error */
+#define SFSTAT_CMD_ERR 0x20000000
+/* Access Command Pending */
+#define SFSTAT_CMD_PEND 0x00400000
+/* Frequency set to 100MHz. */
+#define EBUCC_EBUDIV_SELF100 0x00000001
+/* Serial Flash */
+#define BUSRCON0_AGEN_SERIAL_FLASH 0xF0000000
+/* 8-bit multiplexed */
+#define BUSRCON0_PORTW_8_BIT_MUX 0x00000000
+/* Serial Flash */
+#define BUSWCON0_AGEN_SERIAL_FLASH 0xF0000000
+/* Chip Select after opcode */
+#define SFCMD_KEEP_CS_KEEP_SELECTED 0x00008000
+
+#define CLOCK_100M 100000000
+#define CLOCK_50M 50000000
+
+struct falcon_sflash {
+ u32 sfcmd; /* for caching of opcode, direction, ... */
+ struct spi_master *master;
+};
+
+int falcon_sflash_xfer(struct spi_device *spi, struct spi_transfer *t,
+ unsigned long flags)
+{
+ struct device *dev = &spi->dev;
+ struct falcon_sflash *priv = spi_master_get_devdata(spi->master);
+ const u8 *txp = t->tx_buf;
+ u8 *rxp = t->rx_buf;
+ unsigned int bytelen = ((8 * t->len + 7) / 8);
+ unsigned int len, alen, dumlen;
+ u32 val;
+ enum {
+ state_init,
+ state_command_prepare,
+ state_write,
+ state_read,
+ state_disable_cs,
+ state_end
+ } state = state_init;
+
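+	/*
+	 * Walk the state machine until state_end: state_init detects which
+	 * phase of the upper-layer sequence this transfer belongs to, then
+	 * hands off to the command, write, read or disable-CS handling below.
+	 */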
+ do {
+ switch (state) {
+ case state_init: /* detect phase of upper layer sequence */
+ {
+ /* initial write ? */
+ if (flags & FALCON_SPI_XFER_BEGIN) {
+ if (!txp) {
+ dev_err(dev,
+ "BEGIN without tx data!\n");
+ return -ENODATA;
+ }
+ /*
+				 * Prepare the parts of the sfcmd register
+				 * which should not change during a sequence.
+				 * The only exceptions are the length fields,
+				 * especially alen and dumlen.
+ */
+
+ priv->sfcmd = ((spi->chip_select
+ << SFCMD_CS_OFFSET)
+ & SFCMD_CS_MASK);
+ priv->sfcmd |= SFCMD_KEEP_CS_KEEP_SELECTED;
+ priv->sfcmd |= *txp;
+ txp++;
+ bytelen--;
+ if (bytelen) {
+ /*
+ * more data:
+ * maybe address and/or dummy
+ */
+ state = state_command_prepare;
+ break;
+ } else {
+ dev_dbg(dev, "write cmd %02X\n",
+ priv->sfcmd & SFCMD_OPC_MASK);
+ }
+ }
+ /* continued write ? */
+ if (txp && bytelen) {
+ state = state_write;
+ break;
+ }
+ /* read data? */
+ if (rxp && bytelen) {
+ state = state_read;
+ break;
+ }
+ /* end of sequence? */
+ if (flags & FALCON_SPI_XFER_END)
+ state = state_disable_cs;
+ else
+ state = state_end;
+ break;
+ }
+ /* collect tx data for address and dummy phase */
+ case state_command_prepare:
+ {
+ /* txp is valid, already checked */
+ val = 0;
+ alen = 0;
+ dumlen = 0;
+ while (bytelen > 0) {
+ if (alen < 3) {
+ val = (val << 8) | (*txp++);
+ alen++;
+ } else if ((dumlen < 15) && (*txp == 0)) {
+ /*
+ * assume dummy bytes are set to 0
+ * from upper layer
+ */
+ dumlen++;
+ txp++;
+ } else {
+ break;
+ }
+ bytelen--;
+ }
+ priv->sfcmd &= ~(SFCMD_ALEN_MASK | SFCMD_DUMLEN_MASK);
+ priv->sfcmd |= (alen << SFCMD_ALEN_OFFSET) |
+ (dumlen << SFCMD_DUMLEN_OFFSET);
+ if (alen > 0)
+ ltq_ebu_w32(val, SFADDR);
+
+ dev_dbg(dev, "wr %02X, alen=%d (addr=%06X) dlen=%d\n",
+ priv->sfcmd & SFCMD_OPC_MASK,
+ alen, val, dumlen);
+
+ if (bytelen > 0) {
+ /* continue with write */
+ state = state_write;
+ } else if (flags & FALCON_SPI_XFER_END) {
+ /* end of sequence? */
+ state = state_disable_cs;
+ } else {
+ /*
+ * go to end and expect another
+ * call (read or write)
+ */
+ state = state_end;
+ }
+ break;
+ }
+ case state_write:
+ {
+ /* txp still valid */
+ priv->sfcmd |= SFCMD_DIR_WRITE;
+ len = 0;
+ val = 0;
+ do {
+ if (bytelen--)
+ val |= (*txp++) << (8 * len++);
+ if ((flags & FALCON_SPI_XFER_END)
+ && (bytelen == 0)) {
+ priv->sfcmd &=
+ ~SFCMD_KEEP_CS_KEEP_SELECTED;
+ }
+ if ((len == 4) || (bytelen == 0)) {
+ ltq_ebu_w32(val, SFDATA);
+ ltq_ebu_w32(priv->sfcmd
+ | (len<<SFCMD_DLEN_OFFSET),
+ SFCMD);
+ len = 0;
+ val = 0;
+ priv->sfcmd &= ~(SFCMD_ALEN_MASK
+ | SFCMD_DUMLEN_MASK);
+ }
+ } while (bytelen);
+ state = state_end;
+ break;
+ }
+ case state_read:
+ {
+ /* read data */
+ priv->sfcmd &= ~SFCMD_DIR_WRITE;
+ do {
+ if ((flags & FALCON_SPI_XFER_END)
+ && (bytelen <= 4)) {
+ priv->sfcmd &=
+ ~SFCMD_KEEP_CS_KEEP_SELECTED;
+ }
+ len = (bytelen > 4) ? 4 : bytelen;
+ bytelen -= len;
+ ltq_ebu_w32(priv->sfcmd
+ | (len << SFCMD_DLEN_OFFSET), SFCMD);
+ priv->sfcmd &= ~(SFCMD_ALEN_MASK
+ | SFCMD_DUMLEN_MASK);
+ do {
+ val = ltq_ebu_r32(SFSTAT);
+ if (val & SFSTAT_CMD_ERR) {
+ /* reset error status */
+ dev_err(dev, "SFSTAT: CMD_ERR");
+ dev_err(dev, " (%x)\n", val);
+ ltq_ebu_w32(SFSTAT_CMD_ERR,
+ SFSTAT);
+ return -EBADE;
+ }
+ } while (val & SFSTAT_CMD_PEND);
+ val = ltq_ebu_r32(SFDATA);
+ do {
+ *rxp = (val & 0xFF);
+ rxp++;
+ val >>= 8;
+ len--;
+ } while (len);
+ } while (bytelen);
+ state = state_end;
+ break;
+ }
+ case state_disable_cs:
+ {
+ priv->sfcmd &= ~SFCMD_KEEP_CS_KEEP_SELECTED;
+ ltq_ebu_w32(priv->sfcmd | (0 << SFCMD_DLEN_OFFSET),
+ SFCMD);
+ val = ltq_ebu_r32(SFSTAT);
+ if (val & SFSTAT_CMD_ERR) {
+ /* reset error status */
+ dev_err(dev, "SFSTAT: CMD_ERR (%x)\n", val);
+ ltq_ebu_w32(SFSTAT_CMD_ERR, SFSTAT);
+ return -EBADE;
+ }
+ state = state_end;
+ break;
+ }
+ case state_end:
+ break;
+ }
+ } while (state != state_end);
+
+ return 0;
+}
+
+static int falcon_sflash_setup(struct spi_device *spi)
+{
+ unsigned int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ebu_lock, flags);
+
+ if (spi->max_speed_hz >= CLOCK_100M) {
+ /* set EBU clock to 100 MHz */
+ ltq_sys1_w32_mask(0, EBUCC_EBUDIV_SELF100, EBUCC);
+ i = 1; /* divider */
+ } else {
+ /* set EBU clock to 50 MHz */
+ ltq_sys1_w32_mask(EBUCC_EBUDIV_SELF100, 0, EBUCC);
+
+ /* search for suitable divider */
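+		/* e.g. a 10 MHz max_speed_hz selects i = 5 (50 MHz / 5) */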
+ for (i = 1; i < 7; i++) {
+ if (CLOCK_50M / i <= spi->max_speed_hz)
+ break;
+ }
+ }
+
+ /* setup period of serial clock */
+ ltq_ebu_w32_mask(SFTIME_SCKF_POS_MASK
+ | SFTIME_SCKR_POS_MASK
+ | SFTIME_SCK_PER_MASK,
+ (i << SFTIME_SCKR_POS_OFFSET)
+ | (i << (SFTIME_SCK_PER_OFFSET + 1)),
+ SFTIME);
+
+ /*
+	 * Set some bits of unused_wd so as not to trigger HOLD/WP
+	 * signals on non-QUAD flashes.
+ */
+ ltq_ebu_w32((SFIO_UNUSED_WD_MASK & (0x8 | 0x4)), SFIO);
+
+ ltq_ebu_w32(BUSRCON0_AGEN_SERIAL_FLASH | BUSRCON0_PORTW_8_BIT_MUX,
+ BUSRCON0);
+ ltq_ebu_w32(BUSWCON0_AGEN_SERIAL_FLASH, BUSWCON0);
+ /* set address wrap around to maximum for 24-bit addresses */
+ ltq_ebu_w32_mask(SFCON_DEV_SIZE_MASK, SFCON_DEV_SIZE_A23_0, SFCON);
+
+ spin_unlock_irqrestore(&ebu_lock, flags);
+
+ return 0;
+}
+
+static int falcon_sflash_xfer_one(struct spi_master *master,
+ struct spi_message *m)
+{
+ struct falcon_sflash *priv = spi_master_get_devdata(master);
+ struct spi_transfer *t;
+ unsigned long spi_flags;
+ unsigned long flags;
+ int ret = 0;
+
+ priv->sfcmd = 0;
+ m->actual_length = 0;
+
+ spi_flags = FALCON_SPI_XFER_BEGIN;
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ if (list_is_last(&t->transfer_list, &m->transfers))
+ spi_flags |= FALCON_SPI_XFER_END;
+
+ spin_lock_irqsave(&ebu_lock, flags);
+ ret = falcon_sflash_xfer(m->spi, t, spi_flags);
+ spin_unlock_irqrestore(&ebu_lock, flags);
+
+ if (ret)
+ break;
+
+ m->actual_length += t->len;
+
+ WARN_ON(t->delay.value || t->cs_change);
+ spi_flags = 0;
+ }
+
+ m->status = ret;
+ spi_finalize_current_message(master);
+
+ return 0;
+}
+
+static int falcon_sflash_probe(struct platform_device *pdev)
+{
+ struct falcon_sflash *priv;
+ struct spi_master *master;
+ int ret;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*priv));
+ if (!master)
+ return -ENOMEM;
+
+ priv = spi_master_get_devdata(master);
+ priv->master = master;
+
+ master->mode_bits = SPI_MODE_3;
+ master->flags = SPI_MASTER_HALF_DUPLEX;
+ master->setup = falcon_sflash_setup;
+ master->transfer_one_message = falcon_sflash_xfer_one;
+ master->dev.of_node = pdev->dev.of_node;
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret)
+ spi_master_put(master);
+ return ret;
+}
+
+static const struct of_device_id falcon_sflash_match[] = {
+ { .compatible = "lantiq,sflash-falcon" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, falcon_sflash_match);
+
+static struct platform_driver falcon_sflash_driver = {
+ .probe = falcon_sflash_probe,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = falcon_sflash_match,
+ }
+};
+
+module_platform_driver(falcon_sflash_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Lantiq Falcon SPI/SFLASH controller driver");
diff --git a/drivers/spi/spi-fsi.c b/drivers/spi/spi-fsi.c
new file mode 100644
index 000000000..cf1e4f9eb
--- /dev/null
+++ b/drivers/spi/spi-fsi.c
@@ -0,0 +1,593 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// Copyright (C) IBM Corporation 2020
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/fsi.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/spi/spi.h>
+
+#define FSI_ENGID_SPI 0x23
+#define FSI_MBOX_ROOT_CTRL_8 0x2860
+#define FSI_MBOX_ROOT_CTRL_8_SPI_MUX 0xf0000000
+
+#define FSI2SPI_DATA0 0x00
+#define FSI2SPI_DATA1 0x04
+#define FSI2SPI_CMD 0x08
+#define FSI2SPI_CMD_WRITE BIT(31)
+#define FSI2SPI_RESET 0x18
+#define FSI2SPI_STATUS 0x1c
+#define FSI2SPI_STATUS_ANY_ERROR BIT(31)
+#define FSI2SPI_IRQ 0x20
+
+#define SPI_FSI_BASE 0x70000
+#define SPI_FSI_TIMEOUT_MS 1000
+#define SPI_FSI_MAX_RX_SIZE 8
+#define SPI_FSI_MAX_TX_SIZE 40
+
+#define SPI_FSI_ERROR 0x0
+#define SPI_FSI_COUNTER_CFG 0x1
+#define SPI_FSI_CFG1 0x2
+#define SPI_FSI_CLOCK_CFG 0x3
+#define SPI_FSI_CLOCK_CFG_MM_ENABLE BIT_ULL(32)
+#define SPI_FSI_CLOCK_CFG_ECC_DISABLE (BIT_ULL(35) | BIT_ULL(33))
+#define SPI_FSI_CLOCK_CFG_RESET1 (BIT_ULL(36) | BIT_ULL(38))
+#define SPI_FSI_CLOCK_CFG_RESET2 (BIT_ULL(37) | BIT_ULL(39))
+#define SPI_FSI_CLOCK_CFG_MODE (BIT_ULL(41) | BIT_ULL(42))
+#define SPI_FSI_CLOCK_CFG_SCK_RECV_DEL GENMASK_ULL(51, 44)
+#define SPI_FSI_CLOCK_CFG_SCK_NO_DEL BIT_ULL(51)
+#define SPI_FSI_CLOCK_CFG_SCK_DIV GENMASK_ULL(63, 52)
+#define SPI_FSI_MMAP 0x4
+#define SPI_FSI_DATA_TX 0x5
+#define SPI_FSI_DATA_RX 0x6
+#define SPI_FSI_SEQUENCE 0x7
+#define SPI_FSI_SEQUENCE_STOP 0x00
+#define SPI_FSI_SEQUENCE_SEL_SLAVE(x) (0x10 | ((x) & 0xf))
+#define SPI_FSI_SEQUENCE_SHIFT_OUT(x) (0x30 | ((x) & 0xf))
+#define SPI_FSI_SEQUENCE_SHIFT_IN(x) (0x40 | ((x) & 0xf))
+#define SPI_FSI_SEQUENCE_COPY_DATA_TX 0xc0
+#define SPI_FSI_SEQUENCE_BRANCH(x) (0xe0 | ((x) & 0xf))
+#define SPI_FSI_STATUS 0x8
+#define SPI_FSI_STATUS_ERROR \
+ (GENMASK_ULL(31, 21) | GENMASK_ULL(15, 12))
+#define SPI_FSI_STATUS_SEQ_STATE GENMASK_ULL(55, 48)
+#define SPI_FSI_STATUS_SEQ_STATE_IDLE BIT_ULL(48)
+#define SPI_FSI_STATUS_TDR_UNDERRUN BIT_ULL(57)
+#define SPI_FSI_STATUS_TDR_OVERRUN BIT_ULL(58)
+#define SPI_FSI_STATUS_TDR_FULL BIT_ULL(59)
+#define SPI_FSI_STATUS_RDR_UNDERRUN BIT_ULL(61)
+#define SPI_FSI_STATUS_RDR_OVERRUN BIT_ULL(62)
+#define SPI_FSI_STATUS_RDR_FULL BIT_ULL(63)
+#define SPI_FSI_STATUS_ANY_ERROR \
+ (SPI_FSI_STATUS_ERROR | \
+ SPI_FSI_STATUS_TDR_OVERRUN | SPI_FSI_STATUS_RDR_UNDERRUN | \
+ SPI_FSI_STATUS_RDR_OVERRUN)
+#define SPI_FSI_PORT_CTRL 0x9
+
+struct fsi2spi {
+ struct fsi_device *fsi; /* FSI2SPI CFAM engine device */
+ struct mutex lock; /* lock access to the device */
+};
+
+struct fsi_spi {
+ struct device *dev; /* SPI controller device */
+ struct fsi2spi *bridge; /* FSI2SPI device */
+ u32 base;
+};
+
+struct fsi_spi_sequence {
+ int bit;
+ u64 data;
+};
+
+static int fsi_spi_check_mux(struct fsi_device *fsi, struct device *dev)
+{
+ int rc;
+ u32 root_ctrl_8;
+ __be32 root_ctrl_8_be;
+
+ rc = fsi_slave_read(fsi->slave, FSI_MBOX_ROOT_CTRL_8, &root_ctrl_8_be,
+ sizeof(root_ctrl_8_be));
+ if (rc)
+ return rc;
+
+ root_ctrl_8 = be32_to_cpu(root_ctrl_8_be);
+ dev_dbg(dev, "Root control register 8: %08x\n", root_ctrl_8);
+ if ((root_ctrl_8 & FSI_MBOX_ROOT_CTRL_8_SPI_MUX) ==
+ FSI_MBOX_ROOT_CTRL_8_SPI_MUX)
+ return 0;
+
+ return -ENOLINK;
+}
+
+static int fsi_spi_check_status(struct fsi_spi *ctx)
+{
+ int rc;
+ u32 sts;
+ __be32 sts_be;
+
+ rc = fsi_device_read(ctx->bridge->fsi, FSI2SPI_STATUS, &sts_be,
+ sizeof(sts_be));
+ if (rc)
+ return rc;
+
+ sts = be32_to_cpu(sts_be);
+ if (sts & FSI2SPI_STATUS_ANY_ERROR) {
+ dev_err(ctx->dev, "Error with FSI2SPI interface: %08x.\n", sts);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int fsi_spi_read_reg(struct fsi_spi *ctx, u32 offset, u64 *value)
+{
+ int rc = 0;
+ __be32 cmd_be;
+ __be32 data_be;
+ u32 cmd = offset + ctx->base;
+ struct fsi2spi *bridge = ctx->bridge;
+
+ *value = 0ULL;
+
+ if (cmd & FSI2SPI_CMD_WRITE)
+ return -EINVAL;
+
+ rc = mutex_lock_interruptible(&bridge->lock);
+ if (rc)
+ return rc;
+
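+	/*
+	 * Register access is indirect: write the register address to
+	 * FSI2SPI_CMD, check the bridge status, then read the 64-bit value
+	 * back as two 32-bit halves from FSI2SPI_DATA0 and FSI2SPI_DATA1.
+	 */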
+ cmd_be = cpu_to_be32(cmd);
+ rc = fsi_device_write(bridge->fsi, FSI2SPI_CMD, &cmd_be,
+ sizeof(cmd_be));
+ if (rc)
+ goto unlock;
+
+ rc = fsi_spi_check_status(ctx);
+ if (rc)
+ goto unlock;
+
+ rc = fsi_device_read(bridge->fsi, FSI2SPI_DATA0, &data_be,
+ sizeof(data_be));
+ if (rc)
+ goto unlock;
+
+ *value |= (u64)be32_to_cpu(data_be) << 32;
+
+ rc = fsi_device_read(bridge->fsi, FSI2SPI_DATA1, &data_be,
+ sizeof(data_be));
+ if (rc)
+ goto unlock;
+
+ *value |= (u64)be32_to_cpu(data_be);
+ dev_dbg(ctx->dev, "Read %02x[%016llx].\n", offset, *value);
+
+unlock:
+ mutex_unlock(&bridge->lock);
+ return rc;
+}
+
+static int fsi_spi_write_reg(struct fsi_spi *ctx, u32 offset, u64 value)
+{
+ int rc = 0;
+ __be32 cmd_be;
+ __be32 data_be;
+ u32 cmd = offset + ctx->base;
+ struct fsi2spi *bridge = ctx->bridge;
+
+ if (cmd & FSI2SPI_CMD_WRITE)
+ return -EINVAL;
+
+ rc = mutex_lock_interruptible(&bridge->lock);
+ if (rc)
+ return rc;
+
+ dev_dbg(ctx->dev, "Write %02x[%016llx].\n", offset, value);
+
+ data_be = cpu_to_be32(upper_32_bits(value));
+ rc = fsi_device_write(bridge->fsi, FSI2SPI_DATA0, &data_be,
+ sizeof(data_be));
+ if (rc)
+ goto unlock;
+
+ data_be = cpu_to_be32(lower_32_bits(value));
+ rc = fsi_device_write(bridge->fsi, FSI2SPI_DATA1, &data_be,
+ sizeof(data_be));
+ if (rc)
+ goto unlock;
+
+ cmd_be = cpu_to_be32(cmd | FSI2SPI_CMD_WRITE);
+ rc = fsi_device_write(bridge->fsi, FSI2SPI_CMD, &cmd_be,
+ sizeof(cmd_be));
+ if (rc)
+ goto unlock;
+
+ rc = fsi_spi_check_status(ctx);
+
+unlock:
+ mutex_unlock(&bridge->lock);
+ return rc;
+}
+
+static int fsi_spi_data_in(u64 in, u8 *rx, int len)
+{
+ int i;
+ int num_bytes = min(len, 8);
+
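+	/*
+	 * Unpack most-significant byte first: rx[0] comes from the highest
+	 * of the low num_bytes bytes of the 64-bit RX word.
+	 */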
+ for (i = 0; i < num_bytes; ++i)
+ rx[i] = (u8)(in >> (8 * ((num_bytes - 1) - i)));
+
+ return num_bytes;
+}
+
+static int fsi_spi_data_out(u64 *out, const u8 *tx, int len)
+{
+ int i;
+ int num_bytes = min(len, 8);
+ u8 *out_bytes = (u8 *)out;
+
+ /* Unused bytes of the tx data should be 0. */
+ *out = 0ULL;
+
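+	/* Fill the 8-byte TX word back to front: tx[0] lands in out_bytes[7]. */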
+ for (i = 0; i < num_bytes; ++i)
+ out_bytes[8 - (i + 1)] = tx[i];
+
+ return num_bytes;
+}
+
+static int fsi_spi_reset(struct fsi_spi *ctx)
+{
+ int rc;
+
+ dev_dbg(ctx->dev, "Resetting SPI controller.\n");
+
+ rc = fsi_spi_write_reg(ctx, SPI_FSI_CLOCK_CFG,
+ SPI_FSI_CLOCK_CFG_RESET1);
+ if (rc)
+ return rc;
+
+ rc = fsi_spi_write_reg(ctx, SPI_FSI_CLOCK_CFG,
+ SPI_FSI_CLOCK_CFG_RESET2);
+ if (rc)
+ return rc;
+
+ return fsi_spi_write_reg(ctx, SPI_FSI_STATUS, 0ULL);
+}
+
+static int fsi_spi_status(struct fsi_spi *ctx, u64 *status, const char *dir)
+{
+ int rc = fsi_spi_read_reg(ctx, SPI_FSI_STATUS, status);
+
+ if (rc)
+ return rc;
+
+ if (*status & SPI_FSI_STATUS_ANY_ERROR) {
+ dev_err(ctx->dev, "%s error: %016llx\n", dir, *status);
+
+ rc = fsi_spi_reset(ctx);
+ if (rc)
+ return rc;
+
+ return -EREMOTEIO;
+ }
+
+ return 0;
+}
+
+static void fsi_spi_sequence_add(struct fsi_spi_sequence *seq, u8 val)
+{
+	/*
+	 * Add the next byte of instruction to the 8-byte sequence register,
+	 * then move the bit offset down so that the next instruction goes in
+	 * the right place.
+	 */
+ seq->data |= (u64)val << seq->bit;
+ seq->bit -= 8;
+}
+
+static void fsi_spi_sequence_init(struct fsi_spi_sequence *seq)
+{
+ seq->bit = 56;
+ seq->data = 0ULL;
+}
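+
+/*
+ * Worked example (illustrative): for a message on chip select 0 consisting
+ * of a 6-byte tx transfer followed by a 4-byte rx transfer,
+ * fsi_spi_transfer_one_message() builds the sequence
+ *
+ *   SEL_SLAVE(1), SHIFT_OUT(6), SHIFT_IN(4), SEL_SLAVE(0), STOP...
+ *
+ * which packs MSB first into seq.data as 0x1136441000000000.
+ */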
+
+static int fsi_spi_transfer_data(struct fsi_spi *ctx,
+ struct spi_transfer *transfer)
+{
+ int loops;
+ int rc = 0;
+ unsigned long end;
+ u64 status = 0ULL;
+
+ if (transfer->tx_buf) {
+ int nb;
+ int sent = 0;
+ u64 out = 0ULL;
+ const u8 *tx = transfer->tx_buf;
+
+ while (transfer->len > sent) {
+ nb = fsi_spi_data_out(&out, &tx[sent],
+ (int)transfer->len - sent);
+
+ rc = fsi_spi_write_reg(ctx, SPI_FSI_DATA_TX, out);
+ if (rc)
+ return rc;
+
+ loops = 0;
+ end = jiffies + msecs_to_jiffies(SPI_FSI_TIMEOUT_MS);
+ do {
+ if (loops++ && time_after(jiffies, end))
+ return -ETIMEDOUT;
+
+ rc = fsi_spi_status(ctx, &status, "TX");
+ if (rc)
+ return rc;
+ } while (status & SPI_FSI_STATUS_TDR_FULL);
+
+ sent += nb;
+ }
+ } else if (transfer->rx_buf) {
+ int recv = 0;
+ u64 in = 0ULL;
+ u8 *rx = transfer->rx_buf;
+
+ while (transfer->len > recv) {
+ loops = 0;
+ end = jiffies + msecs_to_jiffies(SPI_FSI_TIMEOUT_MS);
+ do {
+ if (loops++ && time_after(jiffies, end))
+ return -ETIMEDOUT;
+
+ rc = fsi_spi_status(ctx, &status, "RX");
+ if (rc)
+ return rc;
+ } while (!(status & SPI_FSI_STATUS_RDR_FULL));
+
+ rc = fsi_spi_read_reg(ctx, SPI_FSI_DATA_RX, &in);
+ if (rc)
+ return rc;
+
+ recv += fsi_spi_data_in(in, &rx[recv],
+ (int)transfer->len - recv);
+ }
+ }
+
+ return 0;
+}
+
+static int fsi_spi_transfer_init(struct fsi_spi *ctx)
+{
+ int loops = 0;
+ int rc;
+ bool reset = false;
+ unsigned long end;
+ u64 seq_state;
+ u64 clock_cfg = 0ULL;
+ u64 status = 0ULL;
+ u64 wanted_clock_cfg = SPI_FSI_CLOCK_CFG_ECC_DISABLE |
+ SPI_FSI_CLOCK_CFG_SCK_NO_DEL |
+ FIELD_PREP(SPI_FSI_CLOCK_CFG_SCK_DIV, 19);
+
+ end = jiffies + msecs_to_jiffies(SPI_FSI_TIMEOUT_MS);
+ do {
+ if (loops++ && time_after(jiffies, end))
+ return -ETIMEDOUT;
+
+ rc = fsi_spi_read_reg(ctx, SPI_FSI_STATUS, &status);
+ if (rc)
+ return rc;
+
+ seq_state = status & SPI_FSI_STATUS_SEQ_STATE;
+
+ if (status & (SPI_FSI_STATUS_ANY_ERROR |
+ SPI_FSI_STATUS_TDR_FULL |
+ SPI_FSI_STATUS_RDR_FULL)) {
+ if (reset) {
+ dev_err(ctx->dev,
+ "Initialization error: %08llx\n",
+ status);
+ return -EIO;
+ }
+
+ rc = fsi_spi_reset(ctx);
+ if (rc)
+ return rc;
+
+ reset = true;
+ continue;
+ }
+ } while (seq_state && (seq_state != SPI_FSI_STATUS_SEQ_STATE_IDLE));
+
+ rc = fsi_spi_write_reg(ctx, SPI_FSI_COUNTER_CFG, 0ULL);
+ if (rc)
+ return rc;
+
+ rc = fsi_spi_read_reg(ctx, SPI_FSI_CLOCK_CFG, &clock_cfg);
+ if (rc)
+ return rc;
+
+ if ((clock_cfg & (SPI_FSI_CLOCK_CFG_MM_ENABLE |
+ SPI_FSI_CLOCK_CFG_ECC_DISABLE |
+ SPI_FSI_CLOCK_CFG_MODE |
+ SPI_FSI_CLOCK_CFG_SCK_RECV_DEL |
+ SPI_FSI_CLOCK_CFG_SCK_DIV)) != wanted_clock_cfg)
+ rc = fsi_spi_write_reg(ctx, SPI_FSI_CLOCK_CFG,
+ wanted_clock_cfg);
+
+ return rc;
+}
+
+static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
+ struct spi_message *mesg)
+{
+ int rc;
+ u8 seq_slave = SPI_FSI_SEQUENCE_SEL_SLAVE(mesg->spi->chip_select + 1);
+ unsigned int len;
+ struct spi_transfer *transfer;
+ struct fsi_spi *ctx = spi_controller_get_devdata(ctlr);
+
+ rc = fsi_spi_check_mux(ctx->bridge->fsi, ctx->dev);
+ if (rc)
+ goto error;
+
+ list_for_each_entry(transfer, &mesg->transfers, transfer_list) {
+ struct fsi_spi_sequence seq;
+ struct spi_transfer *next = NULL;
+
+ /* Sequencer must do shift out (tx) first. */
+ if (!transfer->tx_buf || transfer->len > SPI_FSI_MAX_TX_SIZE) {
+ rc = -EINVAL;
+ goto error;
+ }
+
+ dev_dbg(ctx->dev, "Start tx of %d bytes.\n", transfer->len);
+
+ rc = fsi_spi_transfer_init(ctx);
+ if (rc < 0)
+ goto error;
+
+ fsi_spi_sequence_init(&seq);
+ fsi_spi_sequence_add(&seq, seq_slave);
+
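+		/*
+		 * Split the tx into shift-out ops of at most 8 bytes each:
+		 * full SHIFT_OUT(8) ops followed by one final shorter op.
+		 */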
+ len = transfer->len;
+ while (len > 8) {
+ fsi_spi_sequence_add(&seq,
+ SPI_FSI_SEQUENCE_SHIFT_OUT(8));
+ len -= 8;
+ }
+ fsi_spi_sequence_add(&seq, SPI_FSI_SEQUENCE_SHIFT_OUT(len));
+
+ if (!list_is_last(&transfer->transfer_list,
+ &mesg->transfers)) {
+ next = list_next_entry(transfer, transfer_list);
+
+ /* Sequencer can only do shift in (rx) after tx. */
+ if (next->rx_buf) {
+ u8 shift;
+
+ if (next->len > SPI_FSI_MAX_RX_SIZE) {
+ rc = -EINVAL;
+ goto error;
+ }
+
+ dev_dbg(ctx->dev, "Sequence rx of %d bytes.\n",
+ next->len);
+
+ shift = SPI_FSI_SEQUENCE_SHIFT_IN(next->len);
+ fsi_spi_sequence_add(&seq, shift);
+ } else {
+ next = NULL;
+ }
+ }
+
+ fsi_spi_sequence_add(&seq, SPI_FSI_SEQUENCE_SEL_SLAVE(0));
+
+ rc = fsi_spi_write_reg(ctx, SPI_FSI_SEQUENCE, seq.data);
+ if (rc)
+ goto error;
+
+ rc = fsi_spi_transfer_data(ctx, transfer);
+ if (rc)
+ goto error;
+
+ if (next) {
+ rc = fsi_spi_transfer_data(ctx, next);
+ if (rc)
+ goto error;
+
+ transfer = next;
+ }
+ }
+
+error:
+ mesg->status = rc;
+ spi_finalize_current_message(ctlr);
+
+ return rc;
+}
+
+static size_t fsi_spi_max_transfer_size(struct spi_device *spi)
+{
+ return SPI_FSI_MAX_RX_SIZE;
+}
+
+static int fsi_spi_probe(struct device *dev)
+{
+ int rc;
+ struct device_node *np;
+ int num_controllers_registered = 0;
+ struct fsi2spi *bridge;
+ struct fsi_device *fsi = to_fsi_dev(dev);
+
+ rc = fsi_spi_check_mux(fsi, dev);
+ if (rc)
+ return -ENODEV;
+
+ bridge = devm_kzalloc(dev, sizeof(*bridge), GFP_KERNEL);
+ if (!bridge)
+ return -ENOMEM;
+
+ bridge->fsi = fsi;
+ mutex_init(&bridge->lock);
+
+ for_each_available_child_of_node(dev->of_node, np) {
+ u32 base;
+ struct fsi_spi *ctx;
+ struct spi_controller *ctlr;
+
+ if (of_property_read_u32(np, "reg", &base))
+ continue;
+
+ ctlr = spi_alloc_master(dev, sizeof(*ctx));
+ if (!ctlr) {
+ of_node_put(np);
+ break;
+ }
+
+ ctlr->dev.of_node = np;
+ ctlr->num_chipselect = of_get_available_child_count(np) ?: 1;
+ ctlr->flags = SPI_CONTROLLER_HALF_DUPLEX;
+ ctlr->max_transfer_size = fsi_spi_max_transfer_size;
+ ctlr->transfer_one_message = fsi_spi_transfer_one_message;
+
+ ctx = spi_controller_get_devdata(ctlr);
+ ctx->dev = &ctlr->dev;
+ ctx->bridge = bridge;
+ ctx->base = base + SPI_FSI_BASE;
+
+ rc = devm_spi_register_controller(dev, ctlr);
+ if (rc)
+ spi_controller_put(ctlr);
+ else
+ num_controllers_registered++;
+ }
+
+ if (!num_controllers_registered)
+ return -ENODEV;
+
+ return 0;
+}
+
+static const struct fsi_device_id fsi_spi_ids[] = {
+ { FSI_ENGID_SPI, FSI_VERSION_ANY },
+ { }
+};
+MODULE_DEVICE_TABLE(fsi, fsi_spi_ids);
+
+static struct fsi_driver fsi_spi_driver = {
+ .id_table = fsi_spi_ids,
+ .drv = {
+ .name = "spi-fsi",
+ .bus = &fsi_bus_type,
+ .probe = fsi_spi_probe,
+ },
+};
+module_fsi_driver(fsi_spi_driver);
+
+MODULE_AUTHOR("Eddie James <eajames@linux.ibm.com>");
+MODULE_DESCRIPTION("FSI attached SPI controller");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-fsl-cpm.c b/drivers/spi/spi-fsl-cpm.c
new file mode 100644
index 000000000..7832ce330
--- /dev/null
+++ b/drivers/spi/spi-fsl-cpm.c
@@ -0,0 +1,424 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Freescale SPI controller driver cpm functions.
+ *
+ * Maintainer: Kumar Gala
+ *
+ * Copyright (C) 2006 Polycom, Inc.
+ * Copyright 2010 Freescale Semiconductor, Inc.
+ *
+ * CPM SPI and QE buffer descriptors mode support:
+ * Copyright (c) 2009 MontaVista Software, Inc.
+ * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
+ */
+#include <asm/cpm.h>
+#include <soc/fsl/qe/qe.h>
+#include <linux/dma-mapping.h>
+#include <linux/fsl_devices.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/spi/spi.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/byteorder/generic.h>
+
+#include "spi-fsl-cpm.h"
+#include "spi-fsl-lib.h"
+#include "spi-fsl-spi.h"
+
+/* CPM1 and CPM2 are mutually exclusive. */
+#ifdef CONFIG_CPM1
+#include <asm/cpm1.h>
+#define CPM_SPI_CMD mk_cr_cmd(CPM_CR_CH_SPI, 0)
+#else
+#include <asm/cpm2.h>
+#define CPM_SPI_CMD mk_cr_cmd(CPM_CR_SPI_PAGE, CPM_CR_SPI_SBLOCK, 0, 0)
+#endif
+
+#define SPIE_TXB 0x00000200 /* Last char is written to tx fifo */
+#define SPIE_RXB 0x00000100 /* Last char is written to rx buf */
+
+/* SPCOM register values */
+#define SPCOM_STR (1 << 23) /* Start transmit */
+
+#define SPI_PRAM_SIZE 0x100
+#define SPI_MRBLR ((unsigned int)PAGE_SIZE)
+
+static void *fsl_dummy_rx;
+static DEFINE_MUTEX(fsl_dummy_rx_lock);
+static int fsl_dummy_rx_refcnt;
+
+void fsl_spi_cpm_reinit_txrx(struct mpc8xxx_spi *mspi)
+{
+ if (mspi->flags & SPI_QE) {
+ qe_issue_cmd(QE_INIT_TX_RX, mspi->subblock,
+ QE_CR_PROTOCOL_UNSPECIFIED, 0);
+ } else {
+ if (mspi->flags & SPI_CPM1) {
+ out_be32(&mspi->pram->rstate, 0);
+ out_be16(&mspi->pram->rbptr,
+ in_be16(&mspi->pram->rbase));
+ out_be32(&mspi->pram->tstate, 0);
+ out_be16(&mspi->pram->tbptr,
+ in_be16(&mspi->pram->tbase));
+ } else {
+ cpm_command(CPM_SPI_CMD, CPM_CR_INIT_TRX);
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(fsl_spi_cpm_reinit_txrx);
+
+static void fsl_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi)
+{
+ struct cpm_buf_desc __iomem *tx_bd = mspi->tx_bd;
+ struct cpm_buf_desc __iomem *rx_bd = mspi->rx_bd;
+ unsigned int xfer_len = min(mspi->count, SPI_MRBLR);
+ unsigned int xfer_ofs;
+ struct fsl_spi_reg *reg_base = mspi->reg_base;
+
+ xfer_ofs = mspi->xfer_in_progress->len - mspi->count;
+
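+	/*
+	 * The dummy rx/tx buffers are always used from offset 0; only real
+	 * buffers advance by the per-chunk transfer offset.
+	 */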
+ if (mspi->rx_dma == mspi->dma_dummy_rx)
+ out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma);
+ else
+ out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma + xfer_ofs);
+ out_be16(&rx_bd->cbd_datlen, 0);
+ out_be16(&rx_bd->cbd_sc, BD_SC_EMPTY | BD_SC_INTRPT | BD_SC_WRAP);
+
+ if (mspi->tx_dma == mspi->dma_dummy_tx)
+ out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma);
+ else
+ out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma + xfer_ofs);
+ out_be16(&tx_bd->cbd_datlen, xfer_len);
+ out_be16(&tx_bd->cbd_sc, BD_SC_READY | BD_SC_INTRPT | BD_SC_WRAP |
+ BD_SC_LAST);
+
+ /* start transfer */
+ mpc8xxx_spi_write_reg(&reg_base->command, SPCOM_STR);
+}
+
+int fsl_spi_cpm_bufs(struct mpc8xxx_spi *mspi,
+ struct spi_transfer *t, bool is_dma_mapped)
+{
+ struct device *dev = mspi->dev;
+ struct fsl_spi_reg *reg_base = mspi->reg_base;
+
+ if (is_dma_mapped) {
+ mspi->map_tx_dma = 0;
+ mspi->map_rx_dma = 0;
+ } else {
+ mspi->map_tx_dma = 1;
+ mspi->map_rx_dma = 1;
+ }
+
+ if (!t->tx_buf) {
+ mspi->tx_dma = mspi->dma_dummy_tx;
+ mspi->map_tx_dma = 0;
+ }
+
+ if (!t->rx_buf) {
+ mspi->rx_dma = mspi->dma_dummy_rx;
+ mspi->map_rx_dma = 0;
+ }
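+
+	/*
+	 * For 16-bit words, stage the tx data in a bounce buffer with each
+	 * word converted to little-endian; fsl_spi_cpm_bufs_complete() does
+	 * the inverse conversion on the rx buffer.
+	 */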
+ if (t->bits_per_word == 16 && t->tx_buf) {
+ const u16 *src = t->tx_buf;
+ u16 *dst;
+ int i;
+
+ dst = kmalloc(t->len, GFP_KERNEL);
+ if (!dst)
+ return -ENOMEM;
+
+ for (i = 0; i < t->len >> 1; i++)
+ dst[i] = cpu_to_le16p(src + i);
+
+ mspi->tx = dst;
+ mspi->map_tx_dma = 1;
+ }
+
+ if (mspi->map_tx_dma) {
+		void *nonconst_tx = (void *)mspi->tx; /* cast away const for the DMA API */
+
+ mspi->tx_dma = dma_map_single(dev, nonconst_tx, t->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, mspi->tx_dma)) {
+ dev_err(dev, "unable to map tx dma\n");
+ return -ENOMEM;
+ }
+ } else if (t->tx_buf) {
+ mspi->tx_dma = t->tx_dma;
+ }
+
+ if (mspi->map_rx_dma) {
+ mspi->rx_dma = dma_map_single(dev, mspi->rx, t->len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, mspi->rx_dma)) {
+ dev_err(dev, "unable to map rx dma\n");
+ goto err_rx_dma;
+ }
+ } else if (t->rx_buf) {
+ mspi->rx_dma = t->rx_dma;
+ }
+
+ /* enable rx ints */
+ mpc8xxx_spi_write_reg(&reg_base->mask, SPIE_RXB);
+
+ mspi->xfer_in_progress = t;
+ mspi->count = t->len;
+
+ /* start CPM transfers */
+ fsl_spi_cpm_bufs_start(mspi);
+
+ return 0;
+
+err_rx_dma:
+ if (mspi->map_tx_dma)
+ dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE);
+ return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(fsl_spi_cpm_bufs);
+
+void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi)
+{
+ struct device *dev = mspi->dev;
+ struct spi_transfer *t = mspi->xfer_in_progress;
+
+ if (mspi->map_tx_dma)
+ dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE);
+ if (mspi->map_rx_dma)
+ dma_unmap_single(dev, mspi->rx_dma, t->len, DMA_FROM_DEVICE);
+ mspi->xfer_in_progress = NULL;
+
+ if (t->bits_per_word == 16 && t->rx_buf) {
+ int i;
+
+ for (i = 0; i < t->len; i += 2)
+ le16_to_cpus(t->rx_buf + i);
+ }
+}
+EXPORT_SYMBOL_GPL(fsl_spi_cpm_bufs_complete);
+
+void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events)
+{
+ u16 len;
+ struct fsl_spi_reg *reg_base = mspi->reg_base;
+
+ dev_dbg(mspi->dev, "%s: bd datlen %d, count %d\n", __func__,
+ in_be16(&mspi->rx_bd->cbd_datlen), mspi->count);
+
+ len = in_be16(&mspi->rx_bd->cbd_datlen);
+ if (len > mspi->count) {
+ WARN_ON(1);
+ len = mspi->count;
+ }
+
+ /* Clear the events */
+ mpc8xxx_spi_write_reg(&reg_base->event, events);
+
+ mspi->count -= len;
+ if (mspi->count)
+ fsl_spi_cpm_bufs_start(mspi);
+ else
+ complete(&mspi->done);
+}
+EXPORT_SYMBOL_GPL(fsl_spi_cpm_irq);
+
+static void *fsl_spi_alloc_dummy_rx(void)
+{
+ mutex_lock(&fsl_dummy_rx_lock);
+
+ if (!fsl_dummy_rx)
+ fsl_dummy_rx = kmalloc(SPI_MRBLR, GFP_KERNEL);
+ if (fsl_dummy_rx)
+ fsl_dummy_rx_refcnt++;
+
+ mutex_unlock(&fsl_dummy_rx_lock);
+
+ return fsl_dummy_rx;
+}
+
+static void fsl_spi_free_dummy_rx(void)
+{
+ mutex_lock(&fsl_dummy_rx_lock);
+
+ switch (fsl_dummy_rx_refcnt) {
+ case 0:
+ WARN_ON(1);
+ break;
+ case 1:
+ kfree(fsl_dummy_rx);
+ fsl_dummy_rx = NULL;
+ fallthrough;
+ default:
+ fsl_dummy_rx_refcnt--;
+ break;
+ }
+
+ mutex_unlock(&fsl_dummy_rx_lock);
+}
+
+static unsigned long fsl_spi_cpm_get_pram(struct mpc8xxx_spi *mspi)
+{
+ struct device *dev = mspi->dev;
+ struct device_node *np = dev->of_node;
+ const u32 *iprop;
+ int size;
+ void __iomem *spi_base;
+ unsigned long pram_ofs = -ENOMEM;
+
+ /* Can't use of_address_to_resource(), QE muram isn't at 0. */
+ iprop = of_get_property(np, "reg", &size);
+
+ /* QE with a fixed pram location? */
+ if (mspi->flags & SPI_QE && iprop && size == sizeof(*iprop) * 4)
+ return cpm_muram_alloc_fixed(iprop[2], SPI_PRAM_SIZE);
+
+ /* QE but with a dynamic pram location? */
+ if (mspi->flags & SPI_QE) {
+ pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
+ qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, mspi->subblock,
+ QE_CR_PROTOCOL_UNSPECIFIED, pram_ofs);
+ return pram_ofs;
+ }
+
+ spi_base = of_iomap(np, 1);
+ if (spi_base == NULL)
+ return -EINVAL;
+
+ if (mspi->flags & SPI_CPM2) {
+ pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
+ out_be16(spi_base, pram_ofs);
+ }
+
+ iounmap(spi_base);
+ return pram_ofs;
+}
+
+int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi)
+{
+ struct device *dev = mspi->dev;
+ struct device_node *np = dev->of_node;
+ const u32 *iprop;
+ int size;
+ unsigned long bds_ofs;
+
+ if (!(mspi->flags & SPI_CPM_MODE))
+ return 0;
+
+ if (!fsl_spi_alloc_dummy_rx())
+ return -ENOMEM;
+
+ if (mspi->flags & SPI_QE) {
+ iprop = of_get_property(np, "cell-index", &size);
+ if (iprop && size == sizeof(*iprop))
+ mspi->subblock = *iprop;
+
+ switch (mspi->subblock) {
+ default:
+ dev_warn(dev, "cell-index unspecified, assuming SPI1\n");
+ fallthrough;
+ case 0:
+ mspi->subblock = QE_CR_SUBBLOCK_SPI1;
+ break;
+ case 1:
+ mspi->subblock = QE_CR_SUBBLOCK_SPI2;
+ break;
+ }
+ }
+
+ if (mspi->flags & SPI_CPM1) {
+ void *pram;
+
+ pram = devm_platform_ioremap_resource(to_platform_device(dev),
+ 1);
+ if (IS_ERR(pram))
+ mspi->pram = NULL;
+ else
+ mspi->pram = pram;
+ } else {
+ unsigned long pram_ofs = fsl_spi_cpm_get_pram(mspi);
+
+ if (IS_ERR_VALUE(pram_ofs))
+ mspi->pram = NULL;
+ else
+ mspi->pram = cpm_muram_addr(pram_ofs);
+ }
+ if (mspi->pram == NULL) {
+ dev_err(dev, "can't allocate spi parameter ram\n");
+ goto err_pram;
+ }
+
+ bds_ofs = cpm_muram_alloc(sizeof(*mspi->tx_bd) +
+ sizeof(*mspi->rx_bd), 8);
+ if (IS_ERR_VALUE(bds_ofs)) {
+ dev_err(dev, "can't allocate bds\n");
+ goto err_bds;
+ }
+
+ mspi->dma_dummy_tx = dma_map_single(dev, empty_zero_page, PAGE_SIZE,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, mspi->dma_dummy_tx)) {
+ dev_err(dev, "unable to map dummy tx buffer\n");
+ goto err_dummy_tx;
+ }
+
+ mspi->dma_dummy_rx = dma_map_single(dev, fsl_dummy_rx, SPI_MRBLR,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, mspi->dma_dummy_rx)) {
+ dev_err(dev, "unable to map dummy rx buffer\n");
+ goto err_dummy_rx;
+ }
+
+ mspi->tx_bd = cpm_muram_addr(bds_ofs);
+ mspi->rx_bd = cpm_muram_addr(bds_ofs + sizeof(*mspi->tx_bd));
+
+ /* Initialize parameter ram. */
+ out_be16(&mspi->pram->tbase, cpm_muram_offset(mspi->tx_bd));
+ out_be16(&mspi->pram->rbase, cpm_muram_offset(mspi->rx_bd));
+ out_8(&mspi->pram->tfcr, CPMFCR_EB | CPMFCR_GBL);
+ out_8(&mspi->pram->rfcr, CPMFCR_EB | CPMFCR_GBL);
+ out_be16(&mspi->pram->mrblr, SPI_MRBLR);
+ out_be32(&mspi->pram->rstate, 0);
+ out_be32(&mspi->pram->rdp, 0);
+ out_be16(&mspi->pram->rbptr, 0);
+ out_be16(&mspi->pram->rbc, 0);
+ out_be32(&mspi->pram->rxtmp, 0);
+ out_be32(&mspi->pram->tstate, 0);
+ out_be32(&mspi->pram->tdp, 0);
+ out_be16(&mspi->pram->tbptr, 0);
+ out_be16(&mspi->pram->tbc, 0);
+ out_be32(&mspi->pram->txtmp, 0);
+
+ return 0;
+
+err_dummy_rx:
+ dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE);
+err_dummy_tx:
+ cpm_muram_free(bds_ofs);
+err_bds:
+ if (!(mspi->flags & SPI_CPM1))
+ cpm_muram_free(cpm_muram_offset(mspi->pram));
+err_pram:
+ fsl_spi_free_dummy_rx();
+ return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(fsl_spi_cpm_init);
+
+void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi)
+{
+ struct device *dev = mspi->dev;
+
+ if (!(mspi->flags & SPI_CPM_MODE))
+ return;
+
+ dma_unmap_single(dev, mspi->dma_dummy_rx, SPI_MRBLR, DMA_FROM_DEVICE);
+ dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE);
+ cpm_muram_free(cpm_muram_offset(mspi->tx_bd));
+ if (!(mspi->flags & SPI_CPM1))
+ cpm_muram_free(cpm_muram_offset(mspi->pram));
+ fsl_spi_free_dummy_rx();
+}
+EXPORT_SYMBOL_GPL(fsl_spi_cpm_free);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-fsl-cpm.h b/drivers/spi/spi-fsl-cpm.h
new file mode 100644
index 000000000..160f99970
--- /dev/null
+++ b/drivers/spi/spi-fsl-cpm.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Freescale SPI controller driver cpm functions.
+ *
+ * Maintainer: Kumar Gala
+ *
+ * Copyright (C) 2006 Polycom, Inc.
+ * Copyright 2010 Freescale Semiconductor, Inc.
+ *
+ * CPM SPI and QE buffer descriptors mode support:
+ * Copyright (c) 2009 MontaVista Software, Inc.
+ * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
+ */
+
+#ifndef __SPI_FSL_CPM_H__
+#define __SPI_FSL_CPM_H__
+
+#include "spi-fsl-lib.h"
+
+#ifdef CONFIG_FSL_SOC
+extern void fsl_spi_cpm_reinit_txrx(struct mpc8xxx_spi *mspi);
+extern int fsl_spi_cpm_bufs(struct mpc8xxx_spi *mspi,
+ struct spi_transfer *t, bool is_dma_mapped);
+extern void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi);
+extern void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events);
+extern int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi);
+extern void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi);
+#else
+static inline void fsl_spi_cpm_reinit_txrx(struct mpc8xxx_spi *mspi) { }
+static inline int fsl_spi_cpm_bufs(struct mpc8xxx_spi *mspi,
+ struct spi_transfer *t,
+ bool is_dma_mapped) { return 0; }
+static inline void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi) { }
+static inline void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events) { }
+static inline int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi) { return 0; }
+static inline void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi) { }
+#endif
+
+#endif /* __SPI_FSL_CPM_H__ */
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
new file mode 100644
index 000000000..01930b52c
--- /dev/null
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -0,0 +1,1452 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright 2013 Freescale Semiconductor, Inc.
+// Copyright 2020 NXP
+//
+// Freescale DSPI driver
+// This file contains a driver for the Freescale DSPI
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-fsl-dspi.h>
+
+#define DRIVER_NAME "fsl-dspi"
+
+#define SPI_MCR 0x00
+#define SPI_MCR_MASTER BIT(31)
+#define SPI_MCR_PCSIS(x) ((x) << 16)
+#define SPI_MCR_CLR_TXF BIT(11)
+#define SPI_MCR_CLR_RXF BIT(10)
+#define SPI_MCR_XSPI BIT(3)
+#define SPI_MCR_DIS_TXF BIT(13)
+#define SPI_MCR_DIS_RXF BIT(12)
+#define SPI_MCR_HALT BIT(0)
+
+#define SPI_TCR 0x08
+#define SPI_TCR_GET_TCNT(x) (((x) & GENMASK(31, 16)) >> 16)
+
+#define SPI_CTAR(x) (0x0c + (((x) & GENMASK(1, 0)) * 4))
+#define SPI_CTAR_FMSZ(x) (((x) << 27) & GENMASK(30, 27))
+#define SPI_CTAR_CPOL BIT(26)
+#define SPI_CTAR_CPHA BIT(25)
+#define SPI_CTAR_LSBFE BIT(24)
+#define SPI_CTAR_PCSSCK(x) (((x) << 22) & GENMASK(23, 22))
+#define SPI_CTAR_PASC(x) (((x) << 20) & GENMASK(21, 20))
+#define SPI_CTAR_PDT(x) (((x) << 18) & GENMASK(19, 18))
+#define SPI_CTAR_PBR(x) (((x) << 16) & GENMASK(17, 16))
+#define SPI_CTAR_CSSCK(x) (((x) << 12) & GENMASK(15, 12))
+#define SPI_CTAR_ASC(x) (((x) << 8) & GENMASK(11, 8))
+#define SPI_CTAR_DT(x) (((x) << 4) & GENMASK(7, 4))
+#define SPI_CTAR_BR(x) ((x) & GENMASK(3, 0))
+#define SPI_CTAR_SCALE_BITS 0xf
+
+#define SPI_CTAR0_SLAVE 0x0c
+
+#define SPI_SR 0x2c
+#define SPI_SR_TCFQF BIT(31)
+#define SPI_SR_TFUF BIT(27)
+#define SPI_SR_TFFF BIT(25)
+#define SPI_SR_CMDTCF BIT(23)
+#define SPI_SR_SPEF BIT(21)
+#define SPI_SR_RFOF BIT(19)
+#define SPI_SR_TFIWF BIT(18)
+#define SPI_SR_RFDF BIT(17)
+#define SPI_SR_CMDFFF BIT(16)
+#define SPI_SR_CLEAR (SPI_SR_TCFQF | \
+ SPI_SR_TFUF | SPI_SR_TFFF | \
+ SPI_SR_CMDTCF | SPI_SR_SPEF | \
+ SPI_SR_RFOF | SPI_SR_TFIWF | \
+ SPI_SR_RFDF | SPI_SR_CMDFFF)
+
+#define SPI_RSER_TFFFE BIT(25)
+#define SPI_RSER_TFFFD BIT(24)
+#define SPI_RSER_RFDFE BIT(17)
+#define SPI_RSER_RFDFD BIT(16)
+
+#define SPI_RSER 0x30
+#define SPI_RSER_TCFQE BIT(31)
+#define SPI_RSER_CMDTCFE BIT(23)
+
+#define SPI_PUSHR 0x34
+#define SPI_PUSHR_CMD_CONT BIT(15)
+#define SPI_PUSHR_CMD_CTAS(x) (((x) << 12 & GENMASK(14, 12)))
+#define SPI_PUSHR_CMD_EOQ BIT(11)
+#define SPI_PUSHR_CMD_CTCNT BIT(10)
+#define SPI_PUSHR_CMD_PCS(x) (BIT(x) & GENMASK(5, 0))
+
+#define SPI_PUSHR_SLAVE 0x34
+
+#define SPI_POPR 0x38
+
+#define SPI_TXFR0 0x3c
+#define SPI_TXFR1 0x40
+#define SPI_TXFR2 0x44
+#define SPI_TXFR3 0x48
+#define SPI_RXFR0 0x7c
+#define SPI_RXFR1 0x80
+#define SPI_RXFR2 0x84
+#define SPI_RXFR3 0x88
+
+#define SPI_CTARE(x) (0x11c + (((x) & GENMASK(1, 0)) * 4))
+#define SPI_CTARE_FMSZE(x) (((x) & 0x1) << 16)
+#define SPI_CTARE_DTCP(x) ((x) & 0x7ff)
+
+#define SPI_SREX 0x13c
+
+#define SPI_FRAME_BITS(bits) SPI_CTAR_FMSZ((bits) - 1)
+#define SPI_FRAME_EBITS(bits) SPI_CTARE_FMSZE(((bits) - 1) >> 4)
+
+#define DMA_COMPLETION_TIMEOUT msecs_to_jiffies(3000)
+
+struct chip_data {
+ u32 ctar_val;
+};
+
+enum dspi_trans_mode {
+ DSPI_XSPI_MODE,
+ DSPI_DMA_MODE,
+};
+
+struct fsl_dspi_devtype_data {
+ enum dspi_trans_mode trans_mode;
+ u8 max_clock_factor;
+ int fifo_size;
+};
+
+enum {
+ LS1021A,
+ LS1012A,
+ LS1028A,
+ LS1043A,
+ LS1046A,
+ LS2080A,
+ LS2085A,
+ LX2160A,
+ MCF5441X,
+ VF610,
+};
+
+static const struct fsl_dspi_devtype_data devtype_data[] = {
+ [VF610] = {
+ .trans_mode = DSPI_DMA_MODE,
+ .max_clock_factor = 2,
+ .fifo_size = 4,
+ },
+ [LS1021A] = {
+ /* Has A-011218 DMA erratum */
+ .trans_mode = DSPI_XSPI_MODE,
+ .max_clock_factor = 8,
+ .fifo_size = 4,
+ },
+ [LS1012A] = {
+ /* Has A-011218 DMA erratum */
+ .trans_mode = DSPI_XSPI_MODE,
+ .max_clock_factor = 8,
+ .fifo_size = 16,
+ },
+ [LS1028A] = {
+ .trans_mode = DSPI_XSPI_MODE,
+ .max_clock_factor = 8,
+ .fifo_size = 4,
+ },
+ [LS1043A] = {
+ /* Has A-011218 DMA erratum */
+ .trans_mode = DSPI_XSPI_MODE,
+ .max_clock_factor = 8,
+ .fifo_size = 16,
+ },
+ [LS1046A] = {
+ /* Has A-011218 DMA erratum */
+ .trans_mode = DSPI_XSPI_MODE,
+ .max_clock_factor = 8,
+ .fifo_size = 16,
+ },
+ [LS2080A] = {
+ .trans_mode = DSPI_XSPI_MODE,
+ .max_clock_factor = 8,
+ .fifo_size = 4,
+ },
+ [LS2085A] = {
+ .trans_mode = DSPI_XSPI_MODE,
+ .max_clock_factor = 8,
+ .fifo_size = 4,
+ },
+ [LX2160A] = {
+ .trans_mode = DSPI_XSPI_MODE,
+ .max_clock_factor = 8,
+ .fifo_size = 4,
+ },
+ [MCF5441X] = {
+ .trans_mode = DSPI_DMA_MODE,
+ .max_clock_factor = 8,
+ .fifo_size = 16,
+ },
+};
+
+struct fsl_dspi_dma {
+ u32 *tx_dma_buf;
+ struct dma_chan *chan_tx;
+ dma_addr_t tx_dma_phys;
+ struct completion cmd_tx_complete;
+ struct dma_async_tx_descriptor *tx_desc;
+
+ u32 *rx_dma_buf;
+ struct dma_chan *chan_rx;
+ dma_addr_t rx_dma_phys;
+ struct completion cmd_rx_complete;
+ struct dma_async_tx_descriptor *rx_desc;
+};
+
+struct fsl_dspi {
+ struct spi_controller *ctlr;
+ struct platform_device *pdev;
+
+ struct regmap *regmap;
+ struct regmap *regmap_pushr;
+ int irq;
+ struct clk *clk;
+
+ struct spi_transfer *cur_transfer;
+ struct spi_message *cur_msg;
+ struct chip_data *cur_chip;
+ size_t progress;
+ size_t len;
+ const void *tx;
+ void *rx;
+ u16 tx_cmd;
+ const struct fsl_dspi_devtype_data *devtype_data;
+
+ struct completion xfer_done;
+
+ struct fsl_dspi_dma *dma;
+
+ int oper_word_size;
+ int oper_bits_per_word;
+
+ int words_in_flight;
+
+ /*
+ * Offsets for CMD and TXDATA within SPI_PUSHR when accessed
+ * individually (in XSPI mode)
+ */
+ int pushr_cmd;
+ int pushr_tx;
+
+ void (*host_to_dev)(struct fsl_dspi *dspi, u32 *txdata);
+ void (*dev_to_host)(struct fsl_dspi *dspi, u32 rxdata);
+};
+
+static void dspi_native_host_to_dev(struct fsl_dspi *dspi, u32 *txdata)
+{
+ switch (dspi->oper_word_size) {
+ case 1:
+ *txdata = *(u8 *)dspi->tx;
+ break;
+ case 2:
+ *txdata = *(u16 *)dspi->tx;
+ break;
+ case 4:
+ *txdata = *(u32 *)dspi->tx;
+ break;
+ }
+ dspi->tx += dspi->oper_word_size;
+}
+
+static void dspi_native_dev_to_host(struct fsl_dspi *dspi, u32 rxdata)
+{
+ switch (dspi->oper_word_size) {
+ case 1:
+ *(u8 *)dspi->rx = rxdata;
+ break;
+ case 2:
+ *(u16 *)dspi->rx = rxdata;
+ break;
+ case 4:
+ *(u32 *)dspi->rx = rxdata;
+ break;
+ }
+ dspi->rx += dspi->oper_word_size;
+}
+
+static void dspi_8on32_host_to_dev(struct fsl_dspi *dspi, u32 *txdata)
+{
+ *txdata = cpu_to_be32(*(u32 *)dspi->tx);
+ dspi->tx += sizeof(u32);
+}
+
+static void dspi_8on32_dev_to_host(struct fsl_dspi *dspi, u32 rxdata)
+{
+ *(u32 *)dspi->rx = be32_to_cpu(rxdata);
+ dspi->rx += sizeof(u32);
+}
+
+static void dspi_8on16_host_to_dev(struct fsl_dspi *dspi, u32 *txdata)
+{
+ *txdata = cpu_to_be16(*(u16 *)dspi->tx);
+ dspi->tx += sizeof(u16);
+}
+
+static void dspi_8on16_dev_to_host(struct fsl_dspi *dspi, u32 rxdata)
+{
+ *(u16 *)dspi->rx = be16_to_cpu(rxdata);
+ dspi->rx += sizeof(u16);
+}
+
+static void dspi_16on32_host_to_dev(struct fsl_dspi *dspi, u32 *txdata)
+{
+ u16 hi = *(u16 *)dspi->tx;
+ u16 lo = *(u16 *)(dspi->tx + 2);
+
+ *txdata = (u32)hi << 16 | lo;
+ dspi->tx += sizeof(u32);
+}
+
+static void dspi_16on32_dev_to_host(struct fsl_dspi *dspi, u32 rxdata)
+{
+ u16 hi = rxdata & 0xffff;
+ u16 lo = rxdata >> 16;
+
+ *(u16 *)dspi->rx = lo;
+ *(u16 *)(dspi->rx + 2) = hi;
+ dspi->rx += sizeof(u32);
+}
+
+/*
+ * Pop one word from the TX buffer for pushing into the
+ * PUSHR register (TX FIFO)
+ */
+static u32 dspi_pop_tx(struct fsl_dspi *dspi)
+{
+ u32 txdata = 0;
+
+ if (dspi->tx)
+ dspi->host_to_dev(dspi, &txdata);
+ dspi->len -= dspi->oper_word_size;
+ return txdata;
+}
+
+/* Prepare one TX FIFO entry (txdata plus cmd) */
+static u32 dspi_pop_tx_pushr(struct fsl_dspi *dspi)
+{
+ u16 cmd = dspi->tx_cmd, data = dspi_pop_tx(dspi);
+
+ if (spi_controller_is_slave(dspi->ctlr))
+ return data;
+
+ if (dspi->len > 0)
+ cmd |= SPI_PUSHR_CMD_CONT;
+ return cmd << 16 | data;
+}
+
+/* Push one word to the RX buffer from the POPR register (RX FIFO) */
+static void dspi_push_rx(struct fsl_dspi *dspi, u32 rxdata)
+{
+ if (!dspi->rx)
+ return;
+ dspi->dev_to_host(dspi, rxdata);
+}
+
+static void dspi_tx_dma_callback(void *arg)
+{
+ struct fsl_dspi *dspi = arg;
+ struct fsl_dspi_dma *dma = dspi->dma;
+
+ complete(&dma->cmd_tx_complete);
+}
+
+static void dspi_rx_dma_callback(void *arg)
+{
+ struct fsl_dspi *dspi = arg;
+ struct fsl_dspi_dma *dma = dspi->dma;
+ int i;
+
+ if (dspi->rx) {
+ for (i = 0; i < dspi->words_in_flight; i++)
+ dspi_push_rx(dspi, dspi->dma->rx_dma_buf[i]);
+ }
+
+ complete(&dma->cmd_rx_complete);
+}
+
+static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
+{
+ struct device *dev = &dspi->pdev->dev;
+ struct fsl_dspi_dma *dma = dspi->dma;
+ int time_left;
+ int i;
+
+ for (i = 0; i < dspi->words_in_flight; i++)
+ dspi->dma->tx_dma_buf[i] = dspi_pop_tx_pushr(dspi);
+
+ dma->tx_desc = dmaengine_prep_slave_single(dma->chan_tx,
+ dma->tx_dma_phys,
+ dspi->words_in_flight *
+ DMA_SLAVE_BUSWIDTH_4_BYTES,
+ DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!dma->tx_desc) {
+ dev_err(dev, "Not able to get desc for DMA xfer\n");
+ return -EIO;
+ }
+
+ dma->tx_desc->callback = dspi_tx_dma_callback;
+ dma->tx_desc->callback_param = dspi;
+ if (dma_submit_error(dmaengine_submit(dma->tx_desc))) {
+ dev_err(dev, "DMA submit failed\n");
+ return -EINVAL;
+ }
+
+ dma->rx_desc = dmaengine_prep_slave_single(dma->chan_rx,
+ dma->rx_dma_phys,
+ dspi->words_in_flight *
+ DMA_SLAVE_BUSWIDTH_4_BYTES,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!dma->rx_desc) {
+ dev_err(dev, "Not able to get desc for DMA xfer\n");
+ return -EIO;
+ }
+
+ dma->rx_desc->callback = dspi_rx_dma_callback;
+ dma->rx_desc->callback_param = dspi;
+ if (dma_submit_error(dmaengine_submit(dma->rx_desc))) {
+ dev_err(dev, "DMA submit failed\n");
+ return -EINVAL;
+ }
+
+ reinit_completion(&dspi->dma->cmd_rx_complete);
+ reinit_completion(&dspi->dma->cmd_tx_complete);
+
+ dma_async_issue_pending(dma->chan_rx);
+ dma_async_issue_pending(dma->chan_tx);
+
+ if (spi_controller_is_slave(dspi->ctlr)) {
+ wait_for_completion_interruptible(&dspi->dma->cmd_rx_complete);
+ return 0;
+ }
+
+ time_left = wait_for_completion_timeout(&dspi->dma->cmd_tx_complete,
+ DMA_COMPLETION_TIMEOUT);
+ if (time_left == 0) {
+ dev_err(dev, "DMA tx timeout\n");
+ dmaengine_terminate_all(dma->chan_tx);
+ dmaengine_terminate_all(dma->chan_rx);
+ return -ETIMEDOUT;
+ }
+
+ time_left = wait_for_completion_timeout(&dspi->dma->cmd_rx_complete,
+ DMA_COMPLETION_TIMEOUT);
+ if (time_left == 0) {
+ dev_err(dev, "DMA rx timeout\n");
+ dmaengine_terminate_all(dma->chan_tx);
+ dmaengine_terminate_all(dma->chan_rx);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void dspi_setup_accel(struct fsl_dspi *dspi);
+
+static int dspi_dma_xfer(struct fsl_dspi *dspi)
+{
+ struct spi_message *message = dspi->cur_msg;
+ struct device *dev = &dspi->pdev->dev;
+ int ret = 0;
+
+ /*
+ * dspi->len gets decremented by dspi_pop_tx_pushr in
+ * dspi_next_xfer_dma_submit
+ */
+ while (dspi->len) {
+ /* Figure out operational bits-per-word for this chunk */
+ dspi_setup_accel(dspi);
+
+ dspi->words_in_flight = dspi->len / dspi->oper_word_size;
+ if (dspi->words_in_flight > dspi->devtype_data->fifo_size)
+ dspi->words_in_flight = dspi->devtype_data->fifo_size;
+
+ message->actual_length += dspi->words_in_flight *
+ dspi->oper_word_size;
+
+ ret = dspi_next_xfer_dma_submit(dspi);
+ if (ret) {
+ dev_err(dev, "DMA transfer failed\n");
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
+{
+ int dma_bufsize = dspi->devtype_data->fifo_size * 2;
+ struct device *dev = &dspi->pdev->dev;
+ struct dma_slave_config cfg;
+ struct fsl_dspi_dma *dma;
+ int ret;
+
+ dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
+ if (!dma)
+ return -ENOMEM;
+
+ dma->chan_rx = dma_request_chan(dev, "rx");
+ if (IS_ERR(dma->chan_rx)) {
+ dev_err(dev, "rx dma channel not available\n");
+ ret = PTR_ERR(dma->chan_rx);
+ return ret;
+ }
+
+ dma->chan_tx = dma_request_chan(dev, "tx");
+ if (IS_ERR(dma->chan_tx)) {
+ dev_err(dev, "tx dma channel not available\n");
+ ret = PTR_ERR(dma->chan_tx);
+ goto err_tx_channel;
+ }
+
+ dma->tx_dma_buf = dma_alloc_coherent(dma->chan_tx->device->dev,
+ dma_bufsize, &dma->tx_dma_phys,
+ GFP_KERNEL);
+ if (!dma->tx_dma_buf) {
+ ret = -ENOMEM;
+ goto err_tx_dma_buf;
+ }
+
+ dma->rx_dma_buf = dma_alloc_coherent(dma->chan_rx->device->dev,
+ dma_bufsize, &dma->rx_dma_phys,
+ GFP_KERNEL);
+ if (!dma->rx_dma_buf) {
+ ret = -ENOMEM;
+ goto err_rx_dma_buf;
+ }
+
+ memset(&cfg, 0, sizeof(cfg));
+ cfg.src_addr = phy_addr + SPI_POPR;
+ cfg.dst_addr = phy_addr + SPI_PUSHR;
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.src_maxburst = 1;
+ cfg.dst_maxburst = 1;
+
+ cfg.direction = DMA_DEV_TO_MEM;
+ ret = dmaengine_slave_config(dma->chan_rx, &cfg);
+ if (ret) {
+ dev_err(dev, "can't configure rx dma channel\n");
+ ret = -EINVAL;
+ goto err_slave_config;
+ }
+
+ cfg.direction = DMA_MEM_TO_DEV;
+ ret = dmaengine_slave_config(dma->chan_tx, &cfg);
+ if (ret) {
+ dev_err(dev, "can't configure tx dma channel\n");
+ ret = -EINVAL;
+ goto err_slave_config;
+ }
+
+ dspi->dma = dma;
+ init_completion(&dma->cmd_tx_complete);
+ init_completion(&dma->cmd_rx_complete);
+
+ return 0;
+
+err_slave_config:
+ dma_free_coherent(dma->chan_rx->device->dev,
+ dma_bufsize, dma->rx_dma_buf, dma->rx_dma_phys);
+err_rx_dma_buf:
+ dma_free_coherent(dma->chan_tx->device->dev,
+ dma_bufsize, dma->tx_dma_buf, dma->tx_dma_phys);
+err_tx_dma_buf:
+ dma_release_channel(dma->chan_tx);
+err_tx_channel:
+ dma_release_channel(dma->chan_rx);
+
+ devm_kfree(dev, dma);
+ dspi->dma = NULL;
+
+ return ret;
+}
+
+static void dspi_release_dma(struct fsl_dspi *dspi)
+{
+ int dma_bufsize = dspi->devtype_data->fifo_size * 2;
+ struct fsl_dspi_dma *dma = dspi->dma;
+
+ if (!dma)
+ return;
+
+ if (dma->chan_tx) {
+ dma_free_coherent(dma->chan_tx->device->dev, dma_bufsize,
+ dma->tx_dma_buf, dma->tx_dma_phys);
+ dma_release_channel(dma->chan_tx);
+ }
+
+ if (dma->chan_rx) {
+ dma_free_coherent(dma->chan_rx->device->dev, dma_bufsize,
+ dma->rx_dma_buf, dma->rx_dma_phys);
+ dma_release_channel(dma->chan_rx);
+ }
+}
+
+static void hz_to_spi_baud(char *pbr, char *br, int speed_hz,
+ unsigned long clkrate)
+{
+ /* Valid baud rate pre-scaler values */
+ int pbr_tbl[4] = {2, 3, 5, 7};
+ int brs[16] = { 2, 4, 6, 8,
+ 16, 32, 64, 128,
+ 256, 512, 1024, 2048,
+ 4096, 8192, 16384, 32768 };
+ int scale_needed, scale, minscale = INT_MAX;
+ int i, j;
+
+ scale_needed = clkrate / speed_hz;
+ if (clkrate % speed_hz)
+ scale_needed++;
+
+ for (i = 0; i < ARRAY_SIZE(brs); i++)
+ for (j = 0; j < ARRAY_SIZE(pbr_tbl); j++) {
+ scale = brs[i] * pbr_tbl[j];
+ if (scale >= scale_needed) {
+ if (scale < minscale) {
+ minscale = scale;
+ *br = i;
+ *pbr = j;
+ }
+ break;
+ }
+ }
+
+ if (minscale == INT_MAX) {
+		pr_warn("Cannot find a valid baud rate for speed_hz %d at clkrate %ld; using the max prescaler value.\n",
+			speed_hz, clkrate);
+ *pbr = ARRAY_SIZE(pbr_tbl) - 1;
+ *br = ARRAY_SIZE(brs) - 1;
+ }
+}
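+
+/*
+ * Worked example (illustrative): with clkrate = 100 MHz and speed_hz = 10 MHz,
+ * scale_needed is 10; the smallest pbr_tbl[j] * brs[i] product that is >= 10
+ * is 5 * 2 = 10, so *pbr = 2 (prescaler 5) and *br = 0 (scaler 2), giving an
+ * SCK of exactly 10 MHz.
+ */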
+
+static void ns_delay_scale(char *psc, char *sc, int delay_ns,
+ unsigned long clkrate)
+{
+ int scale_needed, scale, minscale = INT_MAX;
+ int pscale_tbl[4] = {1, 3, 5, 7};
+ u32 remainder;
+ int i, j;
+
+ scale_needed = div_u64_rem((u64)delay_ns * clkrate, NSEC_PER_SEC,
+ &remainder);
+ if (remainder)
+ scale_needed++;
+
+ for (i = 0; i < ARRAY_SIZE(pscale_tbl); i++)
+ for (j = 0; j <= SPI_CTAR_SCALE_BITS; j++) {
+ scale = pscale_tbl[i] * (2 << j);
+ if (scale >= scale_needed) {
+ if (scale < minscale) {
+ minscale = scale;
+ *psc = i;
+ *sc = j;
+ }
+ break;
+ }
+ }
+
+ if (minscale == INT_MAX) {
+ pr_warn("Cannot find correct scale values for %dns delay at clkrate %ld, using max prescaler value",
+ delay_ns, clkrate);
+ *psc = ARRAY_SIZE(pscale_tbl) - 1;
+ *sc = SPI_CTAR_SCALE_BITS;
+ }
+}
+
+static void dspi_pushr_cmd_write(struct fsl_dspi *dspi, u16 cmd)
+{
+ /*
+ * The only time when the PCS doesn't need continuation after this word
+ * is when it's last. We need to look ahead, because we actually call
+ * dspi_pop_tx (the function that decrements dspi->len) _after_
+ * dspi_pushr_cmd_write with XSPI mode. As for how much in advance? One
+ * word is enough. If there's more to transmit than that,
+ * dspi_xspi_write will know to split the FIFO writes in 2, and
+ * generate a new PUSHR command with the final word that will have PCS
+ * deasserted (not continued) here.
+ */
+ if (dspi->len > dspi->oper_word_size)
+ cmd |= SPI_PUSHR_CMD_CONT;
+ regmap_write(dspi->regmap_pushr, dspi->pushr_cmd, cmd);
+}
+
+static void dspi_pushr_txdata_write(struct fsl_dspi *dspi, u16 txdata)
+{
+ regmap_write(dspi->regmap_pushr, dspi->pushr_tx, txdata);
+}
+
+static void dspi_xspi_fifo_write(struct fsl_dspi *dspi, int num_words)
+{
+ int num_bytes = num_words * dspi->oper_word_size;
+ u16 tx_cmd = dspi->tx_cmd;
+
+ /*
+ * If the PCS needs to de-assert (i.e. we're at the end of the buffer
+ * and cs_change does not want the PCS to stay on), then we need a new
+ * PUSHR command, since this one (for the body of the buffer)
+ * necessarily has the CONT bit set.
+ * So send one word less during this go, to force a split and a command
+ * with a single word next time, when CONT will be unset.
+ */
+ if (!(dspi->tx_cmd & SPI_PUSHR_CMD_CONT) && num_bytes == dspi->len)
+ tx_cmd |= SPI_PUSHR_CMD_EOQ;
+
+ /* Update CTARE */
+ regmap_write(dspi->regmap, SPI_CTARE(0),
+ SPI_FRAME_EBITS(dspi->oper_bits_per_word) |
+ SPI_CTARE_DTCP(num_words));
+
+ /*
+ * Write the CMD FIFO entry first, and then the two
+ * corresponding TX FIFO entries (or one...).
+ */
+ dspi_pushr_cmd_write(dspi, tx_cmd);
+
+ /* Fill TX FIFO with as many transfers as possible */
+ while (num_words--) {
+ u32 data = dspi_pop_tx(dspi);
+
+ dspi_pushr_txdata_write(dspi, data & 0xFFFF);
+ if (dspi->oper_bits_per_word > 16)
+ dspi_pushr_txdata_write(dspi, data >> 16);
+ }
+}
+
+static u32 dspi_popr_read(struct fsl_dspi *dspi)
+{
+ u32 rxdata = 0;
+
+ regmap_read(dspi->regmap, SPI_POPR, &rxdata);
+ return rxdata;
+}
+
+static void dspi_fifo_read(struct fsl_dspi *dspi)
+{
+ int num_fifo_entries = dspi->words_in_flight;
+
+ /* Read one FIFO entry and push to rx buffer */
+ while (num_fifo_entries--)
+ dspi_push_rx(dspi, dspi_popr_read(dspi));
+}
+
+static void dspi_setup_accel(struct fsl_dspi *dspi)
+{
+ struct spi_transfer *xfer = dspi->cur_transfer;
+ bool odd = !!(dspi->len & 1);
+
+ /* No accel for frames not multiple of 8 bits at the moment */
+ if (xfer->bits_per_word % 8)
+ goto no_accel;
+
+ if (!odd && dspi->len <= dspi->devtype_data->fifo_size * 2) {
+ dspi->oper_bits_per_word = 16;
+ } else if (odd && dspi->len <= dspi->devtype_data->fifo_size) {
+ dspi->oper_bits_per_word = 8;
+ } else {
+ /* Start off with maximum supported by hardware */
+ if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE)
+ dspi->oper_bits_per_word = 32;
+ else
+ dspi->oper_bits_per_word = 16;
+
+ /*
+ * And go down only if the buffer can't be sent with
+ * words this big
+ */
+ do {
+ if (dspi->len >= DIV_ROUND_UP(dspi->oper_bits_per_word, 8))
+ break;
+
+ dspi->oper_bits_per_word /= 2;
+ } while (dspi->oper_bits_per_word > 8);
+ }
+
+ if (xfer->bits_per_word == 8 && dspi->oper_bits_per_word == 32) {
+ dspi->dev_to_host = dspi_8on32_dev_to_host;
+ dspi->host_to_dev = dspi_8on32_host_to_dev;
+ } else if (xfer->bits_per_word == 8 && dspi->oper_bits_per_word == 16) {
+ dspi->dev_to_host = dspi_8on16_dev_to_host;
+ dspi->host_to_dev = dspi_8on16_host_to_dev;
+ } else if (xfer->bits_per_word == 16 && dspi->oper_bits_per_word == 32) {
+ dspi->dev_to_host = dspi_16on32_dev_to_host;
+ dspi->host_to_dev = dspi_16on32_host_to_dev;
+ } else {
+no_accel:
+ dspi->dev_to_host = dspi_native_dev_to_host;
+ dspi->host_to_dev = dspi_native_host_to_dev;
+ dspi->oper_bits_per_word = xfer->bits_per_word;
+ }
+
+ dspi->oper_word_size = DIV_ROUND_UP(dspi->oper_bits_per_word, 8);
+
+ /*
+ * Update CTAR here (code is common for XSPI and DMA modes).
+ * We will update CTARE in the portion specific to XSPI, when we
+ * also know the preload value (DTCP).
+ */
+ regmap_write(dspi->regmap, SPI_CTAR(0),
+ dspi->cur_chip->ctar_val |
+ SPI_FRAME_BITS(dspi->oper_bits_per_word));
+}
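+
+/*
+ * Worked example (illustrative): on an XSPI-mode controller with a 4-entry
+ * FIFO, an 11-byte transfer at 8 bits per word is first sent as two
+ * accelerated 32-bit words (8 bytes, two FIFO entries each); dspi_setup_accel()
+ * is then re-run for the remaining 3 bytes and falls back to three native
+ * 8-bit words.
+ */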
+
+static void dspi_fifo_write(struct fsl_dspi *dspi)
+{
+ int num_fifo_entries = dspi->devtype_data->fifo_size;
+ struct spi_transfer *xfer = dspi->cur_transfer;
+ struct spi_message *msg = dspi->cur_msg;
+ int num_words, num_bytes;
+
+ dspi_setup_accel(dspi);
+
+ /* In XSPI mode each 32-bit word occupies 2 TX FIFO entries */
+ if (dspi->oper_word_size == 4)
+ num_fifo_entries /= 2;
+
+ /*
+ * Integer division intentionally trims off odd (or non-multiple of 4)
+ * numbers of bytes at the end of the buffer, which will be sent next
+ * time using a smaller oper_word_size.
+ */
+ num_words = dspi->len / dspi->oper_word_size;
+ if (num_words > num_fifo_entries)
+ num_words = num_fifo_entries;
+
+ /* Update total number of bytes that were transferred */
+ num_bytes = num_words * dspi->oper_word_size;
+ msg->actual_length += num_bytes;
+ dspi->progress += num_bytes / DIV_ROUND_UP(xfer->bits_per_word, 8);
+
+ /*
+ * Update shared variable for use in the next interrupt (both in
+ * dspi_fifo_read and in dspi_fifo_write).
+ */
+ dspi->words_in_flight = num_words;
+
+ spi_take_timestamp_pre(dspi->ctlr, xfer, dspi->progress, !dspi->irq);
+
+ dspi_xspi_fifo_write(dspi, num_words);
+ /*
+ * Everything after this point is in a potential race with the next
+ * interrupt, so we must never use dspi->words_in_flight again since it
+ * might already be modified by the next dspi_fifo_write.
+ */
+
+ spi_take_timestamp_post(dspi->ctlr, dspi->cur_transfer,
+ dspi->progress, !dspi->irq);
+}
+
+static int dspi_rxtx(struct fsl_dspi *dspi)
+{
+ dspi_fifo_read(dspi);
+
+ if (!dspi->len)
+ /* Success! */
+ return 0;
+
+ dspi_fifo_write(dspi);
+
+ return -EINPROGRESS;
+}
+
+static int dspi_poll(struct fsl_dspi *dspi)
+{
+ int tries = 1000;
+ u32 spi_sr;
+
+ do {
+ regmap_read(dspi->regmap, SPI_SR, &spi_sr);
+ regmap_write(dspi->regmap, SPI_SR, spi_sr);
+
+ if (spi_sr & SPI_SR_CMDTCF)
+ break;
+ } while (--tries);
+
+ if (!tries)
+ return -ETIMEDOUT;
+
+ return dspi_rxtx(dspi);
+}
+
+static irqreturn_t dspi_interrupt(int irq, void *dev_id)
+{
+ struct fsl_dspi *dspi = (struct fsl_dspi *)dev_id;
+ u32 spi_sr;
+
+ regmap_read(dspi->regmap, SPI_SR, &spi_sr);
+ regmap_write(dspi->regmap, SPI_SR, spi_sr);
+
+ if (!(spi_sr & SPI_SR_CMDTCF))
+ return IRQ_NONE;
+
+ if (dspi_rxtx(dspi) == 0)
+ complete(&dspi->xfer_done);
+
+ return IRQ_HANDLED;
+}
+
+static int dspi_transfer_one_message(struct spi_controller *ctlr,
+ struct spi_message *message)
+{
+ struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr);
+ struct spi_device *spi = message->spi;
+ struct spi_transfer *transfer;
+ int status = 0;
+
+ message->actual_length = 0;
+
+ list_for_each_entry(transfer, &message->transfers, transfer_list) {
+ dspi->cur_transfer = transfer;
+ dspi->cur_msg = message;
+ dspi->cur_chip = spi_get_ctldata(spi);
+ /* Prepare command word for CMD FIFO */
+ dspi->tx_cmd = SPI_PUSHR_CMD_CTAS(0) |
+ SPI_PUSHR_CMD_PCS(spi->chip_select);
+ if (list_is_last(&dspi->cur_transfer->transfer_list,
+ &dspi->cur_msg->transfers)) {
+ /* Leave PCS activated after last transfer when
+ * cs_change is set.
+ */
+ if (transfer->cs_change)
+ dspi->tx_cmd |= SPI_PUSHR_CMD_CONT;
+ } else {
+ /* Keep PCS active between transfers in same message
+ * when cs_change is not set, and de-activate PCS
+ * between transfers in the same message when
+ * cs_change is set.
+ */
+ if (!transfer->cs_change)
+ dspi->tx_cmd |= SPI_PUSHR_CMD_CONT;
+ }
+
+ dspi->tx = transfer->tx_buf;
+ dspi->rx = transfer->rx_buf;
+ dspi->len = transfer->len;
+ dspi->progress = 0;
+
+ regmap_update_bits(dspi->regmap, SPI_MCR,
+ SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF,
+ SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF);
+
+ spi_take_timestamp_pre(dspi->ctlr, dspi->cur_transfer,
+ dspi->progress, !dspi->irq);
+
+ if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
+ status = dspi_dma_xfer(dspi);
+ } else {
+ dspi_fifo_write(dspi);
+
+ if (dspi->irq) {
+ wait_for_completion(&dspi->xfer_done);
+ reinit_completion(&dspi->xfer_done);
+ } else {
+ do {
+ status = dspi_poll(dspi);
+ } while (status == -EINPROGRESS);
+ }
+ }
+ if (status)
+ break;
+
+ spi_transfer_delay_exec(transfer);
+ }
+
+ message->status = status;
+ spi_finalize_current_message(ctlr);
+
+ return status;
+}
+
+static int dspi_setup(struct spi_device *spi)
+{
+ struct fsl_dspi *dspi = spi_controller_get_devdata(spi->controller);
+ u32 period_ns = DIV_ROUND_UP(NSEC_PER_SEC, spi->max_speed_hz);
+ unsigned char br = 0, pbr = 0, pcssck = 0, cssck = 0;
+ u32 quarter_period_ns = DIV_ROUND_UP(period_ns, 4);
+ u32 cs_sck_delay = 0, sck_cs_delay = 0;
+ struct fsl_dspi_platform_data *pdata;
+ unsigned char pasc = 0, asc = 0;
+ struct chip_data *chip;
+ unsigned long clkrate;
+
+ /* Only alloc on first setup */
+ chip = spi_get_ctldata(spi);
+ if (chip == NULL) {
+ chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+ }
+
+ pdata = dev_get_platdata(&dspi->pdev->dev);
+
+ if (!pdata) {
+ of_property_read_u32(spi->dev.of_node, "fsl,spi-cs-sck-delay",
+ &cs_sck_delay);
+
+ of_property_read_u32(spi->dev.of_node, "fsl,spi-sck-cs-delay",
+ &sck_cs_delay);
+ } else {
+ cs_sck_delay = pdata->cs_sck_delay;
+ sck_cs_delay = pdata->sck_cs_delay;
+ }
+
+ /* Since tCSC and tASC apply to continuous transfers too, avoid SCK
+ * glitches of half a cycle by never allowing tCSC + tASC to go below
+ * half a SCK period.
+ */
+ if (cs_sck_delay < quarter_period_ns)
+ cs_sck_delay = quarter_period_ns;
+ if (sck_cs_delay < quarter_period_ns)
+ sck_cs_delay = quarter_period_ns;
+
+ dev_dbg(&spi->dev,
+ "DSPI controller timing params: CS-to-SCK delay %u ns, SCK-to-CS delay %u ns\n",
+ cs_sck_delay, sck_cs_delay);
+
+ clkrate = clk_get_rate(dspi->clk);
+ hz_to_spi_baud(&pbr, &br, spi->max_speed_hz, clkrate);
+
+ /* Set PCS to SCK delay scale values */
+ ns_delay_scale(&pcssck, &cssck, cs_sck_delay, clkrate);
+
+ /* Set After SCK delay scale values */
+ ns_delay_scale(&pasc, &asc, sck_cs_delay, clkrate);
+
+ chip->ctar_val = 0;
+ if (spi->mode & SPI_CPOL)
+ chip->ctar_val |= SPI_CTAR_CPOL;
+ if (spi->mode & SPI_CPHA)
+ chip->ctar_val |= SPI_CTAR_CPHA;
+
+ if (!spi_controller_is_slave(dspi->ctlr)) {
+ chip->ctar_val |= SPI_CTAR_PCSSCK(pcssck) |
+ SPI_CTAR_CSSCK(cssck) |
+ SPI_CTAR_PASC(pasc) |
+ SPI_CTAR_ASC(asc) |
+ SPI_CTAR_PBR(pbr) |
+ SPI_CTAR_BR(br);
+
+ if (spi->mode & SPI_LSB_FIRST)
+ chip->ctar_val |= SPI_CTAR_LSBFE;
+ }
+
+ spi_set_ctldata(spi, chip);
+
+ return 0;
+}
+
+static void dspi_cleanup(struct spi_device *spi)
+{
+ struct chip_data *chip = spi_get_ctldata((struct spi_device *)spi);
+
+ dev_dbg(&spi->dev, "spi_device %u.%u cleanup\n",
+ spi->controller->bus_num, spi->chip_select);
+
+ kfree(chip);
+}
+
+static const struct of_device_id fsl_dspi_dt_ids[] = {
+ {
+ .compatible = "fsl,vf610-dspi",
+ .data = &devtype_data[VF610],
+ }, {
+ .compatible = "fsl,ls1021a-v1.0-dspi",
+ .data = &devtype_data[LS1021A],
+ }, {
+ .compatible = "fsl,ls1012a-dspi",
+ .data = &devtype_data[LS1012A],
+ }, {
+ .compatible = "fsl,ls1028a-dspi",
+ .data = &devtype_data[LS1028A],
+ }, {
+ .compatible = "fsl,ls1043a-dspi",
+ .data = &devtype_data[LS1043A],
+ }, {
+ .compatible = "fsl,ls1046a-dspi",
+ .data = &devtype_data[LS1046A],
+ }, {
+ .compatible = "fsl,ls2080a-dspi",
+ .data = &devtype_data[LS2080A],
+ }, {
+ .compatible = "fsl,ls2085a-dspi",
+ .data = &devtype_data[LS2085A],
+ }, {
+ .compatible = "fsl,lx2160a-dspi",
+ .data = &devtype_data[LX2160A],
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, fsl_dspi_dt_ids);
+
+#ifdef CONFIG_PM_SLEEP
+static int dspi_suspend(struct device *dev)
+{
+ struct fsl_dspi *dspi = dev_get_drvdata(dev);
+
+ if (dspi->irq)
+ disable_irq(dspi->irq);
+ spi_controller_suspend(dspi->ctlr);
+ clk_disable_unprepare(dspi->clk);
+
+ pinctrl_pm_select_sleep_state(dev);
+
+ return 0;
+}
+
+static int dspi_resume(struct device *dev)
+{
+ struct fsl_dspi *dspi = dev_get_drvdata(dev);
+ int ret;
+
+ pinctrl_pm_select_default_state(dev);
+
+ ret = clk_prepare_enable(dspi->clk);
+ if (ret)
+ return ret;
+ spi_controller_resume(dspi->ctlr);
+ if (dspi->irq)
+ enable_irq(dspi->irq);
+
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(dspi_pm, dspi_suspend, dspi_resume);
+
+static const struct regmap_range dspi_volatile_ranges[] = {
+ regmap_reg_range(SPI_MCR, SPI_TCR),
+ regmap_reg_range(SPI_SR, SPI_SR),
+ regmap_reg_range(SPI_PUSHR, SPI_RXFR3),
+};
+
+static const struct regmap_access_table dspi_volatile_table = {
+ .yes_ranges = dspi_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dspi_volatile_ranges),
+};
+
+static const struct regmap_config dspi_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0x88,
+ .volatile_table = &dspi_volatile_table,
+};
+
+static const struct regmap_range dspi_xspi_volatile_ranges[] = {
+ regmap_reg_range(SPI_MCR, SPI_TCR),
+ regmap_reg_range(SPI_SR, SPI_SR),
+ regmap_reg_range(SPI_PUSHR, SPI_RXFR3),
+ regmap_reg_range(SPI_SREX, SPI_SREX),
+};
+
+static const struct regmap_access_table dspi_xspi_volatile_table = {
+ .yes_ranges = dspi_xspi_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dspi_xspi_volatile_ranges),
+};
+
+static const struct regmap_config dspi_xspi_regmap_config[] = {
+ {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0x13c,
+ .volatile_table = &dspi_xspi_volatile_table,
+ },
+ {
+ .name = "pushr",
+ .reg_bits = 16,
+ .val_bits = 16,
+ .reg_stride = 2,
+ .max_register = 0x2,
+ },
+};
+
+static int dspi_init(struct fsl_dspi *dspi)
+{
+ unsigned int mcr;
+
+ /* Set idle states for all chip select signals to high */
+ mcr = SPI_MCR_PCSIS(GENMASK(dspi->ctlr->max_native_cs - 1, 0));
+
+ if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE)
+ mcr |= SPI_MCR_XSPI;
+ if (!spi_controller_is_slave(dspi->ctlr))
+ mcr |= SPI_MCR_MASTER;
+
+ regmap_write(dspi->regmap, SPI_MCR, mcr);
+ regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR);
+
+ switch (dspi->devtype_data->trans_mode) {
+ case DSPI_XSPI_MODE:
+ regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_CMDTCFE);
+ break;
+ case DSPI_DMA_MODE:
+ regmap_write(dspi->regmap, SPI_RSER,
+ SPI_RSER_TFFFE | SPI_RSER_TFFFD |
+ SPI_RSER_RFDFE | SPI_RSER_RFDFD);
+ break;
+ default:
+ dev_err(&dspi->pdev->dev, "unsupported trans_mode %u\n",
+ dspi->devtype_data->trans_mode);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int dspi_slave_abort(struct spi_master *master)
+{
+ struct fsl_dspi *dspi = spi_master_get_devdata(master);
+
+ /*
+	 * Terminate all pending DMA transactions when the controller is
+	 * working in slave mode.
+ */
+ if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
+ dmaengine_terminate_sync(dspi->dma->chan_rx);
+ dmaengine_terminate_sync(dspi->dma->chan_tx);
+ }
+
+ /* Clear the internal DSPI RX and TX FIFO buffers */
+ regmap_update_bits(dspi->regmap, SPI_MCR,
+ SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF,
+ SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF);
+
+ return 0;
+}
+
+static int dspi_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ const struct regmap_config *regmap_config;
+ struct fsl_dspi_platform_data *pdata;
+ struct spi_controller *ctlr;
+ int ret, cs_num, bus_num = -1;
+ struct fsl_dspi *dspi;
+ struct resource *res;
+ void __iomem *base;
+ bool big_endian;
+
+ dspi = devm_kzalloc(&pdev->dev, sizeof(*dspi), GFP_KERNEL);
+ if (!dspi)
+ return -ENOMEM;
+
+ ctlr = spi_alloc_master(&pdev->dev, 0);
+ if (!ctlr)
+ return -ENOMEM;
+
+ spi_controller_set_devdata(ctlr, dspi);
+ platform_set_drvdata(pdev, dspi);
+
+ dspi->pdev = pdev;
+ dspi->ctlr = ctlr;
+
+ ctlr->setup = dspi_setup;
+ ctlr->transfer_one_message = dspi_transfer_one_message;
+ ctlr->dev.of_node = pdev->dev.of_node;
+
+ ctlr->cleanup = dspi_cleanup;
+ ctlr->slave_abort = dspi_slave_abort;
+ ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
+
+ pdata = dev_get_platdata(&pdev->dev);
+ if (pdata) {
+ ctlr->num_chipselect = ctlr->max_native_cs = pdata->cs_num;
+ ctlr->bus_num = pdata->bus_num;
+
+		/* Only ColdFire uses platform data */
+ dspi->devtype_data = &devtype_data[MCF5441X];
+ big_endian = true;
+ } else {
+
+ ret = of_property_read_u32(np, "spi-num-chipselects", &cs_num);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can't get spi-num-chipselects\n");
+ goto out_ctlr_put;
+ }
+ ctlr->num_chipselect = ctlr->max_native_cs = cs_num;
+
+ of_property_read_u32(np, "bus-num", &bus_num);
+ ctlr->bus_num = bus_num;
+
+ if (of_property_read_bool(np, "spi-slave"))
+ ctlr->slave = true;
+
+ dspi->devtype_data = of_device_get_match_data(&pdev->dev);
+ if (!dspi->devtype_data) {
+ dev_err(&pdev->dev, "can't get devtype_data\n");
+ ret = -EFAULT;
+ goto out_ctlr_put;
+ }
+
+ big_endian = of_device_is_big_endian(np);
+ }
+ if (big_endian) {
+ dspi->pushr_cmd = 0;
+ dspi->pushr_tx = 2;
+ } else {
+ dspi->pushr_cmd = 2;
+ dspi->pushr_tx = 0;
+ }
+
+ if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE)
+ ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
+ else
+ ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
+
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(base)) {
+ ret = PTR_ERR(base);
+ goto out_ctlr_put;
+ }
+
+ if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE)
+ regmap_config = &dspi_xspi_regmap_config[0];
+ else
+ regmap_config = &dspi_regmap_config;
+ dspi->regmap = devm_regmap_init_mmio(&pdev->dev, base, regmap_config);
+ if (IS_ERR(dspi->regmap)) {
+ dev_err(&pdev->dev, "failed to init regmap: %ld\n",
+ PTR_ERR(dspi->regmap));
+ ret = PTR_ERR(dspi->regmap);
+ goto out_ctlr_put;
+ }
+
+ if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE) {
+ dspi->regmap_pushr = devm_regmap_init_mmio(
+ &pdev->dev, base + SPI_PUSHR,
+ &dspi_xspi_regmap_config[1]);
+ if (IS_ERR(dspi->regmap_pushr)) {
+ dev_err(&pdev->dev,
+ "failed to init pushr regmap: %ld\n",
+ PTR_ERR(dspi->regmap_pushr));
+ ret = PTR_ERR(dspi->regmap_pushr);
+ goto out_ctlr_put;
+ }
+ }
+
+ dspi->clk = devm_clk_get(&pdev->dev, "dspi");
+ if (IS_ERR(dspi->clk)) {
+ ret = PTR_ERR(dspi->clk);
+ dev_err(&pdev->dev, "unable to get clock\n");
+ goto out_ctlr_put;
+ }
+ ret = clk_prepare_enable(dspi->clk);
+ if (ret)
+ goto out_ctlr_put;
+
+ ret = dspi_init(dspi);
+ if (ret)
+ goto out_clk_put;
+
+ dspi->irq = platform_get_irq(pdev, 0);
+ if (dspi->irq <= 0) {
+ dev_info(&pdev->dev,
+ "can't get platform irq, using poll mode\n");
+ dspi->irq = 0;
+ goto poll_mode;
+ }
+
+ init_completion(&dspi->xfer_done);
+
+ ret = request_threaded_irq(dspi->irq, dspi_interrupt, NULL,
+ IRQF_SHARED, pdev->name, dspi);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Unable to attach DSPI interrupt\n");
+ goto out_clk_put;
+ }
+
+poll_mode:
+
+ if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
+ ret = dspi_request_dma(dspi, res->start);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can't get dma channels\n");
+ goto out_free_irq;
+ }
+ }
+
+ ctlr->max_speed_hz =
+ clk_get_rate(dspi->clk) / dspi->devtype_data->max_clock_factor;
+
+ if (dspi->devtype_data->trans_mode != DSPI_DMA_MODE)
+ ctlr->ptp_sts_supported = true;
+
+ ret = spi_register_controller(ctlr);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "Problem registering DSPI ctlr\n");
+ goto out_release_dma;
+ }
+
+ return ret;
+
+out_release_dma:
+ dspi_release_dma(dspi);
+out_free_irq:
+ if (dspi->irq)
+ free_irq(dspi->irq, dspi);
+out_clk_put:
+ clk_disable_unprepare(dspi->clk);
+out_ctlr_put:
+ spi_controller_put(ctlr);
+
+ return ret;
+}
+
+static int dspi_remove(struct platform_device *pdev)
+{
+ struct fsl_dspi *dspi = platform_get_drvdata(pdev);
+
+ /* Disconnect from the SPI framework */
+ spi_unregister_controller(dspi->ctlr);
+
+ /* Disable RX and TX */
+ regmap_update_bits(dspi->regmap, SPI_MCR,
+ SPI_MCR_DIS_TXF | SPI_MCR_DIS_RXF,
+ SPI_MCR_DIS_TXF | SPI_MCR_DIS_RXF);
+
+ /* Stop Running */
+ regmap_update_bits(dspi->regmap, SPI_MCR, SPI_MCR_HALT, SPI_MCR_HALT);
+
+ dspi_release_dma(dspi);
+ if (dspi->irq)
+ free_irq(dspi->irq, dspi);
+ clk_disable_unprepare(dspi->clk);
+
+ return 0;
+}
+
+static void dspi_shutdown(struct platform_device *pdev)
+{
+ dspi_remove(pdev);
+}
+
+static struct platform_driver fsl_dspi_driver = {
+ .driver.name = DRIVER_NAME,
+ .driver.of_match_table = fsl_dspi_dt_ids,
+ .driver.owner = THIS_MODULE,
+ .driver.pm = &dspi_pm,
+ .probe = dspi_probe,
+ .remove = dspi_remove,
+ .shutdown = dspi_shutdown,
+};
+module_platform_driver(fsl_dspi_driver);
+
+MODULE_DESCRIPTION("Freescale DSPI Controller Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
new file mode 100644
index 000000000..f7066bef7
--- /dev/null
+++ b/drivers/spi/spi-fsl-espi.c
@@ -0,0 +1,846 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Freescale eSPI controller driver.
+ *
+ * Copyright 2010 Freescale Semiconductor, Inc.
+ */
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/fsl_devices.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/pm_runtime.h>
+#include <sysdev/fsl_soc.h>
+
+/* eSPI Controller registers */
+#define ESPI_SPMODE 0x00 /* eSPI mode register */
+#define ESPI_SPIE 0x04 /* eSPI event register */
+#define ESPI_SPIM 0x08 /* eSPI mask register */
+#define ESPI_SPCOM 0x0c /* eSPI command register */
+#define ESPI_SPITF	0x10	/* eSPI transmit FIFO access register */
+#define ESPI_SPIRF	0x14	/* eSPI receive FIFO access register */
+#define ESPI_SPMODE0 0x20 /* eSPI cs0 mode register */
+
+#define ESPI_SPMODEx(x) (ESPI_SPMODE0 + (x) * 4)
+
+/* eSPI Controller mode register definitions */
+#define SPMODE_ENABLE BIT(31)
+#define SPMODE_LOOP BIT(30)
+#define SPMODE_TXTHR(x) ((x) << 8)
+#define SPMODE_RXTHR(x) ((x) << 0)
+
+/* eSPI Controller CS mode register definitions */
+#define CSMODE_CI_INACTIVEHIGH BIT(31)
+#define CSMODE_CP_BEGIN_EDGECLK BIT(30)
+#define CSMODE_REV BIT(29)
+#define CSMODE_DIV16 BIT(28)
+#define CSMODE_PM(x) ((x) << 24)
+#define CSMODE_POL_1 BIT(20)
+#define CSMODE_LEN(x) ((x) << 16)
+#define CSMODE_BEF(x) ((x) << 12)
+#define CSMODE_AFT(x) ((x) << 8)
+#define CSMODE_CG(x) ((x) << 3)
+
+#define FSL_ESPI_FIFO_SIZE 32
+#define FSL_ESPI_RXTHR 15
+
+/* Default mode/csmode for eSPI controller */
+#define SPMODE_INIT_VAL (SPMODE_TXTHR(4) | SPMODE_RXTHR(FSL_ESPI_RXTHR))
+#define CSMODE_INIT_VAL (CSMODE_POL_1 | CSMODE_BEF(0) \
+ | CSMODE_AFT(0) | CSMODE_CG(1))
+
+/* SPIE register values */
+#define SPIE_RXCNT(reg) ((reg >> 24) & 0x3F)
+#define SPIE_TXCNT(reg) ((reg >> 16) & 0x3F)
+#define SPIE_TXE BIT(15) /* TX FIFO empty */
+#define SPIE_DON BIT(14) /* TX done */
+#define SPIE_RXT BIT(13) /* RX FIFO threshold */
+#define SPIE_RXF BIT(12) /* RX FIFO full */
+#define SPIE_TXT	BIT(11)	/* TX FIFO threshold */
+#define SPIE_RNE BIT(9) /* RX FIFO not empty */
+#define SPIE_TNF BIT(8) /* TX FIFO not full */
+
+/* SPIM register values */
+#define SPIM_TXE BIT(15) /* TX FIFO empty */
+#define SPIM_DON BIT(14) /* TX done */
+#define SPIM_RXT BIT(13) /* RX FIFO threshold */
+#define SPIM_RXF BIT(12) /* RX FIFO full */
+#define SPIM_TXT	BIT(11)	/* TX FIFO threshold */
+#define SPIM_RNE BIT(9) /* RX FIFO not empty */
+#define SPIM_TNF BIT(8) /* TX FIFO not full */
+
+/* SPCOM register values */
+#define SPCOM_CS(x) ((x) << 30)
+#define SPCOM_DO BIT(28) /* Dual output */
+#define SPCOM_TO BIT(27) /* TX only */
+#define SPCOM_RXSKIP(x) ((x) << 16)
+#define SPCOM_TRANLEN(x) ((x) << 0)
+
+#define SPCOM_TRANLEN_MAX 0x10000 /* Max transaction length */
+
+#define AUTOSUSPEND_TIMEOUT 2000
+
+struct fsl_espi {
+ struct device *dev;
+ void __iomem *reg_base;
+
+ struct list_head *m_transfers;
+ struct spi_transfer *tx_t;
+ unsigned int tx_pos;
+ bool tx_done;
+ struct spi_transfer *rx_t;
+ unsigned int rx_pos;
+ bool rx_done;
+
+ bool swab;
+ unsigned int rxskip;
+
+ spinlock_t lock;
+
+ u32 spibrg; /* SPIBRG input clock */
+
+ struct completion done;
+};
+
+struct fsl_espi_cs {
+ u32 hw_mode;
+};
+
+static inline u32 fsl_espi_read_reg(struct fsl_espi *espi, int offset)
+{
+ return ioread32be(espi->reg_base + offset);
+}
+
+static inline u16 fsl_espi_read_reg16(struct fsl_espi *espi, int offset)
+{
+ return ioread16be(espi->reg_base + offset);
+}
+
+static inline u8 fsl_espi_read_reg8(struct fsl_espi *espi, int offset)
+{
+ return ioread8(espi->reg_base + offset);
+}
+
+static inline void fsl_espi_write_reg(struct fsl_espi *espi, int offset,
+ u32 val)
+{
+ iowrite32be(val, espi->reg_base + offset);
+}
+
+static inline void fsl_espi_write_reg16(struct fsl_espi *espi, int offset,
+ u16 val)
+{
+ iowrite16be(val, espi->reg_base + offset);
+}
+
+static inline void fsl_espi_write_reg8(struct fsl_espi *espi, int offset,
+ u8 val)
+{
+ iowrite8(val, espi->reg_base + offset);
+}
+
+static int fsl_espi_check_message(struct spi_message *m)
+{
+ struct fsl_espi *espi = spi_master_get_devdata(m->spi->master);
+ struct spi_transfer *t, *first;
+
+ if (m->frame_length > SPCOM_TRANLEN_MAX) {
+ dev_err(espi->dev, "message too long, size is %u bytes\n",
+ m->frame_length);
+ return -EMSGSIZE;
+ }
+
+ first = list_first_entry(&m->transfers, struct spi_transfer,
+ transfer_list);
+
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ if (first->bits_per_word != t->bits_per_word ||
+ first->speed_hz != t->speed_hz) {
+ dev_err(espi->dev, "bits_per_word/speed_hz should be the same for all transfers\n");
+ return -EINVAL;
+ }
+ }
+
+ /* ESPI supports MSB-first transfers for word size 8 / 16 only */
+ if (!(m->spi->mode & SPI_LSB_FIRST) && first->bits_per_word != 8 &&
+ first->bits_per_word != 16) {
+ dev_err(espi->dev,
+ "MSB-first transfer not supported for wordsize %u\n",
+ first->bits_per_word);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static unsigned int fsl_espi_check_rxskip_mode(struct spi_message *m)
+{
+ struct spi_transfer *t;
+ unsigned int i = 0, rxskip = 0;
+
+ /*
+ * prerequisites for ESPI rxskip mode:
+ * - message has two transfers
+ * - first transfer is a write and second is a read
+ *
+	 * In addition, the current low-level transfer mechanism requires
+	 * that the rxskip bytes fit into the TX FIFO. Otherwise the transfer
+	 * would hang, because the TX FIFO isn't refilled after the first
+	 * FSL_ESPI_FIFO_SIZE bytes.
+ */
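+	/*
+	 * A typical (hypothetical) candidate is a flash-style read message:
+	 * transfer 0 writes a short command/address (tx_buf set, len no
+	 * larger than FSL_ESPI_FIFO_SIZE) and transfer 1 only reads the
+	 * response (rx_buf set), so rxskip ends up being the command length.
+	 */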
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ if (i == 0) {
+ if (!t->tx_buf || t->rx_buf ||
+ t->len > FSL_ESPI_FIFO_SIZE)
+ return 0;
+ rxskip = t->len;
+ } else if (i == 1) {
+ if (t->tx_buf || !t->rx_buf)
+ return 0;
+ }
+ i++;
+ }
+
+ return i == 2 ? rxskip : 0;
+}
+
+static void fsl_espi_fill_tx_fifo(struct fsl_espi *espi, u32 events)
+{
+ u32 tx_fifo_avail;
+ unsigned int tx_left;
+ const void *tx_buf;
+
+ /* if events is zero transfer has not started and tx fifo is empty */
+ tx_fifo_avail = events ? SPIE_TXCNT(events) : FSL_ESPI_FIFO_SIZE;
+start:
+ tx_left = espi->tx_t->len - espi->tx_pos;
+ tx_buf = espi->tx_t->tx_buf;
+ while (tx_fifo_avail >= min(4U, tx_left) && tx_left) {
+ if (tx_left >= 4) {
+ if (!tx_buf)
+ fsl_espi_write_reg(espi, ESPI_SPITF, 0);
+ else if (espi->swab)
+ fsl_espi_write_reg(espi, ESPI_SPITF,
+ swahb32p(tx_buf + espi->tx_pos));
+ else
+ fsl_espi_write_reg(espi, ESPI_SPITF,
+ *(u32 *)(tx_buf + espi->tx_pos));
+ espi->tx_pos += 4;
+ tx_left -= 4;
+ tx_fifo_avail -= 4;
+ } else if (tx_left >= 2 && tx_buf && espi->swab) {
+ fsl_espi_write_reg16(espi, ESPI_SPITF,
+ swab16p(tx_buf + espi->tx_pos));
+ espi->tx_pos += 2;
+ tx_left -= 2;
+ tx_fifo_avail -= 2;
+ } else {
+ if (!tx_buf)
+ fsl_espi_write_reg8(espi, ESPI_SPITF, 0);
+ else
+ fsl_espi_write_reg8(espi, ESPI_SPITF,
+ *(u8 *)(tx_buf + espi->tx_pos));
+ espi->tx_pos += 1;
+ tx_left -= 1;
+ tx_fifo_avail -= 1;
+ }
+ }
+
+ if (!tx_left) {
+ /* Last transfer finished, in rxskip mode only one is needed */
+ if (list_is_last(&espi->tx_t->transfer_list,
+ espi->m_transfers) || espi->rxskip) {
+ espi->tx_done = true;
+ return;
+ }
+ espi->tx_t = list_next_entry(espi->tx_t, transfer_list);
+ espi->tx_pos = 0;
+ /* continue with next transfer if tx fifo is not full */
+ if (tx_fifo_avail)
+ goto start;
+ }
+}
+
+static void fsl_espi_read_rx_fifo(struct fsl_espi *espi, u32 events)
+{
+ u32 rx_fifo_avail = SPIE_RXCNT(events);
+ unsigned int rx_left;
+ void *rx_buf;
+
+start:
+ rx_left = espi->rx_t->len - espi->rx_pos;
+ rx_buf = espi->rx_t->rx_buf;
+ while (rx_fifo_avail >= min(4U, rx_left) && rx_left) {
+ if (rx_left >= 4) {
+ u32 val = fsl_espi_read_reg(espi, ESPI_SPIRF);
+
+ if (rx_buf && espi->swab)
+ *(u32 *)(rx_buf + espi->rx_pos) = swahb32(val);
+ else if (rx_buf)
+ *(u32 *)(rx_buf + espi->rx_pos) = val;
+ espi->rx_pos += 4;
+ rx_left -= 4;
+ rx_fifo_avail -= 4;
+ } else if (rx_left >= 2 && rx_buf && espi->swab) {
+ u16 val = fsl_espi_read_reg16(espi, ESPI_SPIRF);
+
+ *(u16 *)(rx_buf + espi->rx_pos) = swab16(val);
+ espi->rx_pos += 2;
+ rx_left -= 2;
+ rx_fifo_avail -= 2;
+ } else {
+ u8 val = fsl_espi_read_reg8(espi, ESPI_SPIRF);
+
+ if (rx_buf)
+ *(u8 *)(rx_buf + espi->rx_pos) = val;
+ espi->rx_pos += 1;
+ rx_left -= 1;
+ rx_fifo_avail -= 1;
+ }
+ }
+
+ if (!rx_left) {
+ if (list_is_last(&espi->rx_t->transfer_list,
+ espi->m_transfers)) {
+ espi->rx_done = true;
+ return;
+ }
+ espi->rx_t = list_next_entry(espi->rx_t, transfer_list);
+ espi->rx_pos = 0;
+ /* continue with next transfer if rx fifo is not empty */
+ if (rx_fifo_avail)
+ goto start;
+ }
+}
+
+static void fsl_espi_setup_transfer(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct fsl_espi *espi = spi_master_get_devdata(spi->master);
+ int bits_per_word = t ? t->bits_per_word : spi->bits_per_word;
+ u32 pm, hz = t ? t->speed_hz : spi->max_speed_hz;
+ struct fsl_espi_cs *cs = spi_get_ctldata(spi);
+ u32 hw_mode_old = cs->hw_mode;
+
+ /* mask out bits we are going to set */
+ cs->hw_mode &= ~(CSMODE_LEN(0xF) | CSMODE_DIV16 | CSMODE_PM(0xF));
+
+ cs->hw_mode |= CSMODE_LEN(bits_per_word - 1);
+
+ pm = DIV_ROUND_UP(espi->spibrg, hz * 4) - 1;
+
+ if (pm > 15) {
+ cs->hw_mode |= CSMODE_DIV16;
+ pm = DIV_ROUND_UP(espi->spibrg, hz * 16 * 4) - 1;
+ }
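+	/*
+	 * Worked example with hypothetical numbers: spibrg = 100 MHz and
+	 * hz = 1 MHz give pm = DIV_ROUND_UP(100 MHz, 4 MHz) - 1 = 24 > 15,
+	 * so DIV16 is set and pm becomes DIV_ROUND_UP(100 MHz, 64 MHz) - 1
+	 * = 1. That corresponds to SCK = spibrg / (4 * 16 * (pm + 1)), i.e.
+	 * roughly 781 kHz, consistent with the min/max_speed_hz limits set
+	 * in fsl_espi_probe().
+	 */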
+
+ cs->hw_mode |= CSMODE_PM(pm);
+
+ /* don't write the mode register if the mode doesn't change */
+ if (cs->hw_mode != hw_mode_old)
+ fsl_espi_write_reg(espi, ESPI_SPMODEx(spi->chip_select),
+ cs->hw_mode);
+}
+
+static int fsl_espi_bufs(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct fsl_espi *espi = spi_master_get_devdata(spi->master);
+ unsigned int rx_len = t->len;
+ u32 mask, spcom;
+ int ret;
+
+ reinit_completion(&espi->done);
+
+ /* Set SPCOM[CS] and SPCOM[TRANLEN] field */
+ spcom = SPCOM_CS(spi->chip_select);
+ spcom |= SPCOM_TRANLEN(t->len - 1);
+
+ /* configure RXSKIP mode */
+ if (espi->rxskip) {
+ spcom |= SPCOM_RXSKIP(espi->rxskip);
+ rx_len = t->len - espi->rxskip;
+ if (t->rx_nbits == SPI_NBITS_DUAL)
+ spcom |= SPCOM_DO;
+ }
+
+ fsl_espi_write_reg(espi, ESPI_SPCOM, spcom);
+
+ /* enable interrupts */
+ mask = SPIM_DON;
+ if (rx_len > FSL_ESPI_FIFO_SIZE)
+ mask |= SPIM_RXT;
+ fsl_espi_write_reg(espi, ESPI_SPIM, mask);
+
+ /* Prevent filling the fifo from getting interrupted */
+ spin_lock_irq(&espi->lock);
+ fsl_espi_fill_tx_fifo(espi, 0);
+ spin_unlock_irq(&espi->lock);
+
+	/* Won't wait forever; the SPI bus sometimes loses interrupts... */
+ ret = wait_for_completion_timeout(&espi->done, 2 * HZ);
+ if (ret == 0)
+ dev_err(espi->dev, "Transfer timed out!\n");
+
+ /* disable rx ints */
+ fsl_espi_write_reg(espi, ESPI_SPIM, 0);
+
+ return ret == 0 ? -ETIMEDOUT : 0;
+}
+
+static int fsl_espi_trans(struct spi_message *m, struct spi_transfer *trans)
+{
+ struct fsl_espi *espi = spi_master_get_devdata(m->spi->master);
+ struct spi_device *spi = m->spi;
+ int ret;
+
+ /* In case of LSB-first and bits_per_word > 8 byte-swap all words */
+ espi->swab = spi->mode & SPI_LSB_FIRST && trans->bits_per_word > 8;
+
+ espi->m_transfers = &m->transfers;
+ espi->tx_t = list_first_entry(&m->transfers, struct spi_transfer,
+ transfer_list);
+ espi->tx_pos = 0;
+ espi->tx_done = false;
+ espi->rx_t = list_first_entry(&m->transfers, struct spi_transfer,
+ transfer_list);
+ espi->rx_pos = 0;
+ espi->rx_done = false;
+
+ espi->rxskip = fsl_espi_check_rxskip_mode(m);
+ if (trans->rx_nbits == SPI_NBITS_DUAL && !espi->rxskip) {
+ dev_err(espi->dev, "Dual output mode requires RXSKIP mode!\n");
+ return -EINVAL;
+ }
+
+ /* In RXSKIP mode skip first transfer for reads */
+ if (espi->rxskip)
+ espi->rx_t = list_next_entry(espi->rx_t, transfer_list);
+
+ fsl_espi_setup_transfer(spi, trans);
+
+ ret = fsl_espi_bufs(spi, trans);
+
+ spi_transfer_delay_exec(trans);
+
+ return ret;
+}
+
+static int fsl_espi_do_one_msg(struct spi_master *master,
+ struct spi_message *m)
+{
+ unsigned int rx_nbits = 0, delay_nsecs = 0;
+ struct spi_transfer *t, trans = {};
+ int ret;
+
+ ret = fsl_espi_check_message(m);
+ if (ret)
+ goto out;
+
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ unsigned int delay = spi_delay_to_ns(&t->delay, t);
+
+ if (delay > delay_nsecs)
+ delay_nsecs = delay;
+ if (t->rx_nbits > rx_nbits)
+ rx_nbits = t->rx_nbits;
+ }
+
+ t = list_first_entry(&m->transfers, struct spi_transfer,
+ transfer_list);
+
+ trans.len = m->frame_length;
+ trans.speed_hz = t->speed_hz;
+ trans.bits_per_word = t->bits_per_word;
+ trans.delay.value = delay_nsecs;
+ trans.delay.unit = SPI_DELAY_UNIT_NSECS;
+ trans.rx_nbits = rx_nbits;
+
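+	/*
+	 * The message is pushed to the hardware as a single transaction:
+	 * trans.len covers the whole frame, while fsl_espi_fill_tx_fifo()
+	 * and fsl_espi_read_rx_fifo() walk the individual transfers' buffers.
+	 */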
+ if (trans.len)
+ ret = fsl_espi_trans(m, &trans);
+
+ m->actual_length = ret ? 0 : trans.len;
+out:
+ if (m->status == -EINPROGRESS)
+ m->status = ret;
+
+ spi_finalize_current_message(master);
+
+ return ret;
+}
+
+static int fsl_espi_setup(struct spi_device *spi)
+{
+ struct fsl_espi *espi;
+ u32 loop_mode;
+ struct fsl_espi_cs *cs = spi_get_ctldata(spi);
+
+ if (!cs) {
+ cs = kzalloc(sizeof(*cs), GFP_KERNEL);
+ if (!cs)
+ return -ENOMEM;
+ spi_set_ctldata(spi, cs);
+ }
+
+ espi = spi_master_get_devdata(spi->master);
+
+ pm_runtime_get_sync(espi->dev);
+
+ cs->hw_mode = fsl_espi_read_reg(espi, ESPI_SPMODEx(spi->chip_select));
+ /* mask out bits we are going to set */
+ cs->hw_mode &= ~(CSMODE_CP_BEGIN_EDGECLK | CSMODE_CI_INACTIVEHIGH
+ | CSMODE_REV);
+
+ if (spi->mode & SPI_CPHA)
+ cs->hw_mode |= CSMODE_CP_BEGIN_EDGECLK;
+ if (spi->mode & SPI_CPOL)
+ cs->hw_mode |= CSMODE_CI_INACTIVEHIGH;
+ if (!(spi->mode & SPI_LSB_FIRST))
+ cs->hw_mode |= CSMODE_REV;
+
+ /* Handle the loop mode */
+ loop_mode = fsl_espi_read_reg(espi, ESPI_SPMODE);
+ loop_mode &= ~SPMODE_LOOP;
+ if (spi->mode & SPI_LOOP)
+ loop_mode |= SPMODE_LOOP;
+ fsl_espi_write_reg(espi, ESPI_SPMODE, loop_mode);
+
+ fsl_espi_setup_transfer(spi, NULL);
+
+ pm_runtime_mark_last_busy(espi->dev);
+ pm_runtime_put_autosuspend(espi->dev);
+
+ return 0;
+}
+
+static void fsl_espi_cleanup(struct spi_device *spi)
+{
+ struct fsl_espi_cs *cs = spi_get_ctldata(spi);
+
+ kfree(cs);
+ spi_set_ctldata(spi, NULL);
+}
+
+static void fsl_espi_cpu_irq(struct fsl_espi *espi, u32 events)
+{
+ if (!espi->rx_done)
+ fsl_espi_read_rx_fifo(espi, events);
+
+ if (!espi->tx_done)
+ fsl_espi_fill_tx_fifo(espi, events);
+
+ if (!espi->tx_done || !espi->rx_done)
+ return;
+
+ /* we're done, but check for errors before returning */
+ events = fsl_espi_read_reg(espi, ESPI_SPIE);
+
+ if (!(events & SPIE_DON))
+ dev_err(espi->dev,
+ "Transfer done but SPIE_DON isn't set!\n");
+
+ if (SPIE_RXCNT(events) || SPIE_TXCNT(events) != FSL_ESPI_FIFO_SIZE) {
+		dev_err(espi->dev, "Transfer done but rx/tx FIFOs aren't empty!\n");
+ dev_err(espi->dev, "SPIE_RXCNT = %d, SPIE_TXCNT = %d\n",
+ SPIE_RXCNT(events), SPIE_TXCNT(events));
+ }
+
+ complete(&espi->done);
+}
+
+static irqreturn_t fsl_espi_irq(s32 irq, void *context_data)
+{
+ struct fsl_espi *espi = context_data;
+ u32 events, mask;
+
+ spin_lock(&espi->lock);
+
+	/* Get interrupt events (tx/rx) */
+ events = fsl_espi_read_reg(espi, ESPI_SPIE);
+ mask = fsl_espi_read_reg(espi, ESPI_SPIM);
+ if (!(events & mask)) {
+ spin_unlock(&espi->lock);
+ return IRQ_NONE;
+ }
+
+ dev_vdbg(espi->dev, "%s: events %x\n", __func__, events);
+
+ fsl_espi_cpu_irq(espi, events);
+
+ /* Clear the events */
+ fsl_espi_write_reg(espi, ESPI_SPIE, events);
+
+ spin_unlock(&espi->lock);
+
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_PM
+static int fsl_espi_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct fsl_espi *espi = spi_master_get_devdata(master);
+ u32 regval;
+
+ regval = fsl_espi_read_reg(espi, ESPI_SPMODE);
+ regval &= ~SPMODE_ENABLE;
+ fsl_espi_write_reg(espi, ESPI_SPMODE, regval);
+
+ return 0;
+}
+
+static int fsl_espi_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct fsl_espi *espi = spi_master_get_devdata(master);
+ u32 regval;
+
+ regval = fsl_espi_read_reg(espi, ESPI_SPMODE);
+ regval |= SPMODE_ENABLE;
+ fsl_espi_write_reg(espi, ESPI_SPMODE, regval);
+
+ return 0;
+}
+#endif
+
+static size_t fsl_espi_max_message_size(struct spi_device *spi)
+{
+ return SPCOM_TRANLEN_MAX;
+}
+
+static void fsl_espi_init_regs(struct device *dev, bool initial)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct fsl_espi *espi = spi_master_get_devdata(master);
+ struct device_node *nc;
+ u32 csmode, cs, prop;
+ int ret;
+
+ /* SPI controller initializations */
+ fsl_espi_write_reg(espi, ESPI_SPMODE, 0);
+ fsl_espi_write_reg(espi, ESPI_SPIM, 0);
+ fsl_espi_write_reg(espi, ESPI_SPCOM, 0);
+ fsl_espi_write_reg(espi, ESPI_SPIE, 0xffffffff);
+
+ /* Init eSPI CS mode register */
+ for_each_available_child_of_node(master->dev.of_node, nc) {
+ /* get chip select */
+ ret = of_property_read_u32(nc, "reg", &cs);
+ if (ret || cs >= master->num_chipselect)
+ continue;
+
+ csmode = CSMODE_INIT_VAL;
+
+ /* check if CSBEF is set in device tree */
+ ret = of_property_read_u32(nc, "fsl,csbef", &prop);
+ if (!ret) {
+ csmode &= ~(CSMODE_BEF(0xf));
+ csmode |= CSMODE_BEF(prop);
+ }
+
+ /* check if CSAFT is set in device tree */
+ ret = of_property_read_u32(nc, "fsl,csaft", &prop);
+ if (!ret) {
+ csmode &= ~(CSMODE_AFT(0xf));
+ csmode |= CSMODE_AFT(prop);
+ }
+
+ fsl_espi_write_reg(espi, ESPI_SPMODEx(cs), csmode);
+
+ if (initial)
+ dev_info(dev, "cs=%u, init_csmode=0x%x\n", cs, csmode);
+ }
+
+ /* Enable SPI interface */
+ fsl_espi_write_reg(espi, ESPI_SPMODE, SPMODE_INIT_VAL | SPMODE_ENABLE);
+}
+
+static int fsl_espi_probe(struct device *dev, struct resource *mem,
+ unsigned int irq, unsigned int num_cs)
+{
+ struct spi_master *master;
+ struct fsl_espi *espi;
+ int ret;
+
+ master = spi_alloc_master(dev, sizeof(struct fsl_espi));
+ if (!master)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, master);
+
+ master->mode_bits = SPI_RX_DUAL | SPI_CPOL | SPI_CPHA | SPI_CS_HIGH |
+ SPI_LSB_FIRST | SPI_LOOP;
+ master->dev.of_node = dev->of_node;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
+ master->setup = fsl_espi_setup;
+ master->cleanup = fsl_espi_cleanup;
+ master->transfer_one_message = fsl_espi_do_one_msg;
+ master->auto_runtime_pm = true;
+ master->max_message_size = fsl_espi_max_message_size;
+ master->num_chipselect = num_cs;
+
+ espi = spi_master_get_devdata(master);
+ spin_lock_init(&espi->lock);
+
+ espi->dev = dev;
+ espi->spibrg = fsl_get_sys_freq();
+ if (espi->spibrg == -1) {
+ dev_err(dev, "Can't get sys frequency!\n");
+ ret = -EINVAL;
+ goto err_probe;
+ }
+ /* determined by clock divider fields DIV16/PM in register SPMODEx */
+ master->min_speed_hz = DIV_ROUND_UP(espi->spibrg, 4 * 16 * 16);
+ master->max_speed_hz = DIV_ROUND_UP(espi->spibrg, 4);
+
+ init_completion(&espi->done);
+
+ espi->reg_base = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(espi->reg_base)) {
+ ret = PTR_ERR(espi->reg_base);
+ goto err_probe;
+ }
+
+ /* Register for SPI Interrupt */
+ ret = devm_request_irq(dev, irq, fsl_espi_irq, 0, "fsl_espi", espi);
+ if (ret)
+ goto err_probe;
+
+ fsl_espi_init_regs(dev, true);
+
+ pm_runtime_set_autosuspend_delay(dev, AUTOSUSPEND_TIMEOUT);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
+
+ ret = devm_spi_register_master(dev, master);
+ if (ret < 0)
+ goto err_pm;
+
+ dev_info(dev, "irq = %u\n", irq);
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return 0;
+
+err_pm:
+ pm_runtime_put_noidle(dev);
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+err_probe:
+ spi_master_put(master);
+ return ret;
+}
+
+static int of_fsl_espi_get_chipselects(struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ u32 num_cs;
+ int ret;
+
+ ret = of_property_read_u32(np, "fsl,espi-num-chipselects", &num_cs);
+ if (ret) {
+ dev_err(dev, "No 'fsl,espi-num-chipselects' property\n");
+ return 0;
+ }
+
+ return num_cs;
+}
+
+static int of_fsl_espi_probe(struct platform_device *ofdev)
+{
+ struct device *dev = &ofdev->dev;
+ struct device_node *np = ofdev->dev.of_node;
+ struct resource mem;
+ unsigned int irq, num_cs;
+ int ret;
+
+ if (of_property_read_bool(np, "mode")) {
+ dev_err(dev, "mode property is not supported on ESPI!\n");
+ return -EINVAL;
+ }
+
+ num_cs = of_fsl_espi_get_chipselects(dev);
+ if (!num_cs)
+ return -EINVAL;
+
+ ret = of_address_to_resource(np, 0, &mem);
+ if (ret)
+ return ret;
+
+ irq = irq_of_parse_and_map(np, 0);
+ if (!irq)
+ return -EINVAL;
+
+ return fsl_espi_probe(dev, &mem, irq, num_cs);
+}
+
+static int of_fsl_espi_remove(struct platform_device *dev)
+{
+ pm_runtime_disable(&dev->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int of_fsl_espi_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ int ret;
+
+ ret = spi_master_suspend(master);
+ if (ret)
+ return ret;
+
+ return pm_runtime_force_suspend(dev);
+}
+
+static int of_fsl_espi_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ int ret;
+
+ fsl_espi_init_regs(dev, false);
+
+ ret = pm_runtime_force_resume(dev);
+ if (ret < 0)
+ return ret;
+
+ return spi_master_resume(master);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops espi_pm = {
+ SET_RUNTIME_PM_OPS(fsl_espi_runtime_suspend,
+ fsl_espi_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(of_fsl_espi_suspend, of_fsl_espi_resume)
+};
+
+static const struct of_device_id of_fsl_espi_match[] = {
+ { .compatible = "fsl,mpc8536-espi" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, of_fsl_espi_match);
+
+static struct platform_driver fsl_espi_driver = {
+ .driver = {
+ .name = "fsl_espi",
+ .of_match_table = of_fsl_espi_match,
+ .pm = &espi_pm,
+ },
+ .probe = of_fsl_espi_probe,
+ .remove = of_fsl_espi_remove,
+};
+module_platform_driver(fsl_espi_driver);
+
+MODULE_AUTHOR("Mingkai Hu");
+MODULE_DESCRIPTION("Enhanced Freescale SPI Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-fsl-lib.c b/drivers/spi/spi-fsl-lib.c
new file mode 100644
index 000000000..76e1192eb
--- /dev/null
+++ b/drivers/spi/spi-fsl-lib.c
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Freescale SPI/eSPI controller driver library.
+ *
+ * Maintainer: Kumar Gala
+ *
+ * Copyright (C) 2006 Polycom, Inc.
+ *
+ * CPM SPI and QE buffer descriptors mode support:
+ * Copyright (c) 2009 MontaVista Software, Inc.
+ * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * Copyright 2010 Freescale Semiconductor, Inc.
+ */
+#include <linux/dma-mapping.h>
+#include <linux/fsl_devices.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/spi/spi.h>
+#ifdef CONFIG_FSL_SOC
+#include <sysdev/fsl_soc.h>
+#endif
+
+#include "spi-fsl-lib.h"
+
+#define MPC8XXX_SPI_RX_BUF(type) \
+void mpc8xxx_spi_rx_buf_##type(u32 data, struct mpc8xxx_spi *mpc8xxx_spi) \
+{ \
+ type *rx = mpc8xxx_spi->rx; \
+ *rx++ = (type)(data >> mpc8xxx_spi->rx_shift); \
+ mpc8xxx_spi->rx = rx; \
+} \
+EXPORT_SYMBOL_GPL(mpc8xxx_spi_rx_buf_##type);
+
+#define MPC8XXX_SPI_TX_BUF(type) \
+u32 mpc8xxx_spi_tx_buf_##type(struct mpc8xxx_spi *mpc8xxx_spi) \
+{ \
+ u32 data; \
+ const type *tx = mpc8xxx_spi->tx; \
+ if (!tx) \
+ return 0; \
+ data = *tx++ << mpc8xxx_spi->tx_shift; \
+ mpc8xxx_spi->tx = tx; \
+ return data; \
+} \
+EXPORT_SYMBOL_GPL(mpc8xxx_spi_tx_buf_##type);
+
+MPC8XXX_SPI_RX_BUF(u8)
+MPC8XXX_SPI_RX_BUF(u16)
+MPC8XXX_SPI_RX_BUF(u32)
+MPC8XXX_SPI_TX_BUF(u8)
+MPC8XXX_SPI_TX_BUF(u16)
+MPC8XXX_SPI_TX_BUF(u32)
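+
+/*
+ * The instantiations above expand to mpc8xxx_spi_rx_buf_u8/u16/u32() and
+ * mpc8xxx_spi_tx_buf_u8/u16/u32(), the exported helpers declared in
+ * spi-fsl-lib.h; mpc8xxx_spi_probe() below installs the u8 variants as
+ * the default get_rx/get_tx hooks.
+ */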
+
+struct mpc8xxx_spi_probe_info *to_of_pinfo(struct fsl_spi_platform_data *pdata)
+{
+ return container_of(pdata, struct mpc8xxx_spi_probe_info, pdata);
+}
+EXPORT_SYMBOL_GPL(to_of_pinfo);
+
+const char *mpc8xxx_spi_strmode(unsigned int flags)
+{
+ if (flags & SPI_QE_CPU_MODE) {
+ return "QE CPU";
+ } else if (flags & SPI_CPM_MODE) {
+ if (flags & SPI_QE)
+ return "QE";
+ else if (flags & SPI_CPM2)
+ return "CPM2";
+ else
+ return "CPM1";
+ }
+ return "CPU";
+}
+EXPORT_SYMBOL_GPL(mpc8xxx_spi_strmode);
+
+void mpc8xxx_spi_probe(struct device *dev, struct resource *mem,
+ unsigned int irq)
+{
+ struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
+ struct spi_master *master;
+ struct mpc8xxx_spi *mpc8xxx_spi;
+
+ master = dev_get_drvdata(dev);
+
+ /* the spi->mode bits understood by this driver: */
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH
+ | SPI_LSB_FIRST | SPI_LOOP;
+
+ master->dev.of_node = dev->of_node;
+
+ mpc8xxx_spi = spi_master_get_devdata(master);
+ mpc8xxx_spi->dev = dev;
+ mpc8xxx_spi->get_rx = mpc8xxx_spi_rx_buf_u8;
+ mpc8xxx_spi->get_tx = mpc8xxx_spi_tx_buf_u8;
+ mpc8xxx_spi->flags = pdata->flags;
+ mpc8xxx_spi->spibrg = pdata->sysclk;
+ mpc8xxx_spi->irq = irq;
+
+ mpc8xxx_spi->rx_shift = 0;
+ mpc8xxx_spi->tx_shift = 0;
+
+ master->bus_num = pdata->bus_num;
+ master->num_chipselect = pdata->max_chipselect;
+
+ init_completion(&mpc8xxx_spi->done);
+}
+EXPORT_SYMBOL_GPL(mpc8xxx_spi_probe);
+
+int of_mpc8xxx_spi_probe(struct platform_device *ofdev)
+{
+ struct device *dev = &ofdev->dev;
+ struct device_node *np = ofdev->dev.of_node;
+ struct mpc8xxx_spi_probe_info *pinfo;
+ struct fsl_spi_platform_data *pdata;
+ const void *prop;
+ int ret = -ENOMEM;
+
+ pinfo = devm_kzalloc(&ofdev->dev, sizeof(*pinfo), GFP_KERNEL);
+ if (!pinfo)
+ return ret;
+
+ pdata = &pinfo->pdata;
+ dev->platform_data = pdata;
+
+ /* Allocate bus num dynamically. */
+ pdata->bus_num = -1;
+
+#ifdef CONFIG_FSL_SOC
+	/* The SPI controller is clocked either by the QE or by the SoC clock. */
+ pdata->sysclk = get_brgfreq();
+ if (pdata->sysclk == -1) {
+ pdata->sysclk = fsl_get_sys_freq();
+ if (pdata->sysclk == -1)
+ return -ENODEV;
+ }
+#else
+ ret = of_property_read_u32(np, "clock-frequency", &pdata->sysclk);
+ if (ret)
+ return ret;
+#endif
+
+ prop = of_get_property(np, "mode", NULL);
+ if (prop && !strcmp(prop, "cpu-qe"))
+ pdata->flags = SPI_QE_CPU_MODE;
+ else if (prop && !strcmp(prop, "qe"))
+ pdata->flags = SPI_CPM_MODE | SPI_QE;
+ else if (of_device_is_compatible(np, "fsl,cpm2-spi"))
+ pdata->flags = SPI_CPM_MODE | SPI_CPM2;
+ else if (of_device_is_compatible(np, "fsl,cpm1-spi"))
+ pdata->flags = SPI_CPM_MODE | SPI_CPM1;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_mpc8xxx_spi_probe);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-fsl-lib.h b/drivers/spi/spi-fsl-lib.h
new file mode 100644
index 000000000..015a1abb6
--- /dev/null
+++ b/drivers/spi/spi-fsl-lib.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Freescale SPI/eSPI controller driver library.
+ *
+ * Maintainer: Kumar Gala
+ *
+ * Copyright 2010 Freescale Semiconductor, Inc.
+ * Copyright (C) 2006 Polycom, Inc.
+ *
+ * CPM SPI and QE buffer descriptors mode support:
+ * Copyright (c) 2009 MontaVista Software, Inc.
+ * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
+ */
+#ifndef __SPI_FSL_LIB_H__
+#define __SPI_FSL_LIB_H__
+
+#include <asm/io.h>
+
+/* SPI/eSPI Controller driver's private data. */
+struct mpc8xxx_spi {
+ struct device *dev;
+ void __iomem *reg_base;
+
+ /* rx & tx bufs from the spi_transfer */
+ const void *tx;
+ void *rx;
+
+ int subblock;
+ struct spi_pram __iomem *pram;
+#ifdef CONFIG_FSL_SOC
+ struct cpm_buf_desc __iomem *tx_bd;
+ struct cpm_buf_desc __iomem *rx_bd;
+#endif
+
+ struct spi_transfer *xfer_in_progress;
+
+ /* dma addresses for CPM transfers */
+ dma_addr_t tx_dma;
+ dma_addr_t rx_dma;
+ bool map_tx_dma;
+ bool map_rx_dma;
+
+ dma_addr_t dma_dummy_tx;
+ dma_addr_t dma_dummy_rx;
+
+ /* functions to deal with different sized buffers */
+ void (*get_rx) (u32 rx_data, struct mpc8xxx_spi *);
+ u32(*get_tx) (struct mpc8xxx_spi *);
+
+ unsigned int count;
+ unsigned int irq;
+
+ unsigned nsecs; /* (clock cycle time)/2 */
+
+ u32 spibrg; /* SPIBRG input clock */
+ u32 rx_shift; /* RX data reg shift when in qe mode */
+ u32 tx_shift; /* TX data reg shift when in qe mode */
+
+ unsigned int flags;
+
+#if IS_ENABLED(CONFIG_SPI_FSL_SPI)
+ int type;
+ int native_chipselects;
+ u8 max_bits_per_word;
+
+ void (*set_shifts)(u32 *rx_shift, u32 *tx_shift,
+ int bits_per_word, int msb_first);
+#endif
+
+ struct completion done;
+};
+
+struct spi_mpc8xxx_cs {
+ /* functions to deal with different sized buffers */
+ void (*get_rx) (u32 rx_data, struct mpc8xxx_spi *);
+ u32 (*get_tx) (struct mpc8xxx_spi *);
+ u32 rx_shift; /* RX data reg shift when in qe mode */
+ u32 tx_shift; /* TX data reg shift when in qe mode */
+ u32 hw_mode; /* Holds HW mode register settings */
+};
+
+static inline void mpc8xxx_spi_write_reg(__be32 __iomem *reg, u32 val)
+{
+ iowrite32be(val, reg);
+}
+
+static inline u32 mpc8xxx_spi_read_reg(__be32 __iomem *reg)
+{
+ return ioread32be(reg);
+}
+
+struct mpc8xxx_spi_probe_info {
+ struct fsl_spi_platform_data pdata;
+ __be32 __iomem *immr_spi_cs;
+};
+
+extern u32 mpc8xxx_spi_tx_buf_u8(struct mpc8xxx_spi *mpc8xxx_spi);
+extern u32 mpc8xxx_spi_tx_buf_u16(struct mpc8xxx_spi *mpc8xxx_spi);
+extern u32 mpc8xxx_spi_tx_buf_u32(struct mpc8xxx_spi *mpc8xxx_spi);
+extern void mpc8xxx_spi_rx_buf_u8(u32 data, struct mpc8xxx_spi *mpc8xxx_spi);
+extern void mpc8xxx_spi_rx_buf_u16(u32 data, struct mpc8xxx_spi *mpc8xxx_spi);
+extern void mpc8xxx_spi_rx_buf_u32(u32 data, struct mpc8xxx_spi *mpc8xxx_spi);
+
+extern struct mpc8xxx_spi_probe_info *to_of_pinfo(
+ struct fsl_spi_platform_data *pdata);
+extern int mpc8xxx_spi_bufs(struct mpc8xxx_spi *mspi,
+ struct spi_transfer *t, unsigned int len);
+extern const char *mpc8xxx_spi_strmode(unsigned int flags);
+extern void mpc8xxx_spi_probe(struct device *dev, struct resource *mem,
+ unsigned int irq);
+extern int mpc8xxx_spi_remove(struct device *dev);
+extern int of_mpc8xxx_spi_probe(struct platform_device *ofdev);
+
+#endif /* __SPI_FSL_LIB_H__ */
diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
new file mode 100644
index 000000000..9e324d725
--- /dev/null
+++ b/drivers/spi/spi-fsl-lpspi.c
@@ -0,0 +1,992 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Freescale i.MX7ULP LPSPI driver
+//
+// Copyright 2016 Freescale Semiconductor, Inc.
+// Copyright 2018 NXP Semiconductors
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/dma/imx-dma.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+#include <linux/types.h>
+
+#define DRIVER_NAME "fsl_lpspi"
+
+#define FSL_LPSPI_RPM_TIMEOUT 50 /* 50ms */
+
+/* The maximum number of bytes that eDMA can transfer at once. */
+#define FSL_LPSPI_MAX_EDMA_BYTES ((1 << 15) - 1)
+
+/* i.MX7ULP LPSPI registers */
+#define IMX7ULP_VERID 0x0
+#define IMX7ULP_PARAM 0x4
+#define IMX7ULP_CR 0x10
+#define IMX7ULP_SR 0x14
+#define IMX7ULP_IER 0x18
+#define IMX7ULP_DER 0x1c
+#define IMX7ULP_CFGR0 0x20
+#define IMX7ULP_CFGR1 0x24
+#define IMX7ULP_DMR0 0x30
+#define IMX7ULP_DMR1 0x34
+#define IMX7ULP_CCR 0x40
+#define IMX7ULP_FCR 0x58
+#define IMX7ULP_FSR 0x5c
+#define IMX7ULP_TCR 0x60
+#define IMX7ULP_TDR 0x64
+#define IMX7ULP_RSR 0x70
+#define IMX7ULP_RDR 0x74
+
+/* General control register field define */
+#define CR_RRF BIT(9)
+#define CR_RTF BIT(8)
+#define CR_RST BIT(1)
+#define CR_MEN BIT(0)
+#define SR_MBF BIT(24)
+#define SR_TCF BIT(10)
+#define SR_FCF BIT(9)
+#define SR_RDF BIT(1)
+#define SR_TDF BIT(0)
+#define IER_TCIE BIT(10)
+#define IER_FCIE BIT(9)
+#define IER_RDIE BIT(1)
+#define IER_TDIE BIT(0)
+#define DER_RDDE BIT(1)
+#define DER_TDDE BIT(0)
+#define CFGR1_PCSCFG BIT(27)
+#define CFGR1_PINCFG (BIT(24)|BIT(25))
+#define CFGR1_PCSPOL BIT(8)
+#define CFGR1_NOSTALL BIT(3)
+#define CFGR1_MASTER BIT(0)
+#define FSR_TXCOUNT (0xFF)
+#define RSR_RXEMPTY BIT(1)
+#define TCR_CPOL BIT(31)
+#define TCR_CPHA BIT(30)
+#define TCR_CONT BIT(21)
+#define TCR_CONTC BIT(20)
+#define TCR_RXMSK BIT(19)
+#define TCR_TXMSK BIT(18)
+
+struct lpspi_config {
+ u8 bpw;
+ u8 chip_select;
+ u8 prescale;
+ u16 mode;
+ u32 speed_hz;
+};
+
+struct fsl_lpspi_data {
+ struct device *dev;
+ void __iomem *base;
+ unsigned long base_phys;
+ struct clk *clk_ipg;
+ struct clk *clk_per;
+ bool is_slave;
+ bool is_only_cs1;
+ bool is_first_byte;
+
+ void *rx_buf;
+ const void *tx_buf;
+ void (*tx)(struct fsl_lpspi_data *);
+ void (*rx)(struct fsl_lpspi_data *);
+
+ u32 remain;
+ u8 watermark;
+ u8 txfifosize;
+ u8 rxfifosize;
+
+ struct lpspi_config config;
+ struct completion xfer_done;
+
+ bool slave_aborted;
+
+ /* DMA */
+ bool usedma;
+ struct completion dma_rx_completion;
+ struct completion dma_tx_completion;
+};
+
+static const struct of_device_id fsl_lpspi_dt_ids[] = {
+ { .compatible = "fsl,imx7ulp-spi", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, fsl_lpspi_dt_ids);
+
+#define LPSPI_BUF_RX(type) \
+static void fsl_lpspi_buf_rx_##type(struct fsl_lpspi_data *fsl_lpspi) \
+{ \
+ unsigned int val = readl(fsl_lpspi->base + IMX7ULP_RDR); \
+ \
+ if (fsl_lpspi->rx_buf) { \
+ *(type *)fsl_lpspi->rx_buf = val; \
+ fsl_lpspi->rx_buf += sizeof(type); \
+ } \
+}
+
+#define LPSPI_BUF_TX(type) \
+static void fsl_lpspi_buf_tx_##type(struct fsl_lpspi_data *fsl_lpspi) \
+{ \
+ type val = 0; \
+ \
+ if (fsl_lpspi->tx_buf) { \
+ val = *(type *)fsl_lpspi->tx_buf; \
+ fsl_lpspi->tx_buf += sizeof(type); \
+ } \
+ \
+ fsl_lpspi->remain -= sizeof(type); \
+ writel(val, fsl_lpspi->base + IMX7ULP_TDR); \
+}
+
+LPSPI_BUF_RX(u8)
+LPSPI_BUF_TX(u8)
+LPSPI_BUF_RX(u16)
+LPSPI_BUF_TX(u16)
+LPSPI_BUF_RX(u32)
+LPSPI_BUF_TX(u32)
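+
+/*
+ * These expand to fsl_lpspi_buf_rx_u8/u16/u32() and fsl_lpspi_buf_tx_...(),
+ * which fsl_lpspi_setup_transfer() installs as the rx/tx callbacks based
+ * on the transfer's bits_per_word.
+ */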
+
+static void fsl_lpspi_intctrl(struct fsl_lpspi_data *fsl_lpspi,
+ unsigned int enable)
+{
+ writel(enable, fsl_lpspi->base + IMX7ULP_IER);
+}
+
+static int fsl_lpspi_bytes_per_word(const int bpw)
+{
+ return DIV_ROUND_UP(bpw, BITS_PER_BYTE);
+}
+
+static bool fsl_lpspi_can_dma(struct spi_controller *controller,
+ struct spi_device *spi,
+ struct spi_transfer *transfer)
+{
+ unsigned int bytes_per_word;
+
+ if (!controller->dma_rx)
+ return false;
+
+ bytes_per_word = fsl_lpspi_bytes_per_word(transfer->bits_per_word);
+
+ switch (bytes_per_word) {
+ case 1:
+ case 2:
+ case 4:
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+static int lpspi_prepare_xfer_hardware(struct spi_controller *controller)
+{
+ struct fsl_lpspi_data *fsl_lpspi =
+ spi_controller_get_devdata(controller);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(fsl_lpspi->dev);
+ if (ret < 0) {
+ dev_err(fsl_lpspi->dev, "failed to enable clock\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int lpspi_unprepare_xfer_hardware(struct spi_controller *controller)
+{
+ struct fsl_lpspi_data *fsl_lpspi =
+ spi_controller_get_devdata(controller);
+
+ pm_runtime_mark_last_busy(fsl_lpspi->dev);
+ pm_runtime_put_autosuspend(fsl_lpspi->dev);
+
+ return 0;
+}
+
+static void fsl_lpspi_write_tx_fifo(struct fsl_lpspi_data *fsl_lpspi)
+{
+ u8 txfifo_cnt;
+ u32 temp;
+
+ txfifo_cnt = readl(fsl_lpspi->base + IMX7ULP_FSR) & 0xff;
+
+ while (txfifo_cnt < fsl_lpspi->txfifosize) {
+ if (!fsl_lpspi->remain)
+ break;
+ fsl_lpspi->tx(fsl_lpspi);
+ txfifo_cnt++;
+ }
+
+ if (txfifo_cnt < fsl_lpspi->txfifosize) {
+ if (!fsl_lpspi->is_slave) {
+ temp = readl(fsl_lpspi->base + IMX7ULP_TCR);
+ temp &= ~TCR_CONTC;
+ writel(temp, fsl_lpspi->base + IMX7ULP_TCR);
+ }
+
+ fsl_lpspi_intctrl(fsl_lpspi, IER_FCIE);
+ } else
+ fsl_lpspi_intctrl(fsl_lpspi, IER_TDIE);
+}
+
+static void fsl_lpspi_read_rx_fifo(struct fsl_lpspi_data *fsl_lpspi)
+{
+ while (!(readl(fsl_lpspi->base + IMX7ULP_RSR) & RSR_RXEMPTY))
+ fsl_lpspi->rx(fsl_lpspi);
+}
+
+static void fsl_lpspi_set_cmd(struct fsl_lpspi_data *fsl_lpspi)
+{
+ u32 temp = 0;
+
+ temp |= fsl_lpspi->config.bpw - 1;
+ temp |= (fsl_lpspi->config.mode & 0x3) << 30;
+ temp |= (fsl_lpspi->config.chip_select & 0x3) << 24;
+ if (!fsl_lpspi->is_slave) {
+ temp |= fsl_lpspi->config.prescale << 27;
+ /*
+		 * Setting TCR_CONT keeps SS asserted after the current transfer.
+		 * For the first transfer, clear TCR_CONTC to assert SS.
+		 * For subsequent transfers, set TCR_CONTC to keep SS asserted.
+ */
+ if (!fsl_lpspi->usedma) {
+ temp |= TCR_CONT;
+ if (fsl_lpspi->is_first_byte)
+ temp &= ~TCR_CONTC;
+ else
+ temp |= TCR_CONTC;
+ }
+ }
+ writel(temp, fsl_lpspi->base + IMX7ULP_TCR);
+
+ dev_dbg(fsl_lpspi->dev, "TCR=0x%x\n", temp);
+}
+
+static void fsl_lpspi_set_watermark(struct fsl_lpspi_data *fsl_lpspi)
+{
+ u32 temp;
+
+ if (!fsl_lpspi->usedma)
+ temp = fsl_lpspi->watermark >> 1 |
+ (fsl_lpspi->watermark >> 1) << 16;
+ else
+ temp = fsl_lpspi->watermark >> 1;
+
+ writel(temp, fsl_lpspi->base + IMX7ULP_FCR);
+
+ dev_dbg(fsl_lpspi->dev, "FCR=0x%x\n", temp);
+}
+
+static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi)
+{
+ struct lpspi_config config = fsl_lpspi->config;
+ unsigned int perclk_rate, scldiv;
+ u8 prescale;
+
+ perclk_rate = clk_get_rate(fsl_lpspi->clk_per);
+
+ if (config.speed_hz > perclk_rate / 2) {
+ dev_err(fsl_lpspi->dev,
+			"per-clk should be at least twice the transfer speed\n");
+ return -EINVAL;
+ }
+
+ for (prescale = 0; prescale < 8; prescale++) {
+ scldiv = perclk_rate / config.speed_hz / (1 << prescale) - 2;
+ if (scldiv < 256) {
+ fsl_lpspi->config.prescale = prescale;
+ break;
+ }
+ }
+
+ if (scldiv >= 256)
+ return -EINVAL;
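+	/*
+	 * Rough sketch of the result, assuming the usual LPSPI relation
+	 * SCK = perclk / ((1 << prescale) * (scldiv + 2)): with a
+	 * hypothetical perclk of 40 MHz and a 10 MHz request, prescale = 0
+	 * and scldiv = 40 / 10 - 2 = 2, i.e. exactly 10 MHz.
+	 */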
+
+ writel(scldiv | (scldiv << 8) | ((scldiv >> 1) << 16),
+ fsl_lpspi->base + IMX7ULP_CCR);
+
+ dev_dbg(fsl_lpspi->dev, "perclk=%d, speed=%d, prescale=%d, scldiv=%d\n",
+ perclk_rate, config.speed_hz, prescale, scldiv);
+
+ return 0;
+}
+
+static int fsl_lpspi_dma_configure(struct spi_controller *controller)
+{
+ int ret;
+ enum dma_slave_buswidth buswidth;
+ struct dma_slave_config rx = {}, tx = {};
+ struct fsl_lpspi_data *fsl_lpspi =
+ spi_controller_get_devdata(controller);
+
+ switch (fsl_lpspi_bytes_per_word(fsl_lpspi->config.bpw)) {
+ case 4:
+ buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ break;
+ case 2:
+ buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ break;
+ case 1:
+ buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ tx.direction = DMA_MEM_TO_DEV;
+ tx.dst_addr = fsl_lpspi->base_phys + IMX7ULP_TDR;
+ tx.dst_addr_width = buswidth;
+ tx.dst_maxburst = 1;
+ ret = dmaengine_slave_config(controller->dma_tx, &tx);
+ if (ret) {
+ dev_err(fsl_lpspi->dev, "TX dma configuration failed with %d\n",
+ ret);
+ return ret;
+ }
+
+ rx.direction = DMA_DEV_TO_MEM;
+ rx.src_addr = fsl_lpspi->base_phys + IMX7ULP_RDR;
+ rx.src_addr_width = buswidth;
+ rx.src_maxburst = 1;
+ ret = dmaengine_slave_config(controller->dma_rx, &rx);
+ if (ret) {
+ dev_err(fsl_lpspi->dev, "RX dma configuration failed with %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int fsl_lpspi_config(struct fsl_lpspi_data *fsl_lpspi)
+{
+ u32 temp;
+ int ret;
+
+ if (!fsl_lpspi->is_slave) {
+ ret = fsl_lpspi_set_bitrate(fsl_lpspi);
+ if (ret)
+ return ret;
+ }
+
+ fsl_lpspi_set_watermark(fsl_lpspi);
+
+ if (!fsl_lpspi->is_slave)
+ temp = CFGR1_MASTER;
+ else
+ temp = CFGR1_PINCFG;
+ if (fsl_lpspi->config.mode & SPI_CS_HIGH)
+ temp |= CFGR1_PCSPOL;
+ writel(temp, fsl_lpspi->base + IMX7ULP_CFGR1);
+
+ temp = readl(fsl_lpspi->base + IMX7ULP_CR);
+ temp |= CR_RRF | CR_RTF | CR_MEN;
+ writel(temp, fsl_lpspi->base + IMX7ULP_CR);
+
+ temp = 0;
+ if (fsl_lpspi->usedma)
+ temp = DER_TDDE | DER_RDDE;
+ writel(temp, fsl_lpspi->base + IMX7ULP_DER);
+
+ return 0;
+}
+
+static int fsl_lpspi_setup_transfer(struct spi_controller *controller,
+ struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct fsl_lpspi_data *fsl_lpspi =
+ spi_controller_get_devdata(spi->controller);
+
+ if (t == NULL)
+ return -EINVAL;
+
+ fsl_lpspi->config.mode = spi->mode;
+ fsl_lpspi->config.bpw = t->bits_per_word;
+ fsl_lpspi->config.speed_hz = t->speed_hz;
+ if (fsl_lpspi->is_only_cs1)
+ fsl_lpspi->config.chip_select = 1;
+ else
+ fsl_lpspi->config.chip_select = spi->chip_select;
+
+ if (!fsl_lpspi->config.speed_hz)
+ fsl_lpspi->config.speed_hz = spi->max_speed_hz;
+ if (!fsl_lpspi->config.bpw)
+ fsl_lpspi->config.bpw = spi->bits_per_word;
+
+ /* Initialize the functions for transfer */
+ if (fsl_lpspi->config.bpw <= 8) {
+ fsl_lpspi->rx = fsl_lpspi_buf_rx_u8;
+ fsl_lpspi->tx = fsl_lpspi_buf_tx_u8;
+ } else if (fsl_lpspi->config.bpw <= 16) {
+ fsl_lpspi->rx = fsl_lpspi_buf_rx_u16;
+ fsl_lpspi->tx = fsl_lpspi_buf_tx_u16;
+ } else {
+ fsl_lpspi->rx = fsl_lpspi_buf_rx_u32;
+ fsl_lpspi->tx = fsl_lpspi_buf_tx_u32;
+ }
+
+ if (t->len <= fsl_lpspi->txfifosize)
+ fsl_lpspi->watermark = t->len;
+ else
+ fsl_lpspi->watermark = fsl_lpspi->txfifosize;
+
+ if (fsl_lpspi_can_dma(controller, spi, t))
+ fsl_lpspi->usedma = true;
+ else
+ fsl_lpspi->usedma = false;
+
+ return fsl_lpspi_config(fsl_lpspi);
+}
+
+static int fsl_lpspi_slave_abort(struct spi_controller *controller)
+{
+ struct fsl_lpspi_data *fsl_lpspi =
+ spi_controller_get_devdata(controller);
+
+ fsl_lpspi->slave_aborted = true;
+ if (!fsl_lpspi->usedma)
+ complete(&fsl_lpspi->xfer_done);
+ else {
+ complete(&fsl_lpspi->dma_tx_completion);
+ complete(&fsl_lpspi->dma_rx_completion);
+ }
+
+ return 0;
+}
+
+static int fsl_lpspi_wait_for_completion(struct spi_controller *controller)
+{
+ struct fsl_lpspi_data *fsl_lpspi =
+ spi_controller_get_devdata(controller);
+
+ if (fsl_lpspi->is_slave) {
+ if (wait_for_completion_interruptible(&fsl_lpspi->xfer_done) ||
+ fsl_lpspi->slave_aborted) {
+ dev_dbg(fsl_lpspi->dev, "interrupted\n");
+ return -EINTR;
+ }
+ } else {
+ if (!wait_for_completion_timeout(&fsl_lpspi->xfer_done, HZ)) {
+ dev_dbg(fsl_lpspi->dev, "wait for completion timeout\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
+
+static int fsl_lpspi_reset(struct fsl_lpspi_data *fsl_lpspi)
+{
+ u32 temp;
+
+ if (!fsl_lpspi->usedma) {
+		/* Disable all interrupts */
+ fsl_lpspi_intctrl(fsl_lpspi, 0);
+ }
+
+ /* W1C for all flags in SR */
+ temp = 0x3F << 8;
+ writel(temp, fsl_lpspi->base + IMX7ULP_SR);
+
+ /* Clear FIFO and disable module */
+ temp = CR_RRF | CR_RTF;
+ writel(temp, fsl_lpspi->base + IMX7ULP_CR);
+
+ return 0;
+}
+
+static void fsl_lpspi_dma_rx_callback(void *cookie)
+{
+ struct fsl_lpspi_data *fsl_lpspi = (struct fsl_lpspi_data *)cookie;
+
+ complete(&fsl_lpspi->dma_rx_completion);
+}
+
+static void fsl_lpspi_dma_tx_callback(void *cookie)
+{
+ struct fsl_lpspi_data *fsl_lpspi = (struct fsl_lpspi_data *)cookie;
+
+ complete(&fsl_lpspi->dma_tx_completion);
+}
+
+static int fsl_lpspi_calculate_timeout(struct fsl_lpspi_data *fsl_lpspi,
+ int size)
+{
+ unsigned long timeout = 0;
+
+	/* Time for the actual data transfer plus the HW-related CS change delay */
+ timeout = (8 + 4) * size / fsl_lpspi->config.speed_hz;
+
+	/* Add an extra second for scheduler-related activities */
+ timeout += 1;
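+	/*
+	 * Example with hypothetical numbers: a 4096-byte transfer at 1 MHz
+	 * gives (8 + 4) * 4096 / 1000000 = 0 s of bus time (integer math),
+	 * so the extra second plus the doubling below still yield a 2 s
+	 * timeout.
+	 */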
+
+ /* Double calculated timeout */
+ return msecs_to_jiffies(2 * timeout * MSEC_PER_SEC);
+}
+
+static int fsl_lpspi_dma_transfer(struct spi_controller *controller,
+ struct fsl_lpspi_data *fsl_lpspi,
+ struct spi_transfer *transfer)
+{
+ struct dma_async_tx_descriptor *desc_tx, *desc_rx;
+ unsigned long transfer_timeout;
+ unsigned long timeout;
+ struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;
+ int ret;
+
+ ret = fsl_lpspi_dma_configure(controller);
+ if (ret)
+ return ret;
+
+ desc_rx = dmaengine_prep_slave_sg(controller->dma_rx,
+ rx->sgl, rx->nents, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc_rx)
+ return -EINVAL;
+
+ desc_rx->callback = fsl_lpspi_dma_rx_callback;
+ desc_rx->callback_param = (void *)fsl_lpspi;
+ dmaengine_submit(desc_rx);
+ reinit_completion(&fsl_lpspi->dma_rx_completion);
+ dma_async_issue_pending(controller->dma_rx);
+
+ desc_tx = dmaengine_prep_slave_sg(controller->dma_tx,
+ tx->sgl, tx->nents, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc_tx) {
+ dmaengine_terminate_all(controller->dma_tx);
+ return -EINVAL;
+ }
+
+ desc_tx->callback = fsl_lpspi_dma_tx_callback;
+ desc_tx->callback_param = (void *)fsl_lpspi;
+ dmaengine_submit(desc_tx);
+ reinit_completion(&fsl_lpspi->dma_tx_completion);
+ dma_async_issue_pending(controller->dma_tx);
+
+ fsl_lpspi->slave_aborted = false;
+
+ if (!fsl_lpspi->is_slave) {
+ transfer_timeout = fsl_lpspi_calculate_timeout(fsl_lpspi,
+ transfer->len);
+
+		/* Wait for eDMA to finish the data transfer. */
+ timeout = wait_for_completion_timeout(&fsl_lpspi->dma_tx_completion,
+ transfer_timeout);
+ if (!timeout) {
+ dev_err(fsl_lpspi->dev, "I/O Error in DMA TX\n");
+ dmaengine_terminate_all(controller->dma_tx);
+ dmaengine_terminate_all(controller->dma_rx);
+ fsl_lpspi_reset(fsl_lpspi);
+ return -ETIMEDOUT;
+ }
+
+ timeout = wait_for_completion_timeout(&fsl_lpspi->dma_rx_completion,
+ transfer_timeout);
+ if (!timeout) {
+ dev_err(fsl_lpspi->dev, "I/O Error in DMA RX\n");
+ dmaengine_terminate_all(controller->dma_tx);
+ dmaengine_terminate_all(controller->dma_rx);
+ fsl_lpspi_reset(fsl_lpspi);
+ return -ETIMEDOUT;
+ }
+ } else {
+ if (wait_for_completion_interruptible(&fsl_lpspi->dma_tx_completion) ||
+ fsl_lpspi->slave_aborted) {
+ dev_dbg(fsl_lpspi->dev,
+ "I/O Error in DMA TX interrupted\n");
+ dmaengine_terminate_all(controller->dma_tx);
+ dmaengine_terminate_all(controller->dma_rx);
+ fsl_lpspi_reset(fsl_lpspi);
+ return -EINTR;
+ }
+
+ if (wait_for_completion_interruptible(&fsl_lpspi->dma_rx_completion) ||
+ fsl_lpspi->slave_aborted) {
+ dev_dbg(fsl_lpspi->dev,
+ "I/O Error in DMA RX interrupted\n");
+ dmaengine_terminate_all(controller->dma_tx);
+ dmaengine_terminate_all(controller->dma_rx);
+ fsl_lpspi_reset(fsl_lpspi);
+ return -EINTR;
+ }
+ }
+
+ fsl_lpspi_reset(fsl_lpspi);
+
+ return 0;
+}
+
+static void fsl_lpspi_dma_exit(struct spi_controller *controller)
+{
+ if (controller->dma_rx) {
+ dma_release_channel(controller->dma_rx);
+ controller->dma_rx = NULL;
+ }
+
+ if (controller->dma_tx) {
+ dma_release_channel(controller->dma_tx);
+ controller->dma_tx = NULL;
+ }
+}
+
+static int fsl_lpspi_dma_init(struct device *dev,
+ struct fsl_lpspi_data *fsl_lpspi,
+ struct spi_controller *controller)
+{
+ int ret;
+
+ /* Prepare for TX DMA: */
+ controller->dma_tx = dma_request_chan(dev, "tx");
+ if (IS_ERR(controller->dma_tx)) {
+ ret = PTR_ERR(controller->dma_tx);
+ dev_dbg(dev, "can't get the TX DMA channel, error %d!\n", ret);
+ controller->dma_tx = NULL;
+ goto err;
+ }
+
+ /* Prepare for RX DMA: */
+ controller->dma_rx = dma_request_chan(dev, "rx");
+ if (IS_ERR(controller->dma_rx)) {
+ ret = PTR_ERR(controller->dma_rx);
+ dev_dbg(dev, "can't get the RX DMA channel, error %d\n", ret);
+ controller->dma_rx = NULL;
+ goto err;
+ }
+
+ init_completion(&fsl_lpspi->dma_rx_completion);
+ init_completion(&fsl_lpspi->dma_tx_completion);
+ controller->can_dma = fsl_lpspi_can_dma;
+ controller->max_dma_len = FSL_LPSPI_MAX_EDMA_BYTES;
+
+ return 0;
+err:
+ fsl_lpspi_dma_exit(controller);
+ return ret;
+}
+
+static int fsl_lpspi_pio_transfer(struct spi_controller *controller,
+ struct spi_transfer *t)
+{
+ struct fsl_lpspi_data *fsl_lpspi =
+ spi_controller_get_devdata(controller);
+ int ret;
+
+ fsl_lpspi->tx_buf = t->tx_buf;
+ fsl_lpspi->rx_buf = t->rx_buf;
+ fsl_lpspi->remain = t->len;
+
+ reinit_completion(&fsl_lpspi->xfer_done);
+ fsl_lpspi->slave_aborted = false;
+
+ fsl_lpspi_write_tx_fifo(fsl_lpspi);
+
+ ret = fsl_lpspi_wait_for_completion(controller);
+ if (ret)
+ return ret;
+
+ fsl_lpspi_reset(fsl_lpspi);
+
+ return 0;
+}
+
+static int fsl_lpspi_transfer_one(struct spi_controller *controller,
+ struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct fsl_lpspi_data *fsl_lpspi =
+ spi_controller_get_devdata(controller);
+ int ret;
+
+ fsl_lpspi->is_first_byte = true;
+ ret = fsl_lpspi_setup_transfer(controller, spi, t);
+ if (ret < 0)
+ return ret;
+
+ fsl_lpspi_set_cmd(fsl_lpspi);
+ fsl_lpspi->is_first_byte = false;
+
+ if (fsl_lpspi->usedma)
+ ret = fsl_lpspi_dma_transfer(controller, fsl_lpspi, t);
+ else
+ ret = fsl_lpspi_pio_transfer(controller, t);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static irqreturn_t fsl_lpspi_isr(int irq, void *dev_id)
+{
+ u32 temp_SR, temp_IER;
+ struct fsl_lpspi_data *fsl_lpspi = dev_id;
+
+ temp_IER = readl(fsl_lpspi->base + IMX7ULP_IER);
+ fsl_lpspi_intctrl(fsl_lpspi, 0);
+ temp_SR = readl(fsl_lpspi->base + IMX7ULP_SR);
+
+ fsl_lpspi_read_rx_fifo(fsl_lpspi);
+
+ if ((temp_SR & SR_TDF) && (temp_IER & IER_TDIE)) {
+ fsl_lpspi_write_tx_fifo(fsl_lpspi);
+ return IRQ_HANDLED;
+ }
+
+ if (temp_SR & SR_MBF ||
+ readl(fsl_lpspi->base + IMX7ULP_FSR) & FSR_TXCOUNT) {
+ writel(SR_FCF, fsl_lpspi->base + IMX7ULP_SR);
+ fsl_lpspi_intctrl(fsl_lpspi, IER_FCIE);
+ return IRQ_HANDLED;
+ }
+
+ if (temp_SR & SR_FCF && (temp_IER & IER_FCIE)) {
+ writel(SR_FCF, fsl_lpspi->base + IMX7ULP_SR);
+ complete(&fsl_lpspi->xfer_done);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+#ifdef CONFIG_PM
+static int fsl_lpspi_runtime_resume(struct device *dev)
+{
+ struct spi_controller *controller = dev_get_drvdata(dev);
+ struct fsl_lpspi_data *fsl_lpspi;
+ int ret;
+
+ fsl_lpspi = spi_controller_get_devdata(controller);
+
+ ret = clk_prepare_enable(fsl_lpspi->clk_per);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(fsl_lpspi->clk_ipg);
+ if (ret) {
+ clk_disable_unprepare(fsl_lpspi->clk_per);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int fsl_lpspi_runtime_suspend(struct device *dev)
+{
+ struct spi_controller *controller = dev_get_drvdata(dev);
+ struct fsl_lpspi_data *fsl_lpspi;
+
+ fsl_lpspi = spi_controller_get_devdata(controller);
+
+ clk_disable_unprepare(fsl_lpspi->clk_per);
+ clk_disable_unprepare(fsl_lpspi->clk_ipg);
+
+ return 0;
+}
+#endif
+
+static int fsl_lpspi_init_rpm(struct fsl_lpspi_data *fsl_lpspi)
+{
+ struct device *dev = fsl_lpspi->dev;
+
+ pm_runtime_enable(dev);
+ pm_runtime_set_autosuspend_delay(dev, FSL_LPSPI_RPM_TIMEOUT);
+ pm_runtime_use_autosuspend(dev);
+
+ return 0;
+}
+
+static int fsl_lpspi_probe(struct platform_device *pdev)
+{
+ struct fsl_lpspi_data *fsl_lpspi;
+ struct spi_controller *controller;
+ struct resource *res;
+ int ret, irq;
+ u32 temp;
+ bool is_slave;
+
+ is_slave = of_property_read_bool((&pdev->dev)->of_node, "spi-slave");
+ if (is_slave)
+ controller = spi_alloc_slave(&pdev->dev,
+ sizeof(struct fsl_lpspi_data));
+ else
+ controller = spi_alloc_master(&pdev->dev,
+ sizeof(struct fsl_lpspi_data));
+
+ if (!controller)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, controller);
+
+ fsl_lpspi = spi_controller_get_devdata(controller);
+ fsl_lpspi->dev = &pdev->dev;
+ fsl_lpspi->is_slave = is_slave;
+ fsl_lpspi->is_only_cs1 = of_property_read_bool((&pdev->dev)->of_node,
+ "fsl,spi-only-use-cs1-sel");
+
+ controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
+ controller->transfer_one = fsl_lpspi_transfer_one;
+ controller->prepare_transfer_hardware = lpspi_prepare_xfer_hardware;
+ controller->unprepare_transfer_hardware = lpspi_unprepare_xfer_hardware;
+ controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ controller->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;
+ controller->dev.of_node = pdev->dev.of_node;
+ controller->bus_num = pdev->id;
+ controller->slave_abort = fsl_lpspi_slave_abort;
+ if (!fsl_lpspi->is_slave)
+ controller->use_gpio_descriptors = true;
+
+ init_completion(&fsl_lpspi->xfer_done);
+
+ fsl_lpspi->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(fsl_lpspi->base)) {
+ ret = PTR_ERR(fsl_lpspi->base);
+ goto out_controller_put;
+ }
+ fsl_lpspi->base_phys = res->start;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto out_controller_put;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, fsl_lpspi_isr, 0,
+ dev_name(&pdev->dev), fsl_lpspi);
+ if (ret) {
+ dev_err(&pdev->dev, "can't get irq%d: %d\n", irq, ret);
+ goto out_controller_put;
+ }
+
+ fsl_lpspi->clk_per = devm_clk_get(&pdev->dev, "per");
+ if (IS_ERR(fsl_lpspi->clk_per)) {
+ ret = PTR_ERR(fsl_lpspi->clk_per);
+ goto out_controller_put;
+ }
+
+ fsl_lpspi->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+ if (IS_ERR(fsl_lpspi->clk_ipg)) {
+ ret = PTR_ERR(fsl_lpspi->clk_ipg);
+ goto out_controller_put;
+ }
+
+ /* enable the clock */
+ ret = fsl_lpspi_init_rpm(fsl_lpspi);
+ if (ret)
+ goto out_controller_put;
+
+ ret = pm_runtime_get_sync(fsl_lpspi->dev);
+ if (ret < 0) {
+ dev_err(fsl_lpspi->dev, "failed to enable clock\n");
+ goto out_pm_get;
+ }
+
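+	/*
+	 * The PARAM register encodes the FIFO sizes as powers of two: bits
+	 * [3:0] give log2(txfifosize) and bits [11:8] give log2(rxfifosize).
+	 * For instance (illustrative value only), PARAM = 0x0404 would mean
+	 * 16-word TX and RX FIFOs.
+	 */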
+ temp = readl(fsl_lpspi->base + IMX7ULP_PARAM);
+ fsl_lpspi->txfifosize = 1 << (temp & 0x0f);
+ fsl_lpspi->rxfifosize = 1 << ((temp >> 8) & 0x0f);
+
+ ret = fsl_lpspi_dma_init(&pdev->dev, fsl_lpspi, controller);
+ if (ret == -EPROBE_DEFER)
+ goto out_pm_get;
+ if (ret < 0)
+ dev_err(&pdev->dev, "dma setup error %d, use pio\n", ret);
+ else
+ /*
+		 * Disable the LPSPI module IRQ when DMA mode has been enabled
+		 * successfully, to prevent unexpected LPSPI module IRQ events.
+ */
+ disable_irq(irq);
+
+ ret = devm_spi_register_controller(&pdev->dev, controller);
+ if (ret < 0) {
+ dev_err_probe(&pdev->dev, ret, "spi_register_controller error\n");
+ goto free_dma;
+ }
+
+ pm_runtime_mark_last_busy(fsl_lpspi->dev);
+ pm_runtime_put_autosuspend(fsl_lpspi->dev);
+
+ return 0;
+
+free_dma:
+ fsl_lpspi_dma_exit(controller);
+out_pm_get:
+ pm_runtime_dont_use_autosuspend(fsl_lpspi->dev);
+ pm_runtime_put_sync(fsl_lpspi->dev);
+ pm_runtime_disable(fsl_lpspi->dev);
+out_controller_put:
+ spi_controller_put(controller);
+
+ return ret;
+}
+
+static int fsl_lpspi_remove(struct platform_device *pdev)
+{
+ struct spi_controller *controller = platform_get_drvdata(pdev);
+ struct fsl_lpspi_data *fsl_lpspi =
+ spi_controller_get_devdata(controller);
+
+ fsl_lpspi_dma_exit(controller);
+
+ pm_runtime_disable(fsl_lpspi->dev);
+ return 0;
+}
+
+static int __maybe_unused fsl_lpspi_suspend(struct device *dev)
+{
+ pinctrl_pm_select_sleep_state(dev);
+ return pm_runtime_force_suspend(dev);
+}
+
+static int __maybe_unused fsl_lpspi_resume(struct device *dev)
+{
+ int ret;
+
+ ret = pm_runtime_force_resume(dev);
+ if (ret) {
+ dev_err(dev, "Error in resume: %d\n", ret);
+ return ret;
+ }
+
+ pinctrl_pm_select_default_state(dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops fsl_lpspi_pm_ops = {
+ SET_RUNTIME_PM_OPS(fsl_lpspi_runtime_suspend,
+ fsl_lpspi_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(fsl_lpspi_suspend, fsl_lpspi_resume)
+};
+
+static struct platform_driver fsl_lpspi_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = fsl_lpspi_dt_ids,
+ .pm = &fsl_lpspi_pm_ops,
+ },
+ .probe = fsl_lpspi_probe,
+ .remove = fsl_lpspi_remove,
+};
+module_platform_driver(fsl_lpspi_driver);
+
+MODULE_DESCRIPTION("LPSPI Controller driver");
+MODULE_AUTHOR("Gao Pan <pandy.gao@nxp.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-fsl-qspi.c b/drivers/spi/spi-fsl-qspi.c
new file mode 100644
index 000000000..85cc71ba6
--- /dev/null
+++ b/drivers/spi/spi-fsl-qspi.c
@@ -0,0 +1,1013 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * Freescale QuadSPI driver.
+ *
+ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ * Copyright (C) 2018 Bootlin
+ * Copyright (C) 2018 exceet electronics GmbH
+ * Copyright (C) 2018 Kontron Electronics GmbH
+ *
+ * Transition to SPI MEM interface:
+ * Authors:
+ * Boris Brezillon <bbrezillon@kernel.org>
+ * Frieder Schrempf <frieder.schrempf@kontron.de>
+ * Yogesh Gaur <yogeshnarayan.gaur@nxp.com>
+ * Suresh Gupta <suresh.gupta@nxp.com>
+ *
+ * Based on the original fsl-quadspi.c SPI NOR driver:
+ * Author: Freescale Semiconductor, Inc.
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_qos.h>
+#include <linux/sizes.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+
+/*
+ * The driver only uses a single LUT entry, which is updated on
+ * each call of exec_op(). Index 0 is preset at boot with a basic
+ * read operation, so let's use the last entry (15).
+ */
+#define SEQID_LUT 15
+
+/* Registers used by the driver */
+#define QUADSPI_MCR 0x00
+#define QUADSPI_MCR_RESERVED_MASK GENMASK(19, 16)
+#define QUADSPI_MCR_MDIS_MASK BIT(14)
+#define QUADSPI_MCR_CLR_TXF_MASK BIT(11)
+#define QUADSPI_MCR_CLR_RXF_MASK BIT(10)
+#define QUADSPI_MCR_DDR_EN_MASK BIT(7)
+#define QUADSPI_MCR_END_CFG_MASK GENMASK(3, 2)
+#define QUADSPI_MCR_SWRSTHD_MASK BIT(1)
+#define QUADSPI_MCR_SWRSTSD_MASK BIT(0)
+
+#define QUADSPI_IPCR 0x08
+#define QUADSPI_IPCR_SEQID(x) ((x) << 24)
+
+#define QUADSPI_FLSHCR 0x0c
+#define QUADSPI_FLSHCR_TCSS_MASK GENMASK(3, 0)
+#define QUADSPI_FLSHCR_TCSH_MASK GENMASK(11, 8)
+#define QUADSPI_FLSHCR_TDH_MASK GENMASK(17, 16)
+
+#define QUADSPI_BUF0CR 0x10
+#define QUADSPI_BUF1CR 0x14
+#define QUADSPI_BUF2CR 0x18
+#define QUADSPI_BUFXCR_INVALID_MSTRID 0xe
+
+#define QUADSPI_BUF3CR 0x1c
+#define QUADSPI_BUF3CR_ALLMST_MASK BIT(31)
+#define QUADSPI_BUF3CR_ADATSZ(x) ((x) << 8)
+#define QUADSPI_BUF3CR_ADATSZ_MASK GENMASK(15, 8)
+
+#define QUADSPI_BFGENCR 0x20
+#define QUADSPI_BFGENCR_SEQID(x) ((x) << 12)
+
+#define QUADSPI_BUF0IND 0x30
+#define QUADSPI_BUF1IND 0x34
+#define QUADSPI_BUF2IND 0x38
+#define QUADSPI_SFAR 0x100
+
+#define QUADSPI_SMPR 0x108
+#define QUADSPI_SMPR_DDRSMP_MASK GENMASK(18, 16)
+#define QUADSPI_SMPR_FSDLY_MASK BIT(6)
+#define QUADSPI_SMPR_FSPHS_MASK BIT(5)
+#define QUADSPI_SMPR_HSENA_MASK BIT(0)
+
+#define QUADSPI_RBCT 0x110
+#define QUADSPI_RBCT_WMRK_MASK GENMASK(4, 0)
+#define QUADSPI_RBCT_RXBRD_USEIPS BIT(8)
+
+#define QUADSPI_TBDR 0x154
+
+#define QUADSPI_SR 0x15c
+#define QUADSPI_SR_IP_ACC_MASK BIT(1)
+#define QUADSPI_SR_AHB_ACC_MASK BIT(2)
+
+#define QUADSPI_FR 0x160
+#define QUADSPI_FR_TFF_MASK BIT(0)
+
+#define QUADSPI_RSER 0x164
+#define QUADSPI_RSER_TFIE BIT(0)
+
+#define QUADSPI_SPTRCLR 0x16c
+#define QUADSPI_SPTRCLR_IPPTRC BIT(8)
+#define QUADSPI_SPTRCLR_BFPTRC BIT(0)
+
+#define QUADSPI_SFA1AD 0x180
+#define QUADSPI_SFA2AD 0x184
+#define QUADSPI_SFB1AD 0x188
+#define QUADSPI_SFB2AD 0x18c
+#define QUADSPI_RBDR(x) (0x200 + ((x) * 4))
+
+#define QUADSPI_LUTKEY 0x300
+#define QUADSPI_LUTKEY_VALUE 0x5AF05AF0
+
+#define QUADSPI_LCKCR 0x304
+#define QUADSPI_LCKER_LOCK BIT(0)
+#define QUADSPI_LCKER_UNLOCK BIT(1)
+
+#define QUADSPI_LUT_BASE 0x310
+#define QUADSPI_LUT_OFFSET (SEQID_LUT * 4 * 4)
+#define QUADSPI_LUT_REG(idx) \
+ (QUADSPI_LUT_BASE + QUADSPI_LUT_OFFSET + (idx) * 4)
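+/*
+ * With SEQID_LUT = 15, QUADSPI_LUT_OFFSET is 15 * 4 * 4 = 0xf0, so e.g.
+ * QUADSPI_LUT_REG(0) resolves to 0x310 + 0xf0 = 0x400, the first of the
+ * four 32-bit words making up LUT sequence 15.
+ */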
+
+/* Instruction set for the LUT register */
+#define LUT_STOP 0
+#define LUT_CMD 1
+#define LUT_ADDR 2
+#define LUT_DUMMY 3
+#define LUT_MODE 4
+#define LUT_MODE2 5
+#define LUT_MODE4 6
+#define LUT_FSL_READ 7
+#define LUT_FSL_WRITE 8
+#define LUT_JMP_ON_CS 9
+#define LUT_ADDR_DDR 10
+#define LUT_MODE_DDR 11
+#define LUT_MODE2_DDR 12
+#define LUT_MODE4_DDR 13
+#define LUT_FSL_READ_DDR 14
+#define LUT_FSL_WRITE_DDR 15
+#define LUT_DATA_LEARN 16
+
+/*
+ * The PAD definitions for the LUT register.
+ *
+ * The pad stands for the number of IO lines [0:3].
+ * For example, a quad read needs four IO lines,
+ * so you should use LUT_PAD(4).
+ */
+#define LUT_PAD(x) (fls(x) - 1)
+
+/*
+ * Macro for constructing the LUT entries with the following
+ * register layout:
+ *
+ * ---------------------------------------------------
+ * | INSTR1 | PAD1 | OPRND1 | INSTR0 | PAD0 | OPRND0 |
+ * ---------------------------------------------------
+ */
+#define LUT_DEF(idx, ins, pad, opr) \
+ ((((ins) << 10) | ((pad) << 8) | (opr)) << (((idx) % 2) * 16))
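+/*
+ * For example, a single-line command instruction with a (hypothetical)
+ * opcode of 0x9f would be encoded as
+ *	LUT_DEF(0, LUT_CMD, LUT_PAD(1), 0x9f) = (1 << 10) | (0 << 8) | 0x9f = 0x49f
+ * in the low half-word; odd indexes land in the high half-word instead.
+ */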
+
+/* Controller needs driver to swap endianness */
+#define QUADSPI_QUIRK_SWAP_ENDIAN BIT(0)
+
+/* Controller needs 4x internal clock */
+#define QUADSPI_QUIRK_4X_INT_CLK BIT(1)
+
+/*
+ * TKT253890, the controller needs the driver to fill the txfifo with
+ * at least 16 bytes to trigger a data transfer, even though the extra
+ * data won't be transferred.
+ */
+#define QUADSPI_QUIRK_TKT253890 BIT(2)
+
+/* TKT245618, the controller cannot wake up from wait mode */
+#define QUADSPI_QUIRK_TKT245618 BIT(3)
+
+/*
+ * Controller adds QSPI_AMBA_BASE (base address of the mapped memory)
+ * internally. No need to add it when setting the SFXXAD and SFAR registers.
+ */
+#define QUADSPI_QUIRK_BASE_INTERNAL BIT(4)
+
+/*
+ * Controller uses TDH bits in register QUADSPI_FLSHCR.
+ * They need to be set in accordance with the DDR/SDR mode.
+ */
+#define QUADSPI_QUIRK_USE_TDH_SETTING BIT(5)
+
+struct fsl_qspi_devtype_data {
+ unsigned int rxfifo;
+ unsigned int txfifo;
+ int invalid_mstrid;
+ unsigned int ahb_buf_size;
+ unsigned int quirks;
+ bool little_endian;
+};
+
+static const struct fsl_qspi_devtype_data vybrid_data = {
+ .rxfifo = SZ_128,
+ .txfifo = SZ_64,
+ .invalid_mstrid = QUADSPI_BUFXCR_INVALID_MSTRID,
+ .ahb_buf_size = SZ_1K,
+ .quirks = QUADSPI_QUIRK_SWAP_ENDIAN,
+ .little_endian = true,
+};
+
+static const struct fsl_qspi_devtype_data imx6sx_data = {
+ .rxfifo = SZ_128,
+ .txfifo = SZ_512,
+ .invalid_mstrid = QUADSPI_BUFXCR_INVALID_MSTRID,
+ .ahb_buf_size = SZ_1K,
+ .quirks = QUADSPI_QUIRK_4X_INT_CLK | QUADSPI_QUIRK_TKT245618,
+ .little_endian = true,
+};
+
+static const struct fsl_qspi_devtype_data imx7d_data = {
+ .rxfifo = SZ_128,
+ .txfifo = SZ_512,
+ .invalid_mstrid = QUADSPI_BUFXCR_INVALID_MSTRID,
+ .ahb_buf_size = SZ_1K,
+ .quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_4X_INT_CLK |
+ QUADSPI_QUIRK_USE_TDH_SETTING,
+ .little_endian = true,
+};
+
+static const struct fsl_qspi_devtype_data imx6ul_data = {
+ .rxfifo = SZ_128,
+ .txfifo = SZ_512,
+ .invalid_mstrid = QUADSPI_BUFXCR_INVALID_MSTRID,
+ .ahb_buf_size = SZ_1K,
+ .quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_4X_INT_CLK |
+ QUADSPI_QUIRK_USE_TDH_SETTING,
+ .little_endian = true,
+};
+
+static const struct fsl_qspi_devtype_data ls1021a_data = {
+ .rxfifo = SZ_128,
+ .txfifo = SZ_64,
+ .invalid_mstrid = QUADSPI_BUFXCR_INVALID_MSTRID,
+ .ahb_buf_size = SZ_1K,
+ .quirks = 0,
+ .little_endian = false,
+};
+
+static const struct fsl_qspi_devtype_data ls2080a_data = {
+ .rxfifo = SZ_128,
+ .txfifo = SZ_64,
+ .ahb_buf_size = SZ_1K,
+ .invalid_mstrid = 0x0,
+ .quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_BASE_INTERNAL,
+ .little_endian = true,
+};
+
+struct fsl_qspi {
+ void __iomem *iobase;
+ void __iomem *ahb_addr;
+ u32 memmap_phy;
+ struct clk *clk, *clk_en;
+ struct device *dev;
+ struct completion c;
+ const struct fsl_qspi_devtype_data *devtype_data;
+ struct mutex lock;
+ struct pm_qos_request pm_qos_req;
+ int selected;
+};
+
+static inline int needs_swap_endian(struct fsl_qspi *q)
+{
+ return q->devtype_data->quirks & QUADSPI_QUIRK_SWAP_ENDIAN;
+}
+
+static inline int needs_4x_clock(struct fsl_qspi *q)
+{
+ return q->devtype_data->quirks & QUADSPI_QUIRK_4X_INT_CLK;
+}
+
+static inline int needs_fill_txfifo(struct fsl_qspi *q)
+{
+ return q->devtype_data->quirks & QUADSPI_QUIRK_TKT253890;
+}
+
+static inline int needs_wakeup_wait_mode(struct fsl_qspi *q)
+{
+ return q->devtype_data->quirks & QUADSPI_QUIRK_TKT245618;
+}
+
+static inline int needs_amba_base_offset(struct fsl_qspi *q)
+{
+ return !(q->devtype_data->quirks & QUADSPI_QUIRK_BASE_INTERNAL);
+}
+
+static inline int needs_tdh_setting(struct fsl_qspi *q)
+{
+ return q->devtype_data->quirks & QUADSPI_QUIRK_USE_TDH_SETTING;
+}
+
+/*
+ * An IC bug makes it necessary to rearrange the 32-bit data.
+ * Later chips, such as IMX6SLX, have fixed this bug.
+ */
+static inline u32 fsl_qspi_endian_xchg(struct fsl_qspi *q, u32 a)
+{
+ return needs_swap_endian(q) ? __swab32(a) : a;
+}
+
+/*
+ * R/W functions for big- or little-endian registers:
+ * The QSPI controller's endianness is independent of
+ * the CPU core's endianness. Even when the CPU core is
+ * little-endian, the QSPI controller can be configured as
+ * either big-endian or little-endian.
+ */
+static void qspi_writel(struct fsl_qspi *q, u32 val, void __iomem *addr)
+{
+ if (q->devtype_data->little_endian)
+ iowrite32(val, addr);
+ else
+ iowrite32be(val, addr);
+}
+
+static u32 qspi_readl(struct fsl_qspi *q, void __iomem *addr)
+{
+ if (q->devtype_data->little_endian)
+ return ioread32(addr);
+
+ return ioread32be(addr);
+}
+
+static irqreturn_t fsl_qspi_irq_handler(int irq, void *dev_id)
+{
+ struct fsl_qspi *q = dev_id;
+ u32 reg;
+
+ /* clear interrupt */
+ reg = qspi_readl(q, q->iobase + QUADSPI_FR);
+ qspi_writel(q, reg, q->iobase + QUADSPI_FR);
+
+ if (reg & QUADSPI_FR_TFF_MASK)
+ complete(&q->c);
+
+ dev_dbg(q->dev, "QUADSPI_FR : 0x%.8x:0x%.8x\n", 0, reg);
+ return IRQ_HANDLED;
+}
+
+static int fsl_qspi_check_buswidth(struct fsl_qspi *q, u8 width)
+{
+ switch (width) {
+ case 1:
+ case 2:
+ case 4:
+ return 0;
+ }
+
+ return -ENOTSUPP;
+}
+
+static bool fsl_qspi_supports_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->master);
+ int ret;
+
+ ret = fsl_qspi_check_buswidth(q, op->cmd.buswidth);
+
+ if (op->addr.nbytes)
+ ret |= fsl_qspi_check_buswidth(q, op->addr.buswidth);
+
+ if (op->dummy.nbytes)
+ ret |= fsl_qspi_check_buswidth(q, op->dummy.buswidth);
+
+ if (op->data.nbytes)
+ ret |= fsl_qspi_check_buswidth(q, op->data.buswidth);
+
+ if (ret)
+ return false;
+
+ /*
+	 * The number of instructions needed for the op must fit
+	 * into a single LUT entry.
+ */
+ if (op->addr.nbytes +
+ (op->dummy.nbytes ? 1:0) +
+ (op->data.nbytes ? 1:0) > 6)
+ return false;
+
+ /* Max 64 dummy clock cycles supported */
+ if (op->dummy.nbytes &&
+ (op->dummy.nbytes * 8 / op->dummy.buswidth > 64))
+ return false;
+
+ /* Max data length, check controller limits and alignment */
+ if (op->data.dir == SPI_MEM_DATA_IN &&
+ (op->data.nbytes > q->devtype_data->ahb_buf_size ||
+ (op->data.nbytes > q->devtype_data->rxfifo - 4 &&
+ !IS_ALIGNED(op->data.nbytes, 8))))
+ return false;
+
+ if (op->data.dir == SPI_MEM_DATA_OUT &&
+ op->data.nbytes > q->devtype_data->txfifo)
+ return false;
+
+ return spi_mem_default_supports_op(mem, op);
+}
+
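+/*
+ * As an illustration, a plain 1-1-1 read with a 3-byte address and no dummy
+ * cycles occupies five LUT instruction slots here: one LUT_CMD, three
+ * LUT_MODE entries (one per address byte, see below) and one LUT_FSL_READ,
+ * followed by a LUT_STOP terminator.
+ */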
+static void fsl_qspi_prepare_lut(struct fsl_qspi *q,
+ const struct spi_mem_op *op)
+{
+ void __iomem *base = q->iobase;
+ u32 lutval[4] = {};
+ int lutidx = 1, i;
+
+ lutval[0] |= LUT_DEF(0, LUT_CMD, LUT_PAD(op->cmd.buswidth),
+ op->cmd.opcode);
+
+ /*
+ * For some unknown reason, using LUT_ADDR doesn't work in some
+	 * cases (at least with one-byte-long addresses), so let's use
+	 * LUT_MODE to write the address bytes one by one.
+ */
+ for (i = 0; i < op->addr.nbytes; i++) {
+ u8 addrbyte = op->addr.val >> (8 * (op->addr.nbytes - i - 1));
+
+ lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_MODE,
+ LUT_PAD(op->addr.buswidth),
+ addrbyte);
+ lutidx++;
+ }
+
+ if (op->dummy.nbytes) {
+ lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_DUMMY,
+ LUT_PAD(op->dummy.buswidth),
+ op->dummy.nbytes * 8 /
+ op->dummy.buswidth);
+ lutidx++;
+ }
+
+ if (op->data.nbytes) {
+ lutval[lutidx / 2] |= LUT_DEF(lutidx,
+ op->data.dir == SPI_MEM_DATA_IN ?
+ LUT_FSL_READ : LUT_FSL_WRITE,
+ LUT_PAD(op->data.buswidth),
+ 0);
+ lutidx++;
+ }
+
+ lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_STOP, 0, 0);
+
+ /* unlock LUT */
+ qspi_writel(q, QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY);
+ qspi_writel(q, QUADSPI_LCKER_UNLOCK, q->iobase + QUADSPI_LCKCR);
+
+ /* fill LUT */
+ for (i = 0; i < ARRAY_SIZE(lutval); i++)
+ qspi_writel(q, lutval[i], base + QUADSPI_LUT_REG(i));
+
+ /* lock LUT */
+ qspi_writel(q, QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY);
+ qspi_writel(q, QUADSPI_LCKER_LOCK, q->iobase + QUADSPI_LCKCR);
+}
+
+static int fsl_qspi_clk_prep_enable(struct fsl_qspi *q)
+{
+ int ret;
+
+ ret = clk_prepare_enable(q->clk_en);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(q->clk);
+ if (ret) {
+ clk_disable_unprepare(q->clk_en);
+ return ret;
+ }
+
+ if (needs_wakeup_wait_mode(q))
+ cpu_latency_qos_add_request(&q->pm_qos_req, 0);
+
+ return 0;
+}
+
+static void fsl_qspi_clk_disable_unprep(struct fsl_qspi *q)
+{
+ if (needs_wakeup_wait_mode(q))
+ cpu_latency_qos_remove_request(&q->pm_qos_req);
+
+ clk_disable_unprepare(q->clk);
+ clk_disable_unprepare(q->clk_en);
+}
+
+/*
+ * If we have changed the content of the flash by writing or erasing, or if we
+ * read from flash with a different offset into the page buffer, we need to
+ * invalidate the AHB buffer. If we do not do so, we may read out the wrong
+ * data. The spec tells us to reset the AHB domain and Serial Flash domain at
+ * the same time.
+ */
+static void fsl_qspi_invalidate(struct fsl_qspi *q)
+{
+ u32 reg;
+
+ reg = qspi_readl(q, q->iobase + QUADSPI_MCR);
+ reg |= QUADSPI_MCR_SWRSTHD_MASK | QUADSPI_MCR_SWRSTSD_MASK;
+ qspi_writel(q, reg, q->iobase + QUADSPI_MCR);
+
+ /*
+	 * The minimum delay is 1 AHB + 2 SFCK clocks.
+	 * A delay of 1 us is enough.
+ */
+ udelay(1);
+
+ reg &= ~(QUADSPI_MCR_SWRSTHD_MASK | QUADSPI_MCR_SWRSTSD_MASK);
+ qspi_writel(q, reg, q->iobase + QUADSPI_MCR);
+}
+
+static void fsl_qspi_select_mem(struct fsl_qspi *q, struct spi_device *spi)
+{
+ unsigned long rate = spi->max_speed_hz;
+ int ret;
+
+ if (q->selected == spi->chip_select)
+ return;
+
+ if (needs_4x_clock(q))
+ rate *= 4;
+
+ fsl_qspi_clk_disable_unprep(q);
+
+ ret = clk_set_rate(q->clk, rate);
+ if (ret)
+ return;
+
+ ret = fsl_qspi_clk_prep_enable(q);
+ if (ret)
+ return;
+
+ q->selected = spi->chip_select;
+
+ fsl_qspi_invalidate(q);
+}
+
+static void fsl_qspi_read_ahb(struct fsl_qspi *q, const struct spi_mem_op *op)
+{
+ memcpy_fromio(op->data.buf.in,
+ q->ahb_addr + q->selected * q->devtype_data->ahb_buf_size,
+ op->data.nbytes);
+}
+
+static void fsl_qspi_fill_txfifo(struct fsl_qspi *q,
+ const struct spi_mem_op *op)
+{
+ void __iomem *base = q->iobase;
+ int i;
+ u32 val;
+
+ for (i = 0; i < ALIGN_DOWN(op->data.nbytes, 4); i += 4) {
+ memcpy(&val, op->data.buf.out + i, 4);
+ val = fsl_qspi_endian_xchg(q, val);
+ qspi_writel(q, val, base + QUADSPI_TBDR);
+ }
+
+ if (i < op->data.nbytes) {
+ memcpy(&val, op->data.buf.out + i, op->data.nbytes - i);
+ val = fsl_qspi_endian_xchg(q, val);
+ qspi_writel(q, val, base + QUADSPI_TBDR);
+ }
+
+ if (needs_fill_txfifo(q)) {
+ for (i = op->data.nbytes; i < 16; i += 4)
+ qspi_writel(q, 0, base + QUADSPI_TBDR);
+ }
+}
+
+static void fsl_qspi_read_rxfifo(struct fsl_qspi *q,
+ const struct spi_mem_op *op)
+{
+ void __iomem *base = q->iobase;
+ int i;
+ u8 *buf = op->data.buf.in;
+ u32 val;
+
+ for (i = 0; i < ALIGN_DOWN(op->data.nbytes, 4); i += 4) {
+ val = qspi_readl(q, base + QUADSPI_RBDR(i / 4));
+ val = fsl_qspi_endian_xchg(q, val);
+ memcpy(buf + i, &val, 4);
+ }
+
+ if (i < op->data.nbytes) {
+ val = qspi_readl(q, base + QUADSPI_RBDR(i / 4));
+ val = fsl_qspi_endian_xchg(q, val);
+ memcpy(buf + i, &val, op->data.nbytes - i);
+ }
+}
+
+static int fsl_qspi_do_op(struct fsl_qspi *q, const struct spi_mem_op *op)
+{
+ void __iomem *base = q->iobase;
+ int err = 0;
+
+ init_completion(&q->c);
+
+ /*
+ * Always start the sequence at the same index since we update
+	 * the LUT at each exec_op() call. Also specify the DATA
+	 * length, since it has not been specified in the LUT.
+ */
+ qspi_writel(q, op->data.nbytes | QUADSPI_IPCR_SEQID(SEQID_LUT),
+ base + QUADSPI_IPCR);
+
+ /* Wait for the interrupt. */
+ if (!wait_for_completion_timeout(&q->c, msecs_to_jiffies(1000)))
+ err = -ETIMEDOUT;
+
+ if (!err && op->data.nbytes && op->data.dir == SPI_MEM_DATA_IN)
+ fsl_qspi_read_rxfifo(q, op);
+
+ return err;
+}
+
+static int fsl_qspi_readl_poll_tout(struct fsl_qspi *q, void __iomem *base,
+ u32 mask, u32 delay_us, u32 timeout_us)
+{
+ u32 reg;
+
+ if (!q->devtype_data->little_endian)
+ mask = (u32)cpu_to_be32(mask);
+
+ return readl_poll_timeout(base, reg, !(reg & mask), delay_us,
+ timeout_us);
+}
+
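+/*
+ * Execute a single spi-mem op: wait for the controller to go idle, point
+ * SFAR at the window of the selected chip, clear the FIFOs and sequence
+ * pointers, program the LUT, and then either read large payloads through
+ * the AHB window or run an IP command (filling the TX FIFO first for
+ * writes).
+ */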
+static int fsl_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->master);
+ void __iomem *base = q->iobase;
+ u32 addr_offset = 0;
+ int err = 0;
+ int invalid_mstrid = q->devtype_data->invalid_mstrid;
+
+ mutex_lock(&q->lock);
+
+	/* wait for the controller to be ready */
+ fsl_qspi_readl_poll_tout(q, base + QUADSPI_SR, (QUADSPI_SR_IP_ACC_MASK |
+ QUADSPI_SR_AHB_ACC_MASK), 10, 1000);
+
+ fsl_qspi_select_mem(q, mem->spi);
+
+ if (needs_amba_base_offset(q))
+ addr_offset = q->memmap_phy;
+
+ qspi_writel(q,
+ q->selected * q->devtype_data->ahb_buf_size + addr_offset,
+ base + QUADSPI_SFAR);
+
+ qspi_writel(q, qspi_readl(q, base + QUADSPI_MCR) |
+ QUADSPI_MCR_CLR_RXF_MASK | QUADSPI_MCR_CLR_TXF_MASK,
+ base + QUADSPI_MCR);
+
+ qspi_writel(q, QUADSPI_SPTRCLR_BFPTRC | QUADSPI_SPTRCLR_IPPTRC,
+ base + QUADSPI_SPTRCLR);
+
+ qspi_writel(q, invalid_mstrid, base + QUADSPI_BUF0CR);
+ qspi_writel(q, invalid_mstrid, base + QUADSPI_BUF1CR);
+ qspi_writel(q, invalid_mstrid, base + QUADSPI_BUF2CR);
+
+ fsl_qspi_prepare_lut(q, op);
+
+ /*
+ * If we have large chunks of data, we read them through the AHB bus
+ * by accessing the mapped memory. In all other cases we use
+ * IP commands to access the flash.
+ */
+ if (op->data.nbytes > (q->devtype_data->rxfifo - 4) &&
+ op->data.dir == SPI_MEM_DATA_IN) {
+ fsl_qspi_read_ahb(q, op);
+ } else {
+ qspi_writel(q, QUADSPI_RBCT_WMRK_MASK |
+ QUADSPI_RBCT_RXBRD_USEIPS, base + QUADSPI_RBCT);
+
+ if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
+ fsl_qspi_fill_txfifo(q, op);
+
+ err = fsl_qspi_do_op(q, op);
+ }
+
+ /* Invalidate the data in the AHB buffer. */
+ fsl_qspi_invalidate(q);
+
+ mutex_unlock(&q->lock);
+
+ return err;
+}
+
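+/*
+ * Clamp the data size to what a single op can handle: writes are limited
+ * by the TX FIFO, reads by the AHB buffer. Reads larger than (rxfifo - 4)
+ * bytes are served through the AHB buffer and therefore have to be a
+ * multiple of 8 bytes, hence the ALIGN_DOWN.
+ */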
+static int fsl_qspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
+{
+ struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->master);
+
+ if (op->data.dir == SPI_MEM_DATA_OUT) {
+ if (op->data.nbytes > q->devtype_data->txfifo)
+ op->data.nbytes = q->devtype_data->txfifo;
+ } else {
+ if (op->data.nbytes > q->devtype_data->ahb_buf_size)
+ op->data.nbytes = q->devtype_data->ahb_buf_size;
+ else if (op->data.nbytes > (q->devtype_data->rxfifo - 4))
+ op->data.nbytes = ALIGN_DOWN(op->data.nbytes, 8);
+ }
+
+ return 0;
+}
+
+static int fsl_qspi_default_setup(struct fsl_qspi *q)
+{
+ void __iomem *base = q->iobase;
+ u32 reg, addr_offset = 0;
+ int ret;
+
+	/* disable and unprepare the clocks to avoid passing a glitch to the controller */
+ fsl_qspi_clk_disable_unprep(q);
+
+	/* Set the default frequency; we will change it later if necessary. */
+ ret = clk_set_rate(q->clk, 66000000);
+ if (ret)
+ return ret;
+
+ ret = fsl_qspi_clk_prep_enable(q);
+ if (ret)
+ return ret;
+
+ /* Reset the module */
+ qspi_writel(q, QUADSPI_MCR_SWRSTSD_MASK | QUADSPI_MCR_SWRSTHD_MASK,
+ base + QUADSPI_MCR);
+ udelay(1);
+
+ /* Disable the module */
+ qspi_writel(q, QUADSPI_MCR_MDIS_MASK | QUADSPI_MCR_RESERVED_MASK,
+ base + QUADSPI_MCR);
+
+ /*
+ * Previous boot stages (BootROM, bootloader) might have used DDR
+ * mode and did not clear the TDH bits. As we currently use SDR mode
+ * only, clear the TDH bits if necessary.
+ */
+ if (needs_tdh_setting(q))
+ qspi_writel(q, qspi_readl(q, base + QUADSPI_FLSHCR) &
+ ~QUADSPI_FLSHCR_TDH_MASK,
+ base + QUADSPI_FLSHCR);
+
+ reg = qspi_readl(q, base + QUADSPI_SMPR);
+ qspi_writel(q, reg & ~(QUADSPI_SMPR_FSDLY_MASK
+ | QUADSPI_SMPR_FSPHS_MASK
+ | QUADSPI_SMPR_HSENA_MASK
+ | QUADSPI_SMPR_DDRSMP_MASK), base + QUADSPI_SMPR);
+
+	/* We only use buffer3 for AHB reads */
+ qspi_writel(q, 0, base + QUADSPI_BUF0IND);
+ qspi_writel(q, 0, base + QUADSPI_BUF1IND);
+ qspi_writel(q, 0, base + QUADSPI_BUF2IND);
+
+ qspi_writel(q, QUADSPI_BFGENCR_SEQID(SEQID_LUT),
+ q->iobase + QUADSPI_BFGENCR);
+ qspi_writel(q, QUADSPI_RBCT_WMRK_MASK, base + QUADSPI_RBCT);
+ qspi_writel(q, QUADSPI_BUF3CR_ALLMST_MASK |
+ QUADSPI_BUF3CR_ADATSZ(q->devtype_data->ahb_buf_size / 8),
+ base + QUADSPI_BUF3CR);
+
+ if (needs_amba_base_offset(q))
+ addr_offset = q->memmap_phy;
+
+ /*
+ * In HW there can be a maximum of four chips on two buses with
+ * two chip selects on each bus. We use four chip selects in SW
+ * to differentiate between the four chips.
+ * We use ahb_buf_size for each chip and set SFA1AD, SFA2AD, SFB1AD,
+ * SFB2AD accordingly.
+ */
+ qspi_writel(q, q->devtype_data->ahb_buf_size + addr_offset,
+ base + QUADSPI_SFA1AD);
+ qspi_writel(q, q->devtype_data->ahb_buf_size * 2 + addr_offset,
+ base + QUADSPI_SFA2AD);
+ qspi_writel(q, q->devtype_data->ahb_buf_size * 3 + addr_offset,
+ base + QUADSPI_SFB1AD);
+ qspi_writel(q, q->devtype_data->ahb_buf_size * 4 + addr_offset,
+ base + QUADSPI_SFB2AD);
+
+ q->selected = -1;
+
+ /* Enable the module */
+ qspi_writel(q, QUADSPI_MCR_RESERVED_MASK | QUADSPI_MCR_END_CFG_MASK,
+ base + QUADSPI_MCR);
+
+ /* clear all interrupt status */
+ qspi_writel(q, 0xffffffff, q->iobase + QUADSPI_FR);
+
+ /* enable the interrupt */
+ qspi_writel(q, QUADSPI_RSER_TFIE, q->iobase + QUADSPI_RSER);
+
+ return 0;
+}
+
+static const char *fsl_qspi_get_name(struct spi_mem *mem)
+{
+ struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->master);
+ struct device *dev = &mem->spi->dev;
+ const char *name;
+
+ /*
+ * In order to keep mtdparts compatible with the old MTD driver at
+ * mtd/spi-nor/fsl-quadspi.c, we set a custom name derived from the
+ * platform_device of the controller.
+ */
+ if (of_get_available_child_count(q->dev->of_node) == 1)
+ return dev_name(q->dev);
+
+ name = devm_kasprintf(dev, GFP_KERNEL,
+ "%s-%d", dev_name(q->dev),
+ mem->spi->chip_select);
+
+ if (!name) {
+ dev_err(dev, "failed to get memory for custom flash name\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return name;
+}
+
+static const struct spi_controller_mem_ops fsl_qspi_mem_ops = {
+ .adjust_op_size = fsl_qspi_adjust_op_size,
+ .supports_op = fsl_qspi_supports_op,
+ .exec_op = fsl_qspi_exec_op,
+ .get_name = fsl_qspi_get_name,
+};
+
+static int fsl_qspi_probe(struct platform_device *pdev)
+{
+ struct spi_controller *ctlr;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct resource *res;
+ struct fsl_qspi *q;
+ int ret;
+
+ ctlr = spi_alloc_master(&pdev->dev, sizeof(*q));
+ if (!ctlr)
+ return -ENOMEM;
+
+ ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD |
+ SPI_TX_DUAL | SPI_TX_QUAD;
+
+ q = spi_controller_get_devdata(ctlr);
+ q->dev = dev;
+ q->devtype_data = of_device_get_match_data(dev);
+ if (!q->devtype_data) {
+ ret = -ENODEV;
+ goto err_put_ctrl;
+ }
+
+ platform_set_drvdata(pdev, q);
+
+ /* find the resources */
+ q->iobase = devm_platform_ioremap_resource_byname(pdev, "QuadSPI");
+ if (IS_ERR(q->iobase)) {
+ ret = PTR_ERR(q->iobase);
+ goto err_put_ctrl;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "QuadSPI-memory");
+ if (!res) {
+ ret = -EINVAL;
+ goto err_put_ctrl;
+ }
+ q->memmap_phy = res->start;
+	/* Since there are 4 chip selects, the required map size is 4 times ahb_buf_size */
+ q->ahb_addr = devm_ioremap(dev, q->memmap_phy,
+ (q->devtype_data->ahb_buf_size * 4));
+ if (!q->ahb_addr) {
+ ret = -ENOMEM;
+ goto err_put_ctrl;
+ }
+
+ /* find the clocks */
+ q->clk_en = devm_clk_get(dev, "qspi_en");
+ if (IS_ERR(q->clk_en)) {
+ ret = PTR_ERR(q->clk_en);
+ goto err_put_ctrl;
+ }
+
+ q->clk = devm_clk_get(dev, "qspi");
+ if (IS_ERR(q->clk)) {
+ ret = PTR_ERR(q->clk);
+ goto err_put_ctrl;
+ }
+
+ ret = fsl_qspi_clk_prep_enable(q);
+ if (ret) {
+ dev_err(dev, "can not enable the clock\n");
+ goto err_put_ctrl;
+ }
+
+ /* find the irq */
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ goto err_disable_clk;
+
+ ret = devm_request_irq(dev, ret,
+ fsl_qspi_irq_handler, 0, pdev->name, q);
+ if (ret) {
+ dev_err(dev, "failed to request irq: %d\n", ret);
+ goto err_disable_clk;
+ }
+
+ mutex_init(&q->lock);
+
+ ctlr->bus_num = -1;
+ ctlr->num_chipselect = 4;
+ ctlr->mem_ops = &fsl_qspi_mem_ops;
+
+ fsl_qspi_default_setup(q);
+
+ ctlr->dev.of_node = np;
+
+ ret = devm_spi_register_controller(dev, ctlr);
+ if (ret)
+ goto err_destroy_mutex;
+
+ return 0;
+
+err_destroy_mutex:
+ mutex_destroy(&q->lock);
+
+err_disable_clk:
+ fsl_qspi_clk_disable_unprep(q);
+
+err_put_ctrl:
+ spi_controller_put(ctlr);
+
+ dev_err(dev, "Freescale QuadSPI probe failed\n");
+ return ret;
+}
+
+static int fsl_qspi_remove(struct platform_device *pdev)
+{
+ struct fsl_qspi *q = platform_get_drvdata(pdev);
+
+ /* disable the hardware */
+ qspi_writel(q, QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR);
+ qspi_writel(q, 0x0, q->iobase + QUADSPI_RSER);
+
+ fsl_qspi_clk_disable_unprep(q);
+
+ mutex_destroy(&q->lock);
+
+ return 0;
+}
+
+static int fsl_qspi_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int fsl_qspi_resume(struct device *dev)
+{
+ struct fsl_qspi *q = dev_get_drvdata(dev);
+
+ fsl_qspi_default_setup(q);
+
+ return 0;
+}
+
+static const struct of_device_id fsl_qspi_dt_ids[] = {
+ { .compatible = "fsl,vf610-qspi", .data = &vybrid_data, },
+ { .compatible = "fsl,imx6sx-qspi", .data = &imx6sx_data, },
+ { .compatible = "fsl,imx7d-qspi", .data = &imx7d_data, },
+ { .compatible = "fsl,imx6ul-qspi", .data = &imx6ul_data, },
+ { .compatible = "fsl,ls1021a-qspi", .data = &ls1021a_data, },
+ { .compatible = "fsl,ls2080a-qspi", .data = &ls2080a_data, },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, fsl_qspi_dt_ids);
+
+static const struct dev_pm_ops fsl_qspi_pm_ops = {
+ .suspend = fsl_qspi_suspend,
+ .resume = fsl_qspi_resume,
+};
+
+static struct platform_driver fsl_qspi_driver = {
+ .driver = {
+ .name = "fsl-quadspi",
+ .of_match_table = fsl_qspi_dt_ids,
+ .pm = &fsl_qspi_pm_ops,
+ },
+ .probe = fsl_qspi_probe,
+ .remove = fsl_qspi_remove,
+};
+module_platform_driver(fsl_qspi_driver);
+
+MODULE_DESCRIPTION("Freescale QuadSPI Controller Driver");
+MODULE_AUTHOR("Freescale Semiconductor Inc.");
+MODULE_AUTHOR("Boris Brezillon <bbrezillon@kernel.org>");
+MODULE_AUTHOR("Frieder Schrempf <frieder.schrempf@kontron.de>");
+MODULE_AUTHOR("Yogesh Gaur <yogeshnarayan.gaur@nxp.com>");
+MODULE_AUTHOR("Suresh Gupta <suresh.gupta@nxp.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
new file mode 100644
index 000000000..b14f430a6
--- /dev/null
+++ b/drivers/spi/spi-fsl-spi.c
@@ -0,0 +1,820 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Freescale SPI controller driver.
+ *
+ * Maintainer: Kumar Gala
+ *
+ * Copyright (C) 2006 Polycom, Inc.
+ * Copyright 2010 Freescale Semiconductor, Inc.
+ *
+ * CPM SPI and QE buffer descriptors mode support:
+ * Copyright (c) 2009 MontaVista Software, Inc.
+ * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * GRLIB support:
+ * Copyright (c) 2012 Aeroflex Gaisler AB.
+ * Author: Andreas Larsson <andreas@gaisler.com>
+ */
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/fsl_devices.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+#include <linux/types.h>
+
+#ifdef CONFIG_FSL_SOC
+#include <sysdev/fsl_soc.h>
+#endif
+
+/* Specific to the MPC8306/MPC8309 */
+#define IMMR_SPI_CS_OFFSET 0x14c
+#define SPI_BOOT_SEL_BIT 0x80000000
+
+#include "spi-fsl-lib.h"
+#include "spi-fsl-cpm.h"
+#include "spi-fsl-spi.h"
+
+#define TYPE_FSL 0
+#define TYPE_GRLIB 1
+
+struct fsl_spi_match_data {
+ int type;
+};
+
+static struct fsl_spi_match_data of_fsl_spi_fsl_config = {
+ .type = TYPE_FSL,
+};
+
+static struct fsl_spi_match_data of_fsl_spi_grlib_config = {
+ .type = TYPE_GRLIB,
+};
+
+static const struct of_device_id of_fsl_spi_match[] = {
+ {
+ .compatible = "fsl,spi",
+ .data = &of_fsl_spi_fsl_config,
+ },
+ {
+ .compatible = "aeroflexgaisler,spictrl",
+ .data = &of_fsl_spi_grlib_config,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, of_fsl_spi_match);
+
+static int fsl_spi_get_type(struct device *dev)
+{
+ const struct of_device_id *match;
+
+ if (dev->of_node) {
+ match = of_match_node(of_fsl_spi_match, dev->of_node);
+ if (match && match->data)
+ return ((struct fsl_spi_match_data *)match->data)->type;
+ }
+ return TYPE_FSL;
+}
+
+static void fsl_spi_change_mode(struct spi_device *spi)
+{
+ struct mpc8xxx_spi *mspi = spi_master_get_devdata(spi->master);
+ struct spi_mpc8xxx_cs *cs = spi->controller_state;
+ struct fsl_spi_reg __iomem *reg_base = mspi->reg_base;
+ __be32 __iomem *mode = &reg_base->mode;
+ unsigned long flags;
+
+ if (cs->hw_mode == mpc8xxx_spi_read_reg(mode))
+ return;
+
+ /* Turn off IRQs locally to minimize time that SPI is disabled. */
+ local_irq_save(flags);
+
+	/* Turn off the SPI unit prior to changing the mode */
+ mpc8xxx_spi_write_reg(mode, cs->hw_mode & ~SPMODE_ENABLE);
+
+ /* When in CPM mode, we need to reinit tx and rx. */
+	if (mspi->flags & SPI_CPM_MODE)
+		fsl_spi_cpm_reinit_txrx(mspi);
+ mpc8xxx_spi_write_reg(mode, cs->hw_mode);
+ local_irq_restore(flags);
+}
+
+static void fsl_spi_qe_cpu_set_shifts(u32 *rx_shift, u32 *tx_shift,
+ int bits_per_word, int msb_first)
+{
+ *rx_shift = 0;
+ *tx_shift = 0;
+ if (msb_first) {
+ if (bits_per_word <= 8) {
+ *rx_shift = 16;
+ *tx_shift = 24;
+ } else if (bits_per_word <= 16) {
+ *rx_shift = 16;
+ *tx_shift = 16;
+ }
+ } else {
+ if (bits_per_word <= 8)
+ *rx_shift = 8;
+ }
+}
+
+static void fsl_spi_grlib_set_shifts(u32 *rx_shift, u32 *tx_shift,
+ int bits_per_word, int msb_first)
+{
+ *rx_shift = 0;
+ *tx_shift = 0;
+ if (bits_per_word <= 16) {
+ if (msb_first) {
+ *rx_shift = 16; /* LSB in bit 16 */
+ *tx_shift = 32 - bits_per_word; /* MSB in bit 31 */
+ } else {
+ *rx_shift = 16 - bits_per_word; /* MSB in bit 15 */
+ }
+ }
+}
+
+static int mspi_apply_cpu_mode_quirks(struct spi_mpc8xxx_cs *cs,
+ struct spi_device *spi,
+ struct mpc8xxx_spi *mpc8xxx_spi,
+ int bits_per_word)
+{
+ cs->rx_shift = 0;
+ cs->tx_shift = 0;
+ if (bits_per_word <= 8) {
+ cs->get_rx = mpc8xxx_spi_rx_buf_u8;
+ cs->get_tx = mpc8xxx_spi_tx_buf_u8;
+ } else if (bits_per_word <= 16) {
+ cs->get_rx = mpc8xxx_spi_rx_buf_u16;
+ cs->get_tx = mpc8xxx_spi_tx_buf_u16;
+ } else if (bits_per_word <= 32) {
+ cs->get_rx = mpc8xxx_spi_rx_buf_u32;
+ cs->get_tx = mpc8xxx_spi_tx_buf_u32;
+ } else
+ return -EINVAL;
+
+ if (mpc8xxx_spi->set_shifts)
+ mpc8xxx_spi->set_shifts(&cs->rx_shift, &cs->tx_shift,
+ bits_per_word,
+ !(spi->mode & SPI_LSB_FIRST));
+
+ mpc8xxx_spi->rx_shift = cs->rx_shift;
+ mpc8xxx_spi->tx_shift = cs->tx_shift;
+ mpc8xxx_spi->get_rx = cs->get_rx;
+ mpc8xxx_spi->get_tx = cs->get_tx;
+
+ return bits_per_word;
+}
+
+static int fsl_spi_setup_transfer(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct mpc8xxx_spi *mpc8xxx_spi;
+ int bits_per_word = 0;
+ u8 pm;
+ u32 hz = 0;
+ struct spi_mpc8xxx_cs *cs = spi->controller_state;
+
+ mpc8xxx_spi = spi_master_get_devdata(spi->master);
+
+ if (t) {
+ bits_per_word = t->bits_per_word;
+ hz = t->speed_hz;
+ }
+
+ /* spi_transfer level calls that work per-word */
+ if (!bits_per_word)
+ bits_per_word = spi->bits_per_word;
+
+ if (!hz)
+ hz = spi->max_speed_hz;
+
+ if (!(mpc8xxx_spi->flags & SPI_CPM_MODE))
+ bits_per_word = mspi_apply_cpu_mode_quirks(cs, spi,
+ mpc8xxx_spi,
+ bits_per_word);
+
+ if (bits_per_word < 0)
+ return bits_per_word;
+
+ if (bits_per_word == 32)
+ bits_per_word = 0;
+ else
+ bits_per_word = bits_per_word - 1;
+
+ /* mask out bits we are going to set */
+ cs->hw_mode &= ~(SPMODE_LEN(0xF) | SPMODE_DIV16
+ | SPMODE_PM(0xF));
+
+ cs->hw_mode |= SPMODE_LEN(bits_per_word);
+
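+	/*
+	 * Pick the prescaler. As a worked example (with made-up numbers):
+	 * for spibrg = 132 MHz and hz = 1 MHz, spibrg / hz = 132 > 64, so
+	 * DIV16 is selected and pm = (132000000 - 1) / (1000000 * 64) + 1 = 3,
+	 * which is then decremented to 2 before being programmed via SPMODE_PM().
+	 */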
+ if ((mpc8xxx_spi->spibrg / hz) > 64) {
+ cs->hw_mode |= SPMODE_DIV16;
+ pm = (mpc8xxx_spi->spibrg - 1) / (hz * 64) + 1;
+ WARN_ONCE(pm > 16,
+ "%s: Requested speed is too low: %d Hz. Will use %d Hz instead.\n",
+ dev_name(&spi->dev), hz, mpc8xxx_spi->spibrg / 1024);
+ if (pm > 16)
+ pm = 16;
+ } else {
+ pm = (mpc8xxx_spi->spibrg - 1) / (hz * 4) + 1;
+ }
+ if (pm)
+ pm--;
+
+ cs->hw_mode |= SPMODE_PM(pm);
+
+ fsl_spi_change_mode(spi);
+ return 0;
+}
+
+static int fsl_spi_cpu_bufs(struct mpc8xxx_spi *mspi,
+ struct spi_transfer *t, unsigned int len)
+{
+ u32 word;
+ struct fsl_spi_reg __iomem *reg_base = mspi->reg_base;
+
+ mspi->count = len;
+
+ /* enable rx ints */
+ mpc8xxx_spi_write_reg(&reg_base->mask, SPIM_NE);
+
+ /* transmit word */
+ word = mspi->get_tx(mspi);
+ mpc8xxx_spi_write_reg(&reg_base->transmit, word);
+
+ return 0;
+}
+
+static int fsl_spi_bufs(struct spi_device *spi, struct spi_transfer *t,
+ bool is_dma_mapped)
+{
+ struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
+ struct fsl_spi_reg __iomem *reg_base;
+ unsigned int len = t->len;
+ u8 bits_per_word;
+ int ret;
+
+ reg_base = mpc8xxx_spi->reg_base;
+ bits_per_word = spi->bits_per_word;
+ if (t->bits_per_word)
+ bits_per_word = t->bits_per_word;
+
+ if (bits_per_word > 8) {
+ /* invalid length? */
+ if (len & 1)
+ return -EINVAL;
+ len /= 2;
+ }
+ if (bits_per_word > 16) {
+ /* invalid length? */
+ if (len & 1)
+ return -EINVAL;
+ len /= 2;
+ }
+
+ mpc8xxx_spi->tx = t->tx_buf;
+ mpc8xxx_spi->rx = t->rx_buf;
+
+ reinit_completion(&mpc8xxx_spi->done);
+
+ if (mpc8xxx_spi->flags & SPI_CPM_MODE)
+ ret = fsl_spi_cpm_bufs(mpc8xxx_spi, t, is_dma_mapped);
+ else
+ ret = fsl_spi_cpu_bufs(mpc8xxx_spi, t, len);
+ if (ret)
+ return ret;
+
+ wait_for_completion(&mpc8xxx_spi->done);
+
+ /* disable rx ints */
+ mpc8xxx_spi_write_reg(&reg_base->mask, 0);
+
+ if (mpc8xxx_spi->flags & SPI_CPM_MODE)
+ fsl_spi_cpm_bufs_complete(mpc8xxx_spi);
+
+ return mpc8xxx_spi->count;
+}
+
+static int fsl_spi_prepare_message(struct spi_controller *ctlr,
+ struct spi_message *m)
+{
+ struct mpc8xxx_spi *mpc8xxx_spi = spi_controller_get_devdata(ctlr);
+ struct spi_transfer *t;
+ struct spi_transfer *first;
+
+ first = list_first_entry(&m->transfers, struct spi_transfer,
+ transfer_list);
+
+ /*
+ * In CPU mode, optimize large byte transfers to use larger
+	 * bits_per_word values to reduce the number of interrupts taken.
+ *
+ * Some glitches can appear on the SPI clock when the mode changes.
+ * Check that there is no speed change during the transfer and set it up
+ * now to change the mode without having a chip-select asserted.
+ */
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ if (t->speed_hz != first->speed_hz) {
+ dev_err(&m->spi->dev,
+ "speed_hz cannot change during message.\n");
+ return -EINVAL;
+ }
+ if (!(mpc8xxx_spi->flags & SPI_CPM_MODE)) {
+ if (t->len < 256 || t->bits_per_word != 8)
+ continue;
+ if ((t->len & 3) == 0)
+ t->bits_per_word = 32;
+ else if ((t->len & 1) == 0)
+ t->bits_per_word = 16;
+ } else {
+ /*
+			 * CPM/QE uses little endian for words > 8 bits,
+			 * so transform 16- and 32-bit words into 8-bit ones.
+			 * Unfortunately that doesn't work for LSB-first, so
+			 * reject these for now.
+			 * Note: 32-bit words with LSB-first work iff
+			 * tfcr/rfcr is set to CPMFCR_GBL.
+ */
+ if (m->spi->mode & SPI_LSB_FIRST && t->bits_per_word > 8)
+ return -EINVAL;
+ if (t->bits_per_word == 16 || t->bits_per_word == 32)
+				t->bits_per_word = 8; /* pretend it's 8 bits */
+ if (t->bits_per_word == 8 && t->len >= 256 &&
+ (mpc8xxx_spi->flags & SPI_CPM1))
+ t->bits_per_word = 16;
+ }
+ }
+ return fsl_spi_setup_transfer(m->spi, first);
+}
+
+static int fsl_spi_transfer_one(struct spi_controller *controller,
+ struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ int status;
+
+ status = fsl_spi_setup_transfer(spi, t);
+ if (status < 0)
+ return status;
+ if (t->len)
+ status = fsl_spi_bufs(spi, t, !!t->tx_dma || !!t->rx_dma);
+ if (status > 0)
+ return -EMSGSIZE;
+
+ return status;
+}
+
+static int fsl_spi_unprepare_message(struct spi_controller *controller,
+ struct spi_message *msg)
+{
+ return fsl_spi_setup_transfer(msg->spi, NULL);
+}
+
+static int fsl_spi_setup(struct spi_device *spi)
+{
+ struct mpc8xxx_spi *mpc8xxx_spi;
+ struct fsl_spi_reg __iomem *reg_base;
+ bool initial_setup = false;
+ int retval;
+ u32 hw_mode;
+ struct spi_mpc8xxx_cs *cs = spi_get_ctldata(spi);
+
+ if (!spi->max_speed_hz)
+ return -EINVAL;
+
+ if (!cs) {
+ cs = kzalloc(sizeof(*cs), GFP_KERNEL);
+ if (!cs)
+ return -ENOMEM;
+ spi_set_ctldata(spi, cs);
+ initial_setup = true;
+ }
+ mpc8xxx_spi = spi_master_get_devdata(spi->master);
+
+ reg_base = mpc8xxx_spi->reg_base;
+
+ hw_mode = cs->hw_mode; /* Save original settings */
+ cs->hw_mode = mpc8xxx_spi_read_reg(&reg_base->mode);
+ /* mask out bits we are going to set */
+ cs->hw_mode &= ~(SPMODE_CP_BEGIN_EDGECLK | SPMODE_CI_INACTIVEHIGH
+ | SPMODE_REV | SPMODE_LOOP);
+
+ if (spi->mode & SPI_CPHA)
+ cs->hw_mode |= SPMODE_CP_BEGIN_EDGECLK;
+ if (spi->mode & SPI_CPOL)
+ cs->hw_mode |= SPMODE_CI_INACTIVEHIGH;
+ if (!(spi->mode & SPI_LSB_FIRST))
+ cs->hw_mode |= SPMODE_REV;
+ if (spi->mode & SPI_LOOP)
+ cs->hw_mode |= SPMODE_LOOP;
+
+ retval = fsl_spi_setup_transfer(spi, NULL);
+ if (retval < 0) {
+ cs->hw_mode = hw_mode; /* Restore settings */
+ if (initial_setup)
+ kfree(cs);
+ return retval;
+ }
+
+ return 0;
+}
+
+static void fsl_spi_cleanup(struct spi_device *spi)
+{
+ struct spi_mpc8xxx_cs *cs = spi_get_ctldata(spi);
+
+ kfree(cs);
+ spi_set_ctldata(spi, NULL);
+}
+
+static void fsl_spi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events)
+{
+ struct fsl_spi_reg __iomem *reg_base = mspi->reg_base;
+
+	/* We need to handle RX first */
+ if (events & SPIE_NE) {
+ u32 rx_data = mpc8xxx_spi_read_reg(&reg_base->receive);
+
+ if (mspi->rx)
+ mspi->get_rx(rx_data, mspi);
+ }
+
+ if ((events & SPIE_NF) == 0)
+ /* spin until TX is done */
+ while (((events =
+ mpc8xxx_spi_read_reg(&reg_base->event)) &
+ SPIE_NF) == 0)
+ cpu_relax();
+
+ /* Clear the events */
+ mpc8xxx_spi_write_reg(&reg_base->event, events);
+
+ mspi->count -= 1;
+ if (mspi->count) {
+ u32 word = mspi->get_tx(mspi);
+
+ mpc8xxx_spi_write_reg(&reg_base->transmit, word);
+ } else {
+ complete(&mspi->done);
+ }
+}
+
+static irqreturn_t fsl_spi_irq(s32 irq, void *context_data)
+{
+ struct mpc8xxx_spi *mspi = context_data;
+ irqreturn_t ret = IRQ_NONE;
+ u32 events;
+ struct fsl_spi_reg __iomem *reg_base = mspi->reg_base;
+
+	/* Get interrupt events (tx/rx) */
+ events = mpc8xxx_spi_read_reg(&reg_base->event);
+ if (events)
+ ret = IRQ_HANDLED;
+
+ dev_dbg(mspi->dev, "%s: events %x\n", __func__, events);
+
+ if (mspi->flags & SPI_CPM_MODE)
+ fsl_spi_cpm_irq(mspi, events);
+ else
+ fsl_spi_cpu_irq(mspi, events);
+
+ return ret;
+}
+
+static void fsl_spi_grlib_cs_control(struct spi_device *spi, bool on)
+{
+ struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
+ struct fsl_spi_reg __iomem *reg_base = mpc8xxx_spi->reg_base;
+ u32 slvsel;
+ u16 cs = spi->chip_select;
+
+ if (cs < mpc8xxx_spi->native_chipselects) {
+ slvsel = mpc8xxx_spi_read_reg(&reg_base->slvsel);
+ slvsel = on ? (slvsel | (1 << cs)) : (slvsel & ~(1 << cs));
+ mpc8xxx_spi_write_reg(&reg_base->slvsel, slvsel);
+ }
+}
+
+static void fsl_spi_grlib_probe(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(master);
+ struct fsl_spi_reg __iomem *reg_base = mpc8xxx_spi->reg_base;
+ int mbits;
+ u32 capabilities;
+
+ capabilities = mpc8xxx_spi_read_reg(&reg_base->cap);
+
+ mpc8xxx_spi->set_shifts = fsl_spi_grlib_set_shifts;
+ mbits = SPCAP_MAXWLEN(capabilities);
+ if (mbits)
+ mpc8xxx_spi->max_bits_per_word = mbits + 1;
+
+ mpc8xxx_spi->native_chipselects = 0;
+ if (SPCAP_SSEN(capabilities)) {
+ mpc8xxx_spi->native_chipselects = SPCAP_SSSZ(capabilities);
+ mpc8xxx_spi_write_reg(&reg_base->slvsel, 0xffffffff);
+ }
+ master->num_chipselect = mpc8xxx_spi->native_chipselects;
+ master->set_cs = fsl_spi_grlib_cs_control;
+}
+
+static void fsl_spi_cs_control(struct spi_device *spi, bool on)
+{
+ struct device *dev = spi->dev.parent->parent;
+ struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
+ struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata);
+
+ if (WARN_ON_ONCE(!pinfo->immr_spi_cs))
+ return;
+ iowrite32be(on ? 0 : SPI_BOOT_SEL_BIT, pinfo->immr_spi_cs);
+}
+
+static struct spi_master *fsl_spi_probe(struct device *dev,
+ struct resource *mem, unsigned int irq)
+{
+ struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
+ struct spi_master *master;
+ struct mpc8xxx_spi *mpc8xxx_spi;
+ struct fsl_spi_reg __iomem *reg_base;
+ u32 regval;
+ int ret = 0;
+
+ master = spi_alloc_master(dev, sizeof(struct mpc8xxx_spi));
+ if (master == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ dev_set_drvdata(dev, master);
+
+ mpc8xxx_spi_probe(dev, mem, irq);
+
+ master->setup = fsl_spi_setup;
+ master->cleanup = fsl_spi_cleanup;
+ master->prepare_message = fsl_spi_prepare_message;
+ master->transfer_one = fsl_spi_transfer_one;
+ master->unprepare_message = fsl_spi_unprepare_message;
+ master->use_gpio_descriptors = true;
+ master->set_cs = fsl_spi_cs_control;
+
+ mpc8xxx_spi = spi_master_get_devdata(master);
+ mpc8xxx_spi->max_bits_per_word = 32;
+ mpc8xxx_spi->type = fsl_spi_get_type(dev);
+
+ ret = fsl_spi_cpm_init(mpc8xxx_spi);
+ if (ret)
+ goto err_cpm_init;
+
+ mpc8xxx_spi->reg_base = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(mpc8xxx_spi->reg_base)) {
+ ret = PTR_ERR(mpc8xxx_spi->reg_base);
+ goto err_probe;
+ }
+
+ if (mpc8xxx_spi->type == TYPE_GRLIB)
+ fsl_spi_grlib_probe(dev);
+
+ if (mpc8xxx_spi->flags & SPI_CPM_MODE)
+ master->bits_per_word_mask =
+ (SPI_BPW_RANGE_MASK(4, 8) | SPI_BPW_MASK(16) | SPI_BPW_MASK(32));
+ else
+ master->bits_per_word_mask =
+ (SPI_BPW_RANGE_MASK(4, 16) | SPI_BPW_MASK(32));
+
+ master->bits_per_word_mask &=
+ SPI_BPW_RANGE_MASK(1, mpc8xxx_spi->max_bits_per_word);
+
+ if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE)
+ mpc8xxx_spi->set_shifts = fsl_spi_qe_cpu_set_shifts;
+
+ if (mpc8xxx_spi->set_shifts)
+ /* 8 bits per word and MSB first */
+ mpc8xxx_spi->set_shifts(&mpc8xxx_spi->rx_shift,
+ &mpc8xxx_spi->tx_shift, 8, 1);
+
+ /* Register for SPI Interrupt */
+ ret = devm_request_irq(dev, mpc8xxx_spi->irq, fsl_spi_irq,
+ 0, "fsl_spi", mpc8xxx_spi);
+
+ if (ret != 0)
+ goto err_probe;
+
+ reg_base = mpc8xxx_spi->reg_base;
+
+ /* SPI controller initializations */
+ mpc8xxx_spi_write_reg(&reg_base->mode, 0);
+ mpc8xxx_spi_write_reg(&reg_base->mask, 0);
+ mpc8xxx_spi_write_reg(&reg_base->command, 0);
+ mpc8xxx_spi_write_reg(&reg_base->event, 0xffffffff);
+
+ /* Enable SPI interface */
+ regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE;
+ if (mpc8xxx_spi->max_bits_per_word < 8) {
+ regval &= ~SPMODE_LEN(0xF);
+ regval |= SPMODE_LEN(mpc8xxx_spi->max_bits_per_word - 1);
+ }
+ if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE)
+ regval |= SPMODE_OP;
+
+ mpc8xxx_spi_write_reg(&reg_base->mode, regval);
+
+ ret = devm_spi_register_master(dev, master);
+ if (ret < 0)
+ goto err_probe;
+
+ dev_info(dev, "at 0x%p (irq = %d), %s mode\n", reg_base,
+ mpc8xxx_spi->irq, mpc8xxx_spi_strmode(mpc8xxx_spi->flags));
+
+ return master;
+
+err_probe:
+ fsl_spi_cpm_free(mpc8xxx_spi);
+err_cpm_init:
+ spi_master_put(master);
+err:
+ return ERR_PTR(ret);
+}
+
+static int of_fsl_spi_probe(struct platform_device *ofdev)
+{
+ struct device *dev = &ofdev->dev;
+ struct device_node *np = ofdev->dev.of_node;
+ struct spi_master *master;
+ struct resource mem;
+ int irq, type;
+ int ret;
+ bool spisel_boot = false;
+#if IS_ENABLED(CONFIG_FSL_SOC)
+ struct mpc8xxx_spi_probe_info *pinfo = NULL;
+#endif
+
+ ret = of_mpc8xxx_spi_probe(ofdev);
+ if (ret)
+ return ret;
+
+ type = fsl_spi_get_type(&ofdev->dev);
+ if (type == TYPE_FSL) {
+ struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
+#if IS_ENABLED(CONFIG_FSL_SOC)
+ pinfo = to_of_pinfo(pdata);
+
+ spisel_boot = of_property_read_bool(np, "fsl,spisel_boot");
+ if (spisel_boot) {
+ pinfo->immr_spi_cs = ioremap(get_immrbase() + IMMR_SPI_CS_OFFSET, 4);
+ if (!pinfo->immr_spi_cs)
+ return -ENOMEM;
+ }
+#endif
+ /*
+ * Handle the case where we have one hardwired (always selected)
+ * device on the first "chipselect". Else we let the core code
+ * handle any GPIOs or native chip selects and assign the
+ * appropriate callback for dealing with the CS lines. This isn't
+ * supported on the GRLIB variant.
+ */
+ ret = gpiod_count(dev, "cs");
+ if (ret < 0)
+ ret = 0;
+ if (ret == 0 && !spisel_boot)
+ pdata->max_chipselect = 1;
+ else
+ pdata->max_chipselect = ret + spisel_boot;
+ }
+
+ ret = of_address_to_resource(np, 0, &mem);
+ if (ret)
+ goto unmap_out;
+
+ irq = platform_get_irq(ofdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto unmap_out;
+ }
+
+ master = fsl_spi_probe(dev, &mem, irq);
+
+ return PTR_ERR_OR_ZERO(master);
+
+unmap_out:
+#if IS_ENABLED(CONFIG_FSL_SOC)
+ if (spisel_boot)
+ iounmap(pinfo->immr_spi_cs);
+#endif
+ return ret;
+}
+
+static int of_fsl_spi_remove(struct platform_device *ofdev)
+{
+ struct spi_master *master = platform_get_drvdata(ofdev);
+ struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(master);
+
+ fsl_spi_cpm_free(mpc8xxx_spi);
+ return 0;
+}
+
+static struct platform_driver of_fsl_spi_driver = {
+ .driver = {
+ .name = "fsl_spi",
+ .of_match_table = of_fsl_spi_match,
+ },
+ .probe = of_fsl_spi_probe,
+ .remove = of_fsl_spi_remove,
+};
+
+#ifdef CONFIG_MPC832x_RDB
+/*
+ * XXX XXX XXX
+ * This is a "legacy" platform driver that was used by the MPC8323E-RDB
+ * boards only. The driver should go away soon, since newer MPC8323E-RDB
+ * device trees can work with the OpenFirmware driver. But for now we
+ * support old trees as well.
+ */
+static int plat_mpc8xxx_spi_probe(struct platform_device *pdev)
+{
+ struct resource *mem;
+ int irq;
+ struct spi_master *master;
+
+ if (!dev_get_platdata(&pdev->dev))
+ return -EINVAL;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem)
+ return -EINVAL;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0)
+ return -EINVAL;
+
+ master = fsl_spi_probe(&pdev->dev, mem, irq);
+ return PTR_ERR_OR_ZERO(master);
+}
+
+static int plat_mpc8xxx_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(master);
+
+ fsl_spi_cpm_free(mpc8xxx_spi);
+
+ return 0;
+}
+
+MODULE_ALIAS("platform:mpc8xxx_spi");
+static struct platform_driver mpc8xxx_spi_driver = {
+ .probe = plat_mpc8xxx_spi_probe,
+ .remove = plat_mpc8xxx_spi_remove,
+ .driver = {
+ .name = "mpc8xxx_spi",
+ },
+};
+
+static bool legacy_driver_failed;
+
+static void __init legacy_driver_register(void)
+{
+ legacy_driver_failed = platform_driver_register(&mpc8xxx_spi_driver);
+}
+
+static void __exit legacy_driver_unregister(void)
+{
+ if (legacy_driver_failed)
+ return;
+ platform_driver_unregister(&mpc8xxx_spi_driver);
+}
+#else
+static void __init legacy_driver_register(void) {}
+static void __exit legacy_driver_unregister(void) {}
+#endif /* CONFIG_MPC832x_RDB */
+
+static int __init fsl_spi_init(void)
+{
+ legacy_driver_register();
+ return platform_driver_register(&of_fsl_spi_driver);
+}
+module_init(fsl_spi_init);
+
+static void __exit fsl_spi_exit(void)
+{
+ platform_driver_unregister(&of_fsl_spi_driver);
+ legacy_driver_unregister();
+}
+module_exit(fsl_spi_exit);
+
+MODULE_AUTHOR("Kumar Gala");
+MODULE_DESCRIPTION("Simple Freescale SPI Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-fsl-spi.h b/drivers/spi/spi-fsl-spi.h
new file mode 100644
index 000000000..fd8a5bef0
--- /dev/null
+++ b/drivers/spi/spi-fsl-spi.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Freescale SPI controller driver.
+ *
+ * Maintainer: Kumar Gala
+ *
+ * Copyright (C) 2006 Polycom, Inc.
+ * Copyright 2010 Freescale Semiconductor, Inc.
+ *
+ * CPM SPI and QE buffer descriptors mode support:
+ * Copyright (c) 2009 MontaVista Software, Inc.
+ * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * GRLIB support:
+ * Copyright (c) 2012 Aeroflex Gaisler AB.
+ * Author: Andreas Larsson <andreas@gaisler.com>
+ */
+
+#ifndef __SPI_FSL_SPI_H__
+#define __SPI_FSL_SPI_H__
+
+/* SPI Controller registers */
+struct fsl_spi_reg {
+ __be32 cap; /* TYPE_GRLIB specific */
+ u8 res1[0x1C];
+ __be32 mode;
+ __be32 event;
+ __be32 mask;
+ __be32 command;
+ __be32 transmit;
+ __be32 receive;
+ __be32 slvsel; /* TYPE_GRLIB specific */
+};
+
+/* SPI Controller mode register definitions */
+#define SPMODE_LOOP (1 << 30)
+#define SPMODE_CI_INACTIVEHIGH (1 << 29)
+#define SPMODE_CP_BEGIN_EDGECLK (1 << 28)
+#define SPMODE_DIV16 (1 << 27)
+#define SPMODE_REV (1 << 26)
+#define SPMODE_MS (1 << 25)
+#define SPMODE_ENABLE (1 << 24)
+#define SPMODE_LEN(x) ((x) << 20)
+#define SPMODE_PM(x) ((x) << 16)
+#define SPMODE_OP (1 << 14)
+#define SPMODE_CG(x) ((x) << 7)
+
+/* TYPE_GRLIB SPI Controller capability register definitions */
+#define SPCAP_SSEN(x) (((x) >> 16) & 0x1)
+#define SPCAP_SSSZ(x) (((x) >> 24) & 0xff)
+#define SPCAP_MAXWLEN(x) (((x) >> 20) & 0xf)
+
+/*
+ * Default for SPI Mode:
+ *	SPI MODE 0 (inactive low, phase middle, MSB, 8-bit length, slow clk)
+ */
+#define SPMODE_INIT_VAL (SPMODE_CI_INACTIVEHIGH | SPMODE_DIV16 | SPMODE_REV | \
+ SPMODE_MS | SPMODE_LEN(7) | SPMODE_PM(0xf))
+
+/* SPIE register values */
+#define SPIE_NE 0x00000200 /* Not empty */
+#define SPIE_NF 0x00000100 /* Not full */
+
+/* SPIM register values */
+#define SPIM_NE 0x00000200 /* Not empty */
+#define SPIM_NF 0x00000100 /* Not full */
+
+#endif /* __SPI_FSL_SPI_H__ */
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
new file mode 100644
index 000000000..7b76dcd11
--- /dev/null
+++ b/drivers/spi/spi-geni-qcom.c
@@ -0,0 +1,1110 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017-2018, The Linux foundation. All rights reserved.
+
+#include <linux/clk.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma/qcom-gpi-dma.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/log2.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/pm_runtime.h>
+#include <linux/qcom-geni-se.h>
+#include <linux/spi/spi.h>
+#include <linux/spinlock.h>
+
+/* SPI SE specific registers and respective register fields */
+#define SE_SPI_CPHA 0x224
+#define CPHA BIT(0)
+
+#define SE_SPI_LOOPBACK 0x22c
+#define LOOPBACK_ENABLE 0x1
+#define NORMAL_MODE 0x0
+#define LOOPBACK_MSK GENMASK(1, 0)
+
+#define SE_SPI_CPOL 0x230
+#define CPOL BIT(2)
+
+#define SE_SPI_DEMUX_OUTPUT_INV 0x24c
+#define CS_DEMUX_OUTPUT_INV_MSK GENMASK(3, 0)
+
+#define SE_SPI_DEMUX_SEL 0x250
+#define CS_DEMUX_OUTPUT_SEL GENMASK(3, 0)
+
+#define SE_SPI_TRANS_CFG 0x25c
+#define CS_TOGGLE BIT(1)
+
+#define SE_SPI_WORD_LEN 0x268
+#define WORD_LEN_MSK GENMASK(9, 0)
+#define MIN_WORD_LEN 4
+
+#define SE_SPI_TX_TRANS_LEN 0x26c
+#define SE_SPI_RX_TRANS_LEN 0x270
+#define TRANS_LEN_MSK GENMASK(23, 0)
+
+#define SE_SPI_PRE_POST_CMD_DLY 0x274
+
+#define SE_SPI_DELAY_COUNTERS 0x278
+#define SPI_INTER_WORDS_DELAY_MSK GENMASK(9, 0)
+#define SPI_CS_CLK_DELAY_MSK GENMASK(19, 10)
+#define SPI_CS_CLK_DELAY_SHFT 10
+
+/* M_CMD OP codes for SPI */
+#define SPI_TX_ONLY 1
+#define SPI_RX_ONLY 2
+#define SPI_TX_RX 7
+#define SPI_CS_ASSERT 8
+#define SPI_CS_DEASSERT 9
+#define SPI_SCK_ONLY 10
+/* M_CMD params for SPI */
+#define SPI_PRE_CMD_DELAY BIT(0)
+#define TIMESTAMP_BEFORE BIT(1)
+#define FRAGMENTATION BIT(2)
+#define TIMESTAMP_AFTER BIT(3)
+#define POST_CMD_DELAY BIT(4)
+
+#define GSI_LOOPBACK_EN BIT(0)
+#define GSI_CS_TOGGLE BIT(3)
+#define GSI_CPHA BIT(4)
+#define GSI_CPOL BIT(5)
+
+struct spi_geni_master {
+ struct geni_se se;
+ struct device *dev;
+ u32 tx_fifo_depth;
+ u32 fifo_width_bits;
+ u32 tx_wm;
+ u32 last_mode;
+ unsigned long cur_speed_hz;
+ unsigned long cur_sclk_hz;
+ unsigned int cur_bits_per_word;
+ unsigned int tx_rem_bytes;
+ unsigned int rx_rem_bytes;
+ const struct spi_transfer *cur_xfer;
+ struct completion cs_done;
+ struct completion cancel_done;
+ struct completion abort_done;
+ unsigned int oversampling;
+ spinlock_t lock;
+ int irq;
+ bool cs_flag;
+ bool abort_failed;
+ struct dma_chan *tx;
+ struct dma_chan *rx;
+ int cur_xfer_mode;
+};
+
+static int get_spi_clk_cfg(unsigned int speed_hz,
+ struct spi_geni_master *mas,
+ unsigned int *clk_idx,
+ unsigned int *clk_div)
+{
+ unsigned long sclk_freq;
+ unsigned int actual_hz;
+ int ret;
+
+ ret = geni_se_clk_freq_match(&mas->se,
+ speed_hz * mas->oversampling,
+ clk_idx, &sclk_freq, false);
+ if (ret) {
+ dev_err(mas->dev, "Failed(%d) to find src clk for %dHz\n",
+ ret, speed_hz);
+ return ret;
+ }
+
+ *clk_div = DIV_ROUND_UP(sclk_freq, mas->oversampling * speed_hz);
+ actual_hz = sclk_freq / (mas->oversampling * *clk_div);
+
+ dev_dbg(mas->dev, "req %u=>%u sclk %lu, idx %d, div %d\n", speed_hz,
+ actual_hz, sclk_freq, *clk_idx, *clk_div);
+ ret = dev_pm_opp_set_rate(mas->dev, sclk_freq);
+ if (ret)
+ dev_err(mas->dev, "dev_pm_opp_set_rate failed %d\n", ret);
+ else
+ mas->cur_sclk_hz = sclk_freq;
+
+ return ret;
+}
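+
+/*
+ * A worked example of the selection above, using made-up numbers: for a
+ * requested speed of 1 MHz with an oversampling of 1, suppose
+ * geni_se_clk_freq_match() picks a 19.2 MHz source clock. Then
+ * clk_div = DIV_ROUND_UP(19200000, 1 * 1000000) = 20 and the actual rate
+ * is 19200000 / (1 * 20) = 960 kHz, the closest rate that does not
+ * exceed the request.
+ */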
+
+static void handle_fifo_timeout(struct spi_master *spi,
+ struct spi_message *msg)
+{
+ struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ unsigned long time_left;
+ struct geni_se *se = &mas->se;
+
+ spin_lock_irq(&mas->lock);
+ reinit_completion(&mas->cancel_done);
+ writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
+ mas->cur_xfer = NULL;
+ geni_se_cancel_m_cmd(se);
+ spin_unlock_irq(&mas->lock);
+
+ time_left = wait_for_completion_timeout(&mas->cancel_done, HZ);
+ if (time_left)
+ return;
+
+ spin_lock_irq(&mas->lock);
+ reinit_completion(&mas->abort_done);
+ geni_se_abort_m_cmd(se);
+ spin_unlock_irq(&mas->lock);
+
+ time_left = wait_for_completion_timeout(&mas->abort_done, HZ);
+ if (!time_left) {
+ dev_err(mas->dev, "Failed to cancel/abort m_cmd\n");
+
+ /*
+ * No need for a lock since SPI core has a lock and we never
+ * access this from an interrupt.
+ */
+ mas->abort_failed = true;
+ }
+}
+
+static void handle_gpi_timeout(struct spi_master *spi, struct spi_message *msg)
+{
+ struct spi_geni_master *mas = spi_master_get_devdata(spi);
+
+ dmaengine_terminate_sync(mas->tx);
+ dmaengine_terminate_sync(mas->rx);
+}
+
+static void spi_geni_handle_err(struct spi_master *spi, struct spi_message *msg)
+{
+ struct spi_geni_master *mas = spi_master_get_devdata(spi);
+
+ switch (mas->cur_xfer_mode) {
+ case GENI_SE_FIFO:
+ handle_fifo_timeout(spi, msg);
+ break;
+ case GENI_GPI_DMA:
+ handle_gpi_timeout(spi, msg);
+ break;
+ default:
+ dev_err(mas->dev, "Abort on Mode:%d not supported", mas->cur_xfer_mode);
+ }
+}
+
+static bool spi_geni_is_abort_still_pending(struct spi_geni_master *mas)
+{
+ struct geni_se *se = &mas->se;
+ u32 m_irq, m_irq_en;
+
+ if (!mas->abort_failed)
+ return false;
+
+ /*
+	 * The only known case where a transfer times out, then a cancel
+	 * times out, and then an abort times out is when something is
+	 * blocking our interrupt handler from running. Avoid starting any
+	 * new transfers until that sorts itself out.
+ */
+ spin_lock_irq(&mas->lock);
+ m_irq = readl(se->base + SE_GENI_M_IRQ_STATUS);
+ m_irq_en = readl(se->base + SE_GENI_M_IRQ_EN);
+ spin_unlock_irq(&mas->lock);
+
+ if (m_irq & m_irq_en) {
+ dev_err(mas->dev, "Interrupts pending after abort: %#010x\n",
+ m_irq & m_irq_en);
+ return true;
+ }
+
+ /*
+	 * If we're here, the problem resolved itself, so there's no need to
+	 * check again on future transfers.
+ */
+ mas->abort_failed = false;
+
+ return false;
+}
+
+static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
+{
+ struct spi_geni_master *mas = spi_master_get_devdata(slv->master);
+ struct spi_master *spi = dev_get_drvdata(mas->dev);
+ struct geni_se *se = &mas->se;
+ unsigned long time_left;
+
+ if (!(slv->mode & SPI_CS_HIGH))
+ set_flag = !set_flag;
+
+ if (set_flag == mas->cs_flag)
+ return;
+
+ pm_runtime_get_sync(mas->dev);
+
+ if (spi_geni_is_abort_still_pending(mas)) {
+ dev_err(mas->dev, "Can't set chip select\n");
+ goto exit;
+ }
+
+ spin_lock_irq(&mas->lock);
+ if (mas->cur_xfer) {
+ dev_err(mas->dev, "Can't set CS when prev xfer running\n");
+ spin_unlock_irq(&mas->lock);
+ goto exit;
+ }
+
+ mas->cs_flag = set_flag;
+ reinit_completion(&mas->cs_done);
+ if (set_flag)
+ geni_se_setup_m_cmd(se, SPI_CS_ASSERT, 0);
+ else
+ geni_se_setup_m_cmd(se, SPI_CS_DEASSERT, 0);
+ spin_unlock_irq(&mas->lock);
+
+ time_left = wait_for_completion_timeout(&mas->cs_done, HZ);
+ if (!time_left) {
+ dev_warn(mas->dev, "Timeout setting chip select\n");
+ handle_fifo_timeout(spi, NULL);
+ }
+
+exit:
+ pm_runtime_put(mas->dev);
+}
+
+static void spi_setup_word_len(struct spi_geni_master *mas, u16 mode,
+ unsigned int bits_per_word)
+{
+ unsigned int pack_words;
+ bool msb_first = (mode & SPI_LSB_FIRST) ? false : true;
+ struct geni_se *se = &mas->se;
+ u32 word_len;
+
+ /*
+	 * If the SPI word size divides the FIFO width evenly, pack as many
+	 * SPI words as fit into each FIFO word; otherwise pack just one SPI
+	 * word per FIFO word.
+ */
+ if (!(mas->fifo_width_bits % bits_per_word))
+ pack_words = mas->fifo_width_bits / bits_per_word;
+ else
+ pack_words = 1;
+ geni_se_config_packing(&mas->se, bits_per_word, pack_words, msb_first,
+ true, true);
+ word_len = (bits_per_word - MIN_WORD_LEN) & WORD_LEN_MSK;
+ writel(word_len, se->base + SE_SPI_WORD_LEN);
+}
+
+static int geni_spi_set_clock_and_bw(struct spi_geni_master *mas,
+ unsigned long clk_hz)
+{
+ u32 clk_sel, m_clk_cfg, idx, div;
+ struct geni_se *se = &mas->se;
+ int ret;
+
+ if (clk_hz == mas->cur_speed_hz)
+ return 0;
+
+ ret = get_spi_clk_cfg(clk_hz, mas, &idx, &div);
+ if (ret) {
+ dev_err(mas->dev, "Err setting clk to %lu: %d\n", clk_hz, ret);
+ return ret;
+ }
+
+ /*
+	 * The SPI core clock gets configured with the requested frequency,
+	 * or with the closest achievable frequency.
+	 * For that reason the requested frequency is stored in cur_speed_hz
+	 * and reused for subsequent transfers instead of calling the
+	 * clk_get_rate() API.
+ */
+ mas->cur_speed_hz = clk_hz;
+
+ clk_sel = idx & CLK_SEL_MSK;
+ m_clk_cfg = (div << CLK_DIV_SHFT) | SER_CLK_EN;
+ writel(clk_sel, se->base + SE_GENI_CLK_SEL);
+ writel(m_clk_cfg, se->base + GENI_SER_M_CLK_CFG);
+
+ /* Set BW quota for CPU as driver supports FIFO mode only. */
+ se->icc_paths[CPU_TO_GENI].avg_bw = Bps_to_icc(mas->cur_speed_hz);
+ ret = geni_icc_set_bw(se);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int setup_fifo_params(struct spi_device *spi_slv,
+ struct spi_master *spi)
+{
+ struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ struct geni_se *se = &mas->se;
+ u32 loopback_cfg = 0, cpol = 0, cpha = 0, demux_output_inv = 0;
+ u32 demux_sel;
+
+ if (mas->last_mode != spi_slv->mode) {
+ if (spi_slv->mode & SPI_LOOP)
+ loopback_cfg = LOOPBACK_ENABLE;
+
+ if (spi_slv->mode & SPI_CPOL)
+ cpol = CPOL;
+
+ if (spi_slv->mode & SPI_CPHA)
+ cpha = CPHA;
+
+ if (spi_slv->mode & SPI_CS_HIGH)
+ demux_output_inv = BIT(spi_slv->chip_select);
+
+ demux_sel = spi_slv->chip_select;
+ mas->cur_bits_per_word = spi_slv->bits_per_word;
+
+ spi_setup_word_len(mas, spi_slv->mode, spi_slv->bits_per_word);
+ writel(loopback_cfg, se->base + SE_SPI_LOOPBACK);
+ writel(demux_sel, se->base + SE_SPI_DEMUX_SEL);
+ writel(cpha, se->base + SE_SPI_CPHA);
+ writel(cpol, se->base + SE_SPI_CPOL);
+ writel(demux_output_inv, se->base + SE_SPI_DEMUX_OUTPUT_INV);
+
+ mas->last_mode = spi_slv->mode;
+ }
+
+ return geni_spi_set_clock_and_bw(mas, spi_slv->max_speed_hz);
+}
+
+static void
+spi_gsi_callback_result(void *cb, const struct dmaengine_result *result)
+{
+ struct spi_master *spi = cb;
+
+ spi->cur_msg->status = -EIO;
+ if (result->result != DMA_TRANS_NOERROR) {
+ dev_err(&spi->dev, "DMA txn failed: %d\n", result->result);
+ spi_finalize_current_transfer(spi);
+ return;
+ }
+
+ if (!result->residue) {
+ spi->cur_msg->status = 0;
+ dev_dbg(&spi->dev, "DMA txn completed\n");
+ } else {
+ dev_err(&spi->dev, "DMA xfer has pending: %d\n", result->residue);
+ }
+
+ spi_finalize_current_transfer(spi);
+}
+
+static int setup_gsi_xfer(struct spi_transfer *xfer, struct spi_geni_master *mas,
+ struct spi_device *spi_slv, struct spi_master *spi)
+{
+ unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
+ struct dma_slave_config config = {};
+ struct gpi_spi_config peripheral = {};
+ struct dma_async_tx_descriptor *tx_desc, *rx_desc;
+ int ret;
+
+ config.peripheral_config = &peripheral;
+ config.peripheral_size = sizeof(peripheral);
+ peripheral.set_config = true;
+
+ if (xfer->bits_per_word != mas->cur_bits_per_word ||
+ xfer->speed_hz != mas->cur_speed_hz) {
+ mas->cur_bits_per_word = xfer->bits_per_word;
+ mas->cur_speed_hz = xfer->speed_hz;
+ }
+
+ if (xfer->tx_buf && xfer->rx_buf) {
+ peripheral.cmd = SPI_DUPLEX;
+ } else if (xfer->tx_buf) {
+ peripheral.cmd = SPI_TX;
+ peripheral.rx_len = 0;
+ } else if (xfer->rx_buf) {
+ peripheral.cmd = SPI_RX;
+ if (!(mas->cur_bits_per_word % MIN_WORD_LEN)) {
+ peripheral.rx_len = ((xfer->len << 3) / mas->cur_bits_per_word);
+ } else {
+ int bytes_per_word = (mas->cur_bits_per_word / BITS_PER_BYTE) + 1;
+
+ peripheral.rx_len = (xfer->len / bytes_per_word);
+ }
+ }
+
+ peripheral.loopback_en = !!(spi_slv->mode & SPI_LOOP);
+ peripheral.clock_pol_high = !!(spi_slv->mode & SPI_CPOL);
+ peripheral.data_pol_high = !!(spi_slv->mode & SPI_CPHA);
+ peripheral.cs = spi_slv->chip_select;
+ peripheral.pack_en = true;
+ peripheral.word_len = xfer->bits_per_word - MIN_WORD_LEN;
+
+ ret = get_spi_clk_cfg(mas->cur_speed_hz, mas,
+ &peripheral.clk_src, &peripheral.clk_div);
+ if (ret) {
+ dev_err(mas->dev, "Err in get_spi_clk_cfg() :%d\n", ret);
+ return ret;
+ }
+
+ if (!xfer->cs_change) {
+ if (!list_is_last(&xfer->transfer_list, &spi->cur_msg->transfers))
+ peripheral.fragmentation = FRAGMENTATION;
+ }
+
+ if (peripheral.cmd & SPI_RX) {
+ dmaengine_slave_config(mas->rx, &config);
+ rx_desc = dmaengine_prep_slave_sg(mas->rx, xfer->rx_sg.sgl, xfer->rx_sg.nents,
+ DMA_DEV_TO_MEM, flags);
+ if (!rx_desc) {
+ dev_err(mas->dev, "Err setting up rx desc\n");
+ return -EIO;
+ }
+ }
+
+ /*
+	 * Always prepare the TX descriptor, even for RX-only transfers or
+	 * when tx_buf is NULL; the GSI spec requires TX to be set up.
+ */
+ dmaengine_slave_config(mas->tx, &config);
+ tx_desc = dmaengine_prep_slave_sg(mas->tx, xfer->tx_sg.sgl, xfer->tx_sg.nents,
+ DMA_MEM_TO_DEV, flags);
+ if (!tx_desc) {
+ dev_err(mas->dev, "Err setting up tx desc\n");
+ return -EIO;
+ }
+
+ tx_desc->callback_result = spi_gsi_callback_result;
+ tx_desc->callback_param = spi;
+
+ if (peripheral.cmd & SPI_RX)
+ dmaengine_submit(rx_desc);
+ dmaengine_submit(tx_desc);
+
+ if (peripheral.cmd & SPI_RX)
+ dma_async_issue_pending(mas->rx);
+
+ dma_async_issue_pending(mas->tx);
+ return 1;
+}
+
+static bool geni_can_dma(struct spi_controller *ctlr,
+ struct spi_device *slv, struct spi_transfer *xfer)
+{
+ struct spi_geni_master *mas = spi_master_get_devdata(slv->master);
+
+ /* check if dma is supported */
+ return mas->cur_xfer_mode != GENI_SE_FIFO;
+}
+
+static int spi_geni_prepare_message(struct spi_master *spi,
+ struct spi_message *spi_msg)
+{
+ struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ int ret;
+
+ switch (mas->cur_xfer_mode) {
+ case GENI_SE_FIFO:
+ if (spi_geni_is_abort_still_pending(mas))
+ return -EBUSY;
+ ret = setup_fifo_params(spi_msg->spi, spi);
+ if (ret)
+ dev_err(mas->dev, "Couldn't select mode %d\n", ret);
+ return ret;
+
+ case GENI_GPI_DMA:
+ /* nothing to do for GPI DMA */
+ return 0;
+ }
+
+ dev_err(mas->dev, "Mode not supported %d", mas->cur_xfer_mode);
+ return -EINVAL;
+}
+
+static int spi_geni_grab_gpi_chan(struct spi_geni_master *mas)
+{
+ int ret;
+
+ mas->tx = dma_request_chan(mas->dev, "tx");
+ if (IS_ERR(mas->tx)) {
+ ret = dev_err_probe(mas->dev, PTR_ERR(mas->tx),
+ "Failed to get tx DMA ch\n");
+ goto err_tx;
+ }
+
+ mas->rx = dma_request_chan(mas->dev, "rx");
+ if (IS_ERR(mas->rx)) {
+ ret = dev_err_probe(mas->dev, PTR_ERR(mas->rx),
+ "Failed to get rx DMA ch\n");
+ goto err_rx;
+ }
+
+ return 0;
+
+err_rx:
+ mas->rx = NULL;
+ dma_release_channel(mas->tx);
+err_tx:
+ mas->tx = NULL;
+ return ret;
+}
+
+static void spi_geni_release_dma_chan(struct spi_geni_master *mas)
+{
+ if (mas->rx) {
+ dma_release_channel(mas->rx);
+ mas->rx = NULL;
+ }
+
+ if (mas->tx) {
+ dma_release_channel(mas->tx);
+ mas->tx = NULL;
+ }
+}
+
+static int spi_geni_init(struct spi_geni_master *mas)
+{
+ struct geni_se *se = &mas->se;
+ unsigned int proto, major, minor, ver;
+ u32 spi_tx_cfg, fifo_disable;
+ int ret = -ENXIO;
+
+ pm_runtime_get_sync(mas->dev);
+
+ proto = geni_se_read_proto(se);
+ if (proto != GENI_SE_SPI) {
+ dev_err(mas->dev, "Invalid proto %d\n", proto);
+ goto out_pm;
+ }
+ mas->tx_fifo_depth = geni_se_get_tx_fifo_depth(se);
+
+	/* The Tx and Rx FIFOs have the same width */
+ mas->fifo_width_bits = geni_se_get_tx_fifo_width(se);
+
+ /*
+	 * The hardware programming guide suggests configuring the
+	 * RX FIFO RFR level as fifo_depth - 2.
+ */
+ geni_se_init(se, mas->tx_fifo_depth - 3, mas->tx_fifo_depth - 2);
+ /* Transmit an entire FIFO worth of data per IRQ */
+ mas->tx_wm = 1;
+ ver = geni_se_get_qup_hw_version(se);
+ major = GENI_SE_VERSION_MAJOR(ver);
+ minor = GENI_SE_VERSION_MINOR(ver);
+
+ if (major == 1 && minor == 0)
+ mas->oversampling = 2;
+ else
+ mas->oversampling = 1;
+
+ fifo_disable = readl(se->base + GENI_IF_DISABLE_RO) & FIFO_IF_DISABLE;
+ switch (fifo_disable) {
+ case 1:
+ ret = spi_geni_grab_gpi_chan(mas);
+ if (!ret) { /* success case */
+ mas->cur_xfer_mode = GENI_GPI_DMA;
+ geni_se_select_mode(se, GENI_GPI_DMA);
+ dev_dbg(mas->dev, "Using GPI DMA mode for SPI\n");
+ break;
+ } else if (ret == -EPROBE_DEFER) {
+ goto out_pm;
+ }
+ /*
+		 * If we failed to get a DMA channel we can still use
+		 * FIFO mode, so fall through.
+ */
+ dev_warn(mas->dev, "FIFO mode disabled, but couldn't get DMA, fall back to FIFO mode\n");
+ fallthrough;
+
+ case 0:
+ mas->cur_xfer_mode = GENI_SE_FIFO;
+ geni_se_select_mode(se, GENI_SE_FIFO);
+ ret = 0;
+ break;
+ }
+
+ /* We always control CS manually */
+ spi_tx_cfg = readl(se->base + SE_SPI_TRANS_CFG);
+ spi_tx_cfg &= ~CS_TOGGLE;
+ writel(spi_tx_cfg, se->base + SE_SPI_TRANS_CFG);
+
+out_pm:
+ pm_runtime_put(mas->dev);
+ return ret;
+}
+
+static unsigned int geni_byte_per_fifo_word(struct spi_geni_master *mas)
+{
+ /*
+ * Calculate how many bytes we'll put in each FIFO word. If the
+ * transfer words don't pack cleanly into a FIFO word we'll just put
+ * one transfer word in each FIFO word. If they do pack we'll pack 'em.
+ */
+ if (mas->fifo_width_bits % mas->cur_bits_per_word)
+ return roundup_pow_of_two(DIV_ROUND_UP(mas->cur_bits_per_word,
+ BITS_PER_BYTE));
+
+ return mas->fifo_width_bits / BITS_PER_BYTE;
+}
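+
+/*
+ * For example, assuming a 32-bit FIFO word (the real width is read from
+ * the hardware at init time): 8-bit SPI words pack cleanly, so each FIFO
+ * word carries 32 / 8 = 4 bytes; 20-bit SPI words do not (32 % 20 != 0),
+ * so each FIFO word carries a single SPI word stored in
+ * roundup_pow_of_two(DIV_ROUND_UP(20, 8)) = 4 bytes.
+ */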
+
+static bool geni_spi_handle_tx(struct spi_geni_master *mas)
+{
+ struct geni_se *se = &mas->se;
+ unsigned int max_bytes;
+ const u8 *tx_buf;
+ unsigned int bytes_per_fifo_word = geni_byte_per_fifo_word(mas);
+ unsigned int i = 0;
+
+ /* Stop the watermark IRQ if nothing to send */
+ if (!mas->cur_xfer) {
+ writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
+ return false;
+ }
+
+ max_bytes = (mas->tx_fifo_depth - mas->tx_wm) * bytes_per_fifo_word;
+ if (mas->tx_rem_bytes < max_bytes)
+ max_bytes = mas->tx_rem_bytes;
+
+ tx_buf = mas->cur_xfer->tx_buf + mas->cur_xfer->len - mas->tx_rem_bytes;
+ while (i < max_bytes) {
+ unsigned int j;
+ unsigned int bytes_to_write;
+ u32 fifo_word = 0;
+ u8 *fifo_byte = (u8 *)&fifo_word;
+
+ bytes_to_write = min(bytes_per_fifo_word, max_bytes - i);
+ for (j = 0; j < bytes_to_write; j++)
+ fifo_byte[j] = tx_buf[i++];
+ iowrite32_rep(se->base + SE_GENI_TX_FIFOn, &fifo_word, 1);
+ }
+ mas->tx_rem_bytes -= max_bytes;
+ if (!mas->tx_rem_bytes) {
+ writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
+ return false;
+ }
+ return true;
+}
+
+static void geni_spi_handle_rx(struct spi_geni_master *mas)
+{
+ struct geni_se *se = &mas->se;
+ u32 rx_fifo_status;
+ unsigned int rx_bytes;
+ unsigned int rx_last_byte_valid;
+ u8 *rx_buf;
+ unsigned int bytes_per_fifo_word = geni_byte_per_fifo_word(mas);
+ unsigned int i = 0;
+
+ rx_fifo_status = readl(se->base + SE_GENI_RX_FIFO_STATUS);
+ rx_bytes = (rx_fifo_status & RX_FIFO_WC_MSK) * bytes_per_fifo_word;
+ if (rx_fifo_status & RX_LAST) {
+ rx_last_byte_valid = rx_fifo_status & RX_LAST_BYTE_VALID_MSK;
+ rx_last_byte_valid >>= RX_LAST_BYTE_VALID_SHFT;
+ if (rx_last_byte_valid && rx_last_byte_valid < 4)
+ rx_bytes -= bytes_per_fifo_word - rx_last_byte_valid;
+ }
+
+ /* Clear out the FIFO and bail if nowhere to put it */
+ if (!mas->cur_xfer) {
+ for (i = 0; i < DIV_ROUND_UP(rx_bytes, bytes_per_fifo_word); i++)
+ readl(se->base + SE_GENI_RX_FIFOn);
+ return;
+ }
+
+ if (mas->rx_rem_bytes < rx_bytes)
+ rx_bytes = mas->rx_rem_bytes;
+
+ rx_buf = mas->cur_xfer->rx_buf + mas->cur_xfer->len - mas->rx_rem_bytes;
+ while (i < rx_bytes) {
+ u32 fifo_word = 0;
+ u8 *fifo_byte = (u8 *)&fifo_word;
+ unsigned int bytes_to_read;
+ unsigned int j;
+
+ bytes_to_read = min(bytes_per_fifo_word, rx_bytes - i);
+ ioread32_rep(se->base + SE_GENI_RX_FIFOn, &fifo_word, 1);
+ for (j = 0; j < bytes_to_read; j++)
+ rx_buf[i++] = fifo_byte[j];
+ }
+ mas->rx_rem_bytes -= rx_bytes;
+}
+
+static void setup_fifo_xfer(struct spi_transfer *xfer,
+ struct spi_geni_master *mas,
+ u16 mode, struct spi_master *spi)
+{
+ u32 m_cmd = 0;
+ u32 len;
+ struct geni_se *se = &mas->se;
+ int ret;
+
+ /*
+ * Ensure that our interrupt handler isn't still running from some
+ * prior command before we start messing with the hardware behind
+ * its back. We don't need to _keep_ the lock here since we're only
+	 * worried about racing with our interrupt handler. The SPI core
+ * already handles making sure that we're not trying to do two
+ * transfers at once or setting a chip select and doing a transfer
+ * concurrently.
+ *
+ * NOTE: we actually _can't_ hold the lock here because possibly we
+ * might call clk_set_rate() which needs to be able to sleep.
+ */
+ spin_lock_irq(&mas->lock);
+ spin_unlock_irq(&mas->lock);
+
+ if (xfer->bits_per_word != mas->cur_bits_per_word) {
+ spi_setup_word_len(mas, mode, xfer->bits_per_word);
+ mas->cur_bits_per_word = xfer->bits_per_word;
+ }
+
+ /* Speed and bits per word can be overridden per transfer */
+ ret = geni_spi_set_clock_and_bw(mas, xfer->speed_hz);
+ if (ret)
+ return;
+
+ mas->tx_rem_bytes = 0;
+ mas->rx_rem_bytes = 0;
+
+ if (!(mas->cur_bits_per_word % MIN_WORD_LEN))
+ len = xfer->len * BITS_PER_BYTE / mas->cur_bits_per_word;
+ else
+ len = xfer->len / (mas->cur_bits_per_word / BITS_PER_BYTE + 1);
+ len &= TRANS_LEN_MSK;
+
+ mas->cur_xfer = xfer;
+ if (xfer->tx_buf) {
+ m_cmd |= SPI_TX_ONLY;
+ mas->tx_rem_bytes = xfer->len;
+ writel(len, se->base + SE_SPI_TX_TRANS_LEN);
+ }
+
+ if (xfer->rx_buf) {
+ m_cmd |= SPI_RX_ONLY;
+ writel(len, se->base + SE_SPI_RX_TRANS_LEN);
+ mas->rx_rem_bytes = xfer->len;
+ }
+
+ /*
+ * Lock around right before we start the transfer since our
+ * interrupt could come in at any time now.
+ */
+ spin_lock_irq(&mas->lock);
+ geni_se_setup_m_cmd(se, m_cmd, FRAGMENTATION);
+ if (m_cmd & SPI_TX_ONLY) {
+ if (geni_spi_handle_tx(mas))
+ writel(mas->tx_wm, se->base + SE_GENI_TX_WATERMARK_REG);
+ }
+ spin_unlock_irq(&mas->lock);
+}
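+
+/*
+ * Example of the transfer-length math above, with made-up sizes: a
+ * 12-byte buffer at 8 bits per word gives len = 12 * 8 / 8 = 12 SPI
+ * words; at 20 bits per word (a multiple of MIN_WORD_LEN) it gives
+ * len = 12 * 8 / 20 = 4 words; at 18 bits per word (not a multiple of
+ * MIN_WORD_LEN) each word occupies 18 / 8 + 1 = 3 bytes, so
+ * len = 12 / 3 = 4 words.
+ */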
+
+static int spi_geni_transfer_one(struct spi_master *spi,
+ struct spi_device *slv,
+ struct spi_transfer *xfer)
+{
+ struct spi_geni_master *mas = spi_master_get_devdata(spi);
+
+ if (spi_geni_is_abort_still_pending(mas))
+ return -EBUSY;
+
+ /* Terminate and return success for 0 byte length transfer */
+ if (!xfer->len)
+ return 0;
+
+ if (mas->cur_xfer_mode == GENI_SE_FIFO) {
+ setup_fifo_xfer(xfer, mas, slv->mode, spi);
+ return 1;
+ }
+ return setup_gsi_xfer(xfer, mas, slv, spi);
+}
+
+static irqreturn_t geni_spi_isr(int irq, void *data)
+{
+ struct spi_master *spi = data;
+ struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ struct geni_se *se = &mas->se;
+ u32 m_irq;
+
+ m_irq = readl(se->base + SE_GENI_M_IRQ_STATUS);
+ if (!m_irq)
+ return IRQ_NONE;
+
+ if (m_irq & (M_CMD_OVERRUN_EN | M_ILLEGAL_CMD_EN | M_CMD_FAILURE_EN |
+ M_RX_FIFO_RD_ERR_EN | M_RX_FIFO_WR_ERR_EN |
+ M_TX_FIFO_RD_ERR_EN | M_TX_FIFO_WR_ERR_EN))
+ dev_warn(mas->dev, "Unexpected IRQ err status %#010x\n", m_irq);
+
+ spin_lock(&mas->lock);
+
+ if ((m_irq & M_RX_FIFO_WATERMARK_EN) || (m_irq & M_RX_FIFO_LAST_EN))
+ geni_spi_handle_rx(mas);
+
+ if (m_irq & M_TX_FIFO_WATERMARK_EN)
+ geni_spi_handle_tx(mas);
+
+ if (m_irq & M_CMD_DONE_EN) {
+ if (mas->cur_xfer) {
+ spi_finalize_current_transfer(spi);
+ mas->cur_xfer = NULL;
+ /*
+ * If this happens, then a CMD_DONE came before all the
+ * Tx buffer bytes were sent out. This is unusual, log
+ * this condition and disable the WM interrupt to
+	 * prevent the system from stalling due to an interrupt
+ * storm.
+ *
+ * If this happens when all Rx bytes haven't been
+ * received, log the condition. The only known time
+ * this can happen is if bits_per_word != 8 and some
+ * registers that expect xfer lengths in num spi_words
+ * weren't written correctly.
+ */
+ if (mas->tx_rem_bytes) {
+ writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
+ dev_err(mas->dev, "Premature done. tx_rem = %d bpw%d\n",
+ mas->tx_rem_bytes, mas->cur_bits_per_word);
+ }
+ if (mas->rx_rem_bytes)
+ dev_err(mas->dev, "Premature done. rx_rem = %d bpw%d\n",
+ mas->rx_rem_bytes, mas->cur_bits_per_word);
+ } else {
+ complete(&mas->cs_done);
+ }
+ }
+
+ if (m_irq & M_CMD_CANCEL_EN)
+ complete(&mas->cancel_done);
+ if (m_irq & M_CMD_ABORT_EN)
+ complete(&mas->abort_done);
+
+ /*
+	 * It's safe (and a good idea) to ack all of our interrupts at the end
+ * of the function. Specifically:
+ * - M_CMD_DONE_EN / M_RX_FIFO_LAST_EN: Edge triggered interrupts and
+ * clearing Acks. Clearing at the end relies on nobody else having
+ * started a new transfer yet or else we could be clearing _their_
+ * done bit, but everyone grabs the spinlock before starting a new
+ * transfer.
+ * - M_RX_FIFO_WATERMARK_EN / M_TX_FIFO_WATERMARK_EN: These appear
+ * to be "latched level" interrupts so it's important to clear them
+ * _after_ you've handled the condition and always safe to do so
+ * since they'll re-assert if they're still happening.
+ */
+ writel(m_irq, se->base + SE_GENI_M_IRQ_CLEAR);
+
+ spin_unlock(&mas->lock);
+
+ return IRQ_HANDLED;
+}
+
+static int spi_geni_probe(struct platform_device *pdev)
+{
+ int ret, irq;
+ struct spi_master *spi;
+ struct spi_geni_master *mas;
+ void __iomem *base;
+ struct clk *clk;
+ struct device *dev = &pdev->dev;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ if (ret)
+ return dev_err_probe(dev, ret, "could not set DMA mask\n");
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ clk = devm_clk_get(dev, "se");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ spi = devm_spi_alloc_master(dev, sizeof(*mas));
+ if (!spi)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, spi);
+ mas = spi_master_get_devdata(spi);
+ mas->irq = irq;
+ mas->dev = dev;
+ mas->se.dev = dev;
+ mas->se.wrapper = dev_get_drvdata(dev->parent);
+ mas->se.base = base;
+ mas->se.clk = clk;
+
+ ret = devm_pm_opp_set_clkname(&pdev->dev, "se");
+ if (ret)
+ return ret;
+ /* OPP table is optional */
+ ret = devm_pm_opp_of_add_table(&pdev->dev);
+ if (ret && ret != -ENODEV) {
+ dev_err(&pdev->dev, "invalid OPP table in device tree\n");
+ return ret;
+ }
+
+ spi->bus_num = -1;
+ spi->dev.of_node = dev->of_node;
+ spi->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP | SPI_CS_HIGH;
+ spi->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
+ spi->num_chipselect = 4;
+ spi->max_speed_hz = 50000000;
+ spi->prepare_message = spi_geni_prepare_message;
+ spi->transfer_one = spi_geni_transfer_one;
+ spi->can_dma = geni_can_dma;
+ spi->dma_map_dev = dev->parent;
+ spi->auto_runtime_pm = true;
+ spi->handle_err = spi_geni_handle_err;
+ spi->use_gpio_descriptors = true;
+
+ init_completion(&mas->cs_done);
+ init_completion(&mas->cancel_done);
+ init_completion(&mas->abort_done);
+ spin_lock_init(&mas->lock);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 250);
+ pm_runtime_enable(dev);
+
+ ret = geni_icc_get(&mas->se, NULL);
+ if (ret)
+ goto spi_geni_probe_runtime_disable;
+ /* Set the bus quota to a reasonable value for register access */
+ mas->se.icc_paths[GENI_TO_CORE].avg_bw = Bps_to_icc(CORE_2X_50_MHZ);
+ mas->se.icc_paths[CPU_TO_GENI].avg_bw = GENI_DEFAULT_BW;
+
+ ret = geni_icc_set_bw(&mas->se);
+ if (ret)
+ goto spi_geni_probe_runtime_disable;
+
+ ret = spi_geni_init(mas);
+ if (ret)
+ goto spi_geni_probe_runtime_disable;
+
+ /*
+	 * Check the supported mode; set_cs is used for FIFO mode only.
+	 * For DMA (GSI) mode, GSI drives the chip select based on the
+	 * params passed in the TRE.
+ */
+ if (mas->cur_xfer_mode == GENI_SE_FIFO)
+ spi->set_cs = spi_geni_set_cs;
+
+ /*
+ * TX is required per GSI spec, see setup_gsi_xfer().
+ */
+ if (mas->cur_xfer_mode == GENI_GPI_DMA)
+ spi->flags = SPI_CONTROLLER_MUST_TX;
+
+ ret = request_irq(mas->irq, geni_spi_isr, 0, dev_name(dev), spi);
+ if (ret)
+ goto spi_geni_release_dma;
+
+ ret = spi_register_master(spi);
+ if (ret)
+ goto spi_geni_probe_free_irq;
+
+ return 0;
+spi_geni_probe_free_irq:
+ free_irq(mas->irq, spi);
+spi_geni_release_dma:
+ spi_geni_release_dma_chan(mas);
+spi_geni_probe_runtime_disable:
+ pm_runtime_disable(dev);
+ return ret;
+}
+
+static int spi_geni_remove(struct platform_device *pdev)
+{
+ struct spi_master *spi = platform_get_drvdata(pdev);
+ struct spi_geni_master *mas = spi_master_get_devdata(spi);
+
+ /* Unregister _before_ disabling pm_runtime() so we stop transfers */
+ spi_unregister_master(spi);
+
+ spi_geni_release_dma_chan(mas);
+
+ free_irq(mas->irq, spi);
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
+static int __maybe_unused spi_geni_runtime_suspend(struct device *dev)
+{
+ struct spi_master *spi = dev_get_drvdata(dev);
+ struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ int ret;
+
+ /* Drop the performance state vote */
+ dev_pm_opp_set_rate(dev, 0);
+
+ ret = geni_se_resources_off(&mas->se);
+ if (ret)
+ return ret;
+
+ return geni_icc_disable(&mas->se);
+}
+
+static int __maybe_unused spi_geni_runtime_resume(struct device *dev)
+{
+ struct spi_master *spi = dev_get_drvdata(dev);
+ struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ int ret;
+
+ ret = geni_icc_enable(&mas->se);
+ if (ret)
+ return ret;
+
+ ret = geni_se_resources_on(&mas->se);
+ if (ret)
+ return ret;
+
+ return dev_pm_opp_set_rate(mas->dev, mas->cur_sclk_hz);
+}
+
+static int __maybe_unused spi_geni_suspend(struct device *dev)
+{
+ struct spi_master *spi = dev_get_drvdata(dev);
+ int ret;
+
+ ret = spi_master_suspend(spi);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_force_suspend(dev);
+ if (ret)
+ spi_master_resume(spi);
+
+ return ret;
+}
+
+static int __maybe_unused spi_geni_resume(struct device *dev)
+{
+ struct spi_master *spi = dev_get_drvdata(dev);
+ int ret;
+
+ ret = pm_runtime_force_resume(dev);
+ if (ret)
+ return ret;
+
+ ret = spi_master_resume(spi);
+ if (ret)
+ pm_runtime_force_suspend(dev);
+
+ return ret;
+}
+
+static const struct dev_pm_ops spi_geni_pm_ops = {
+ SET_RUNTIME_PM_OPS(spi_geni_runtime_suspend,
+ spi_geni_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(spi_geni_suspend, spi_geni_resume)
+};
+
+static const struct of_device_id spi_geni_dt_match[] = {
+ { .compatible = "qcom,geni-spi" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, spi_geni_dt_match);
+
+static struct platform_driver spi_geni_driver = {
+ .probe = spi_geni_probe,
+ .remove = spi_geni_remove,
+ .driver = {
+ .name = "geni_spi",
+ .pm = &spi_geni_pm_ops,
+ .of_match_table = spi_geni_dt_match,
+ },
+};
+module_platform_driver(spi_geni_driver);
+
+MODULE_DESCRIPTION("SPI driver for GENI based QUP cores");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
new file mode 100644
index 000000000..9c8c79480
--- /dev/null
+++ b/drivers/spi/spi-gpio.c
@@ -0,0 +1,474 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * SPI master driver using generic bitbanged GPIO
+ *
+ * Copyright (C) 2006,2008 David Brownell
+ * Copyright (C) 2017 Linus Walleij
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+#include <linux/spi/spi_gpio.h>
+
+
+/*
+ * This bitbanging SPI master driver should help make systems usable
+ * when a native hardware SPI engine is not available, perhaps because
+ * its driver isn't yet working or because the I/O pins it requires
+ * are used for other purposes.
+ *
+ * platform_device->driver_data ... points to spi_gpio
+ *
+ * spi->controller_state ... reserved for bitbang framework code
+ *
+ * spi->master->dev.driver_data ... points to spi_gpio->bitbang
+ */
+
+struct spi_gpio {
+ struct spi_bitbang bitbang;
+ struct gpio_desc *sck;
+ struct gpio_desc *miso;
+ struct gpio_desc *mosi;
+ struct gpio_desc **cs_gpios;
+};
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * Because the overhead of going through four GPIO procedure calls
+ * per transferred bit can make performance a problem, this code
+ * is set up so that you can use it in either of two ways:
+ *
+ * - The slow generic way: set up platform_data to hold the GPIO
+ * numbers used for MISO/MOSI/SCK, and issue procedure calls for
+ * each of them. This driver can handle several such busses.
+ *
+ * - The quicker inlined way: only helps with platform GPIO code
+ * that inlines operations for constant GPIOs. This can give
+ * you tight (fast!) inner loops, but each such bus needs a
+ * new driver. You'll define a new C file, with Makefile and
+ * Kconfig support; the C code can be a total of six lines:
+ *
+ * #define DRIVER_NAME "myboard_spi2"
+ * #define SPI_MISO_GPIO 119
+ * #define SPI_MOSI_GPIO 120
+ * #define SPI_SCK_GPIO 121
+ * #define SPI_N_CHIPSEL 4
+ * #include "spi-gpio.c"
+ */
+
+#ifndef DRIVER_NAME
+#define DRIVER_NAME "spi_gpio"
+
+#define GENERIC_BITBANG /* vs tight inlines */
+
+#endif
+
+/*----------------------------------------------------------------------*/
+
+static inline struct spi_gpio *__pure
+spi_to_spi_gpio(const struct spi_device *spi)
+{
+ const struct spi_bitbang *bang;
+ struct spi_gpio *spi_gpio;
+
+ bang = spi_master_get_devdata(spi->master);
+ spi_gpio = container_of(bang, struct spi_gpio, bitbang);
+ return spi_gpio;
+}
+
+/* These helpers are in turn called by the bitbang inlines */
+static inline void setsck(const struct spi_device *spi, int is_on)
+{
+ struct spi_gpio *spi_gpio = spi_to_spi_gpio(spi);
+
+ gpiod_set_value_cansleep(spi_gpio->sck, is_on);
+}
+
+static inline void setmosi(const struct spi_device *spi, int is_on)
+{
+ struct spi_gpio *spi_gpio = spi_to_spi_gpio(spi);
+
+ gpiod_set_value_cansleep(spi_gpio->mosi, is_on);
+}
+
+static inline int getmiso(const struct spi_device *spi)
+{
+ struct spi_gpio *spi_gpio = spi_to_spi_gpio(spi);
+
+ if (spi->mode & SPI_3WIRE)
+ return !!gpiod_get_value_cansleep(spi_gpio->mosi);
+ else
+ return !!gpiod_get_value_cansleep(spi_gpio->miso);
+}
+
+/*
+ * NOTE: this clocks "as fast as we can". It "should" be a function of the
+ * requested device clock. Software overhead means we usually have trouble
+ * reaching even one Mbit/sec (except when we can inline bitops), so for now
+ * we'll just assume we never need additional per-bit slowdowns.
+ */
+#define spidelay(nsecs) do {} while (0)
+
+#include "spi-bitbang-txrx.h"
+
+/*
+ * These functions can leverage inline expansion of GPIO calls to shrink
+ * costs for a txrx bit, often by factors of around ten (by instruction
+ * count). That is particularly visible for larger word sizes, but helps
+ * even with default 8-bit words.
+ *
+ * REVISIT overheads calling these functions for each word also have
+ * significant performance costs. Having txrx_bufs() calls that inline
+ * the txrx_word() logic would help performance, e.g. on larger blocks
+ * used with flash storage or MMC/SD. There should also be ways to make
+ * GCC be less stupid about reloading registers inside the I/O loops,
+ * even without inlined GPIO calls; __attribute__((hot)) on GCC 4.3?
+ */
+
+static u32 spi_gpio_txrx_word_mode0(struct spi_device *spi,
+ unsigned nsecs, u32 word, u8 bits, unsigned flags)
+{
+ if (unlikely(spi->mode & SPI_LSB_FIRST))
+ return bitbang_txrx_le_cpha0(spi, nsecs, 0, flags, word, bits);
+ else
+ return bitbang_txrx_be_cpha0(spi, nsecs, 0, flags, word, bits);
+}
+
+static u32 spi_gpio_txrx_word_mode1(struct spi_device *spi,
+ unsigned nsecs, u32 word, u8 bits, unsigned flags)
+{
+ if (unlikely(spi->mode & SPI_LSB_FIRST))
+ return bitbang_txrx_le_cpha1(spi, nsecs, 0, flags, word, bits);
+ else
+ return bitbang_txrx_be_cpha1(spi, nsecs, 0, flags, word, bits);
+}
+
+static u32 spi_gpio_txrx_word_mode2(struct spi_device *spi,
+ unsigned nsecs, u32 word, u8 bits, unsigned flags)
+{
+ if (unlikely(spi->mode & SPI_LSB_FIRST))
+ return bitbang_txrx_le_cpha0(spi, nsecs, 1, flags, word, bits);
+ else
+ return bitbang_txrx_be_cpha0(spi, nsecs, 1, flags, word, bits);
+}
+
+static u32 spi_gpio_txrx_word_mode3(struct spi_device *spi,
+ unsigned nsecs, u32 word, u8 bits, unsigned flags)
+{
+ if (unlikely(spi->mode & SPI_LSB_FIRST))
+ return bitbang_txrx_le_cpha1(spi, nsecs, 1, flags, word, bits);
+ else
+ return bitbang_txrx_be_cpha1(spi, nsecs, 1, flags, word, bits);
+}
+
+/*
+ * These functions do not call setmosi or getmiso if the respective flag
+ * (SPI_MASTER_NO_RX or SPI_MASTER_NO_TX) is set, so they are safe to
+ * call when the corresponding pin is not present or not defined in the
+ * controller. A separate set of callbacks is defined to get the highest
+ * possible speed in the generic case (when both MISO and MOSI lines are
+ * available), as the optimiser will remove the checks when the argument
+ * is constant.
+ */
+
+static u32 spi_gpio_spec_txrx_word_mode0(struct spi_device *spi,
+ unsigned nsecs, u32 word, u8 bits, unsigned flags)
+{
+ flags = spi->master->flags;
+ if (unlikely(spi->mode & SPI_LSB_FIRST))
+ return bitbang_txrx_le_cpha0(spi, nsecs, 0, flags, word, bits);
+ else
+ return bitbang_txrx_be_cpha0(spi, nsecs, 0, flags, word, bits);
+}
+
+static u32 spi_gpio_spec_txrx_word_mode1(struct spi_device *spi,
+ unsigned nsecs, u32 word, u8 bits, unsigned flags)
+{
+ flags = spi->master->flags;
+ if (unlikely(spi->mode & SPI_LSB_FIRST))
+ return bitbang_txrx_le_cpha1(spi, nsecs, 0, flags, word, bits);
+ else
+ return bitbang_txrx_be_cpha1(spi, nsecs, 0, flags, word, bits);
+}
+
+static u32 spi_gpio_spec_txrx_word_mode2(struct spi_device *spi,
+ unsigned nsecs, u32 word, u8 bits, unsigned flags)
+{
+ flags = spi->master->flags;
+ if (unlikely(spi->mode & SPI_LSB_FIRST))
+ return bitbang_txrx_le_cpha0(spi, nsecs, 1, flags, word, bits);
+ else
+ return bitbang_txrx_be_cpha0(spi, nsecs, 1, flags, word, bits);
+}
+
+static u32 spi_gpio_spec_txrx_word_mode3(struct spi_device *spi,
+ unsigned nsecs, u32 word, u8 bits, unsigned flags)
+{
+ flags = spi->master->flags;
+ if (unlikely(spi->mode & SPI_LSB_FIRST))
+ return bitbang_txrx_le_cpha1(spi, nsecs, 1, flags, word, bits);
+ else
+ return bitbang_txrx_be_cpha1(spi, nsecs, 1, flags, word, bits);
+}
+
+/*----------------------------------------------------------------------*/
+
+static void spi_gpio_chipselect(struct spi_device *spi, int is_active)
+{
+ struct spi_gpio *spi_gpio = spi_to_spi_gpio(spi);
+
+ /* set initial clock line level */
+ if (is_active)
+ gpiod_set_value_cansleep(spi_gpio->sck, spi->mode & SPI_CPOL);
+
+ /* Drive chip select line, if we have one */
+ if (spi_gpio->cs_gpios) {
+ struct gpio_desc *cs = spi_gpio->cs_gpios[spi->chip_select];
+
+ /* SPI chip selects are normally active-low */
+ gpiod_set_value_cansleep(cs, (spi->mode & SPI_CS_HIGH) ? is_active : !is_active);
+ }
+}
+
+static int spi_gpio_setup(struct spi_device *spi)
+{
+ struct gpio_desc *cs;
+ int status = 0;
+ struct spi_gpio *spi_gpio = spi_to_spi_gpio(spi);
+
+ /*
+ * The CS GPIOs have already been
+ * initialized from the descriptor lookup.
+ */
+ if (spi_gpio->cs_gpios) {
+ cs = spi_gpio->cs_gpios[spi->chip_select];
+ if (!spi->controller_state && cs)
+ status = gpiod_direction_output(cs,
+ !(spi->mode & SPI_CS_HIGH));
+ }
+
+ if (!status)
+ status = spi_bitbang_setup(spi);
+
+ return status;
+}
+
+static int spi_gpio_set_direction(struct spi_device *spi, bool output)
+{
+ struct spi_gpio *spi_gpio = spi_to_spi_gpio(spi);
+ int ret;
+
+ if (output)
+ return gpiod_direction_output(spi_gpio->mosi, 1);
+
+ /*
+ * Only change MOSI to an input if using 3WIRE mode.
+ * Otherwise, MOSI could be left floating if there is
+ * no pull resistor connected to the I/O pin, or could
+ * be left logic high if there is a pull-up. Transmitting
+ * logic high when only clocking MISO data in can put some
+ * SPI devices in to a bad state.
+ */
+ if (spi->mode & SPI_3WIRE) {
+ ret = gpiod_direction_input(spi_gpio->mosi);
+ if (ret)
+ return ret;
+ }
+ /*
+ * Send a turnaround high impedance cycle when switching
+ * from output to input. Theoretically there should be
+ * a clock delay here, but as has been noted above, the
+ * nsec delay function for bit-banged GPIO is simply
+ * {} because bit-banging just doesn't get fast enough
+ * anyway.
+ */
+ if (spi->mode & SPI_3WIRE_HIZ) {
+ gpiod_set_value_cansleep(spi_gpio->sck,
+ !(spi->mode & SPI_CPOL));
+ gpiod_set_value_cansleep(spi_gpio->sck,
+ !!(spi->mode & SPI_CPOL));
+ }
+ return 0;
+}
+
+static void spi_gpio_cleanup(struct spi_device *spi)
+{
+ spi_bitbang_cleanup(spi);
+}
+
+/*
+ * It can be convenient to use this driver with pins that have alternate
+ * functions associated with a "native" SPI controller if a driver for that
+ * controller is not available, or is missing important functionality.
+ *
+ * On platforms which can do so, configure MISO with a weak pullup unless
+ * there's an external pullup on that signal. That saves power by avoiding
+ * floating signals. (A weak pulldown would save power too, but many
+ * drivers expect to see all-ones data as the "response" when no slave
+ * is present.)
+ */
+static int spi_gpio_request(struct device *dev, struct spi_gpio *spi_gpio)
+{
+ spi_gpio->mosi = devm_gpiod_get_optional(dev, "mosi", GPIOD_OUT_LOW);
+ if (IS_ERR(spi_gpio->mosi))
+ return PTR_ERR(spi_gpio->mosi);
+
+ spi_gpio->miso = devm_gpiod_get_optional(dev, "miso", GPIOD_IN);
+ if (IS_ERR(spi_gpio->miso))
+ return PTR_ERR(spi_gpio->miso);
+
+ spi_gpio->sck = devm_gpiod_get(dev, "sck", GPIOD_OUT_LOW);
+ return PTR_ERR_OR_ZERO(spi_gpio->sck);
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id spi_gpio_dt_ids[] = {
+ { .compatible = "spi-gpio" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, spi_gpio_dt_ids);
+
+static int spi_gpio_probe_dt(struct platform_device *pdev,
+ struct spi_master *master)
+{
+ master->dev.of_node = pdev->dev.of_node;
+ master->use_gpio_descriptors = true;
+
+ return 0;
+}
+#else
+static inline int spi_gpio_probe_dt(struct platform_device *pdev,
+ struct spi_master *master)
+{
+ return 0;
+}
+#endif
+
+static int spi_gpio_probe_pdata(struct platform_device *pdev,
+ struct spi_master *master)
+{
+ struct device *dev = &pdev->dev;
+ struct spi_gpio_platform_data *pdata = dev_get_platdata(dev);
+ struct spi_gpio *spi_gpio = spi_master_get_devdata(master);
+ int i;
+
+#ifdef GENERIC_BITBANG
+ if (!pdata || !pdata->num_chipselect)
+ return -ENODEV;
+#endif
+ /*
+ * The master needs to think there is a chipselect even if not
+ * connected
+ */
+ master->num_chipselect = pdata->num_chipselect ?: 1;
+
+ spi_gpio->cs_gpios = devm_kcalloc(dev, master->num_chipselect,
+ sizeof(*spi_gpio->cs_gpios),
+ GFP_KERNEL);
+ if (!spi_gpio->cs_gpios)
+ return -ENOMEM;
+
+ for (i = 0; i < master->num_chipselect; i++) {
+ spi_gpio->cs_gpios[i] = devm_gpiod_get_index(dev, "cs", i,
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(spi_gpio->cs_gpios[i]))
+ return PTR_ERR(spi_gpio->cs_gpios[i]);
+ }
+
+ return 0;
+}
+
+static int spi_gpio_probe(struct platform_device *pdev)
+{
+ int status;
+ struct spi_master *master;
+ struct spi_gpio *spi_gpio;
+ struct device *dev = &pdev->dev;
+ struct spi_bitbang *bb;
+
+ master = devm_spi_alloc_master(dev, sizeof(*spi_gpio));
+ if (!master)
+ return -ENOMEM;
+
+ if (pdev->dev.of_node)
+ status = spi_gpio_probe_dt(pdev, master);
+ else
+ status = spi_gpio_probe_pdata(pdev, master);
+
+ if (status)
+ return status;
+
+ spi_gpio = spi_master_get_devdata(master);
+
+ status = spi_gpio_request(dev, spi_gpio);
+ if (status)
+ return status;
+
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
+ master->mode_bits = SPI_3WIRE | SPI_3WIRE_HIZ | SPI_CPHA | SPI_CPOL |
+ SPI_CS_HIGH | SPI_LSB_FIRST;
+ if (!spi_gpio->mosi) {
+ /* HW configuration without MOSI pin
+ *
+		 * We don't set SPI_MASTER_NO_RX here - if only a MOSI
+		 * pin is connected, the host can still do RX by
+		 * changing the direction of the line.
+ */
+ master->flags = SPI_MASTER_NO_TX;
+ }
+
+ master->bus_num = pdev->id;
+ master->setup = spi_gpio_setup;
+ master->cleanup = spi_gpio_cleanup;
+
+ bb = &spi_gpio->bitbang;
+ bb->master = master;
+ /*
+	 * There is some additional business, apart from driving the CS GPIO
+	 * line, that we need to do on selection. This flag makes sure our
+	 * local chipselect callback is always called.
+ */
+ master->flags |= SPI_MASTER_GPIO_SS;
+ bb->chipselect = spi_gpio_chipselect;
+ bb->set_line_direction = spi_gpio_set_direction;
+
+ if (master->flags & SPI_MASTER_NO_TX) {
+ bb->txrx_word[SPI_MODE_0] = spi_gpio_spec_txrx_word_mode0;
+ bb->txrx_word[SPI_MODE_1] = spi_gpio_spec_txrx_word_mode1;
+ bb->txrx_word[SPI_MODE_2] = spi_gpio_spec_txrx_word_mode2;
+ bb->txrx_word[SPI_MODE_3] = spi_gpio_spec_txrx_word_mode3;
+ } else {
+ bb->txrx_word[SPI_MODE_0] = spi_gpio_txrx_word_mode0;
+ bb->txrx_word[SPI_MODE_1] = spi_gpio_txrx_word_mode1;
+ bb->txrx_word[SPI_MODE_2] = spi_gpio_txrx_word_mode2;
+ bb->txrx_word[SPI_MODE_3] = spi_gpio_txrx_word_mode3;
+ }
+ bb->setup_transfer = spi_bitbang_setup_transfer;
+
+ status = spi_bitbang_init(&spi_gpio->bitbang);
+ if (status)
+ return status;
+
+ return devm_spi_register_master(&pdev->dev, master);
+}
+
+MODULE_ALIAS("platform:" DRIVER_NAME);
+
+static struct platform_driver spi_gpio_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = of_match_ptr(spi_gpio_dt_ids),
+ },
+ .probe = spi_gpio_probe,
+};
+module_platform_driver(spi_gpio_driver);
+
+MODULE_DESCRIPTION("SPI master driver using generic bitbanged GPIO ");
+MODULE_AUTHOR("David Brownell");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-gxp.c b/drivers/spi/spi-gxp.c
new file mode 100644
index 000000000..21b07e251
--- /dev/null
+++ b/drivers/spi/spi-gxp.c
@@ -0,0 +1,321 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Copyright (C) 2022 Hewlett-Packard Development Company, L.P. */
+
+#include <linux/iopoll.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+
+#define GXP_SPI0_MAX_CHIPSELECT 2
+#define GXP_SPI_SLEEP_TIME 1
+#define GXP_SPI_TIMEOUT (130 * 1000000 / GXP_SPI_SLEEP_TIME)
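+
+/*
+ * With GXP_SPI_SLEEP_TIME of 1 us, the timeout above works out to
+ * 130000000 us (roughly 130 seconds); both values are handed directly to
+ * readb_poll_timeout() below as its sleep and timeout intervals.
+ */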
+
+#define MANUAL_MODE 0
+#define DIRECT_MODE 1
+#define SPILDAT_LEN 256
+
+#define OFFSET_SPIMCFG 0x0
+#define OFFSET_SPIMCTRL 0x4
+#define OFFSET_SPICMD 0x5
+#define OFFSET_SPIDCNT 0x6
+#define OFFSET_SPIADDR 0x8
+#define OFFSET_SPIINTSTS 0xc
+
+#define SPIMCTRL_START 0x01
+#define SPIMCTRL_BUSY 0x02
+#define SPIMCTRL_DIR 0x08
+
+struct gxp_spi;
+
+struct gxp_spi_chip {
+ struct gxp_spi *spifi;
+ u32 cs;
+};
+
+struct gxp_spi_data {
+ u32 max_cs;
+ u32 mode_bits;
+};
+
+struct gxp_spi {
+ const struct gxp_spi_data *data;
+ void __iomem *reg_base;
+ void __iomem *dat_base;
+ void __iomem *dir_base;
+ struct device *dev;
+ struct gxp_spi_chip chips[GXP_SPI0_MAX_CHIPSELECT];
+};
+
+static void gxp_spi_set_mode(struct gxp_spi *spifi, int mode)
+{
+ u8 value;
+ void __iomem *reg_base = spifi->reg_base;
+
+ value = readb(reg_base + OFFSET_SPIMCTRL);
+
+ if (mode == MANUAL_MODE) {
+ writeb(0x55, reg_base + OFFSET_SPICMD);
+ writeb(0xaa, reg_base + OFFSET_SPICMD);
+ value &= ~0x30;
+ } else {
+ value |= 0x30;
+ }
+ writeb(value, reg_base + OFFSET_SPIMCTRL);
+}
+
+static int gxp_spi_read_reg(struct gxp_spi_chip *chip, const struct spi_mem_op *op)
+{
+ int ret;
+ struct gxp_spi *spifi = chip->spifi;
+ void __iomem *reg_base = spifi->reg_base;
+ u32 value;
+
+ value = readl(reg_base + OFFSET_SPIMCFG);
+ value &= ~(1 << 24);
+ value |= (chip->cs << 24);
+ value &= ~(0x07 << 16);
+ value &= ~(0x1f << 19);
+ writel(value, reg_base + OFFSET_SPIMCFG);
+
+ writel(0, reg_base + OFFSET_SPIADDR);
+
+ writeb(op->cmd.opcode, reg_base + OFFSET_SPICMD);
+
+ writew(op->data.nbytes, reg_base + OFFSET_SPIDCNT);
+
+ value = readb(reg_base + OFFSET_SPIMCTRL);
+ value &= ~SPIMCTRL_DIR;
+ value |= SPIMCTRL_START;
+
+ writeb(value, reg_base + OFFSET_SPIMCTRL);
+
+ ret = readb_poll_timeout(reg_base + OFFSET_SPIMCTRL, value,
+ !(value & SPIMCTRL_BUSY),
+ GXP_SPI_SLEEP_TIME, GXP_SPI_TIMEOUT);
+ if (ret) {
+ dev_warn(spifi->dev, "read reg busy time out\n");
+ return ret;
+ }
+
+ memcpy_fromio(op->data.buf.in, spifi->dat_base, op->data.nbytes);
+ return ret;
+}
+
+static int gxp_spi_write_reg(struct gxp_spi_chip *chip, const struct spi_mem_op *op)
+{
+ int ret;
+ struct gxp_spi *spifi = chip->spifi;
+ void __iomem *reg_base = spifi->reg_base;
+ u32 value;
+
+ value = readl(reg_base + OFFSET_SPIMCFG);
+ value &= ~(1 << 24);
+ value |= (chip->cs << 24);
+ value &= ~(0x07 << 16);
+ value &= ~(0x1f << 19);
+ writel(value, reg_base + OFFSET_SPIMCFG);
+
+ writel(0, reg_base + OFFSET_SPIADDR);
+
+ writeb(op->cmd.opcode, reg_base + OFFSET_SPICMD);
+
+	memcpy_toio(spifi->dat_base, op->data.buf.out, op->data.nbytes);
+
+ writew(op->data.nbytes, reg_base + OFFSET_SPIDCNT);
+
+ value = readb(reg_base + OFFSET_SPIMCTRL);
+ value |= SPIMCTRL_DIR;
+ value |= SPIMCTRL_START;
+
+ writeb(value, reg_base + OFFSET_SPIMCTRL);
+
+ ret = readb_poll_timeout(reg_base + OFFSET_SPIMCTRL, value,
+ !(value & SPIMCTRL_BUSY),
+ GXP_SPI_SLEEP_TIME, GXP_SPI_TIMEOUT);
+ if (ret)
+ dev_warn(spifi->dev, "write reg busy time out\n");
+
+ return ret;
+}
+
+static ssize_t gxp_spi_read(struct gxp_spi_chip *chip, const struct spi_mem_op *op)
+{
+ struct gxp_spi *spifi = chip->spifi;
+ u32 offset = op->addr.val;
+
+ if (chip->cs == 0)
+ offset += 0x4000000;
+
+ memcpy_fromio(op->data.buf.in, spifi->dir_base + offset, op->data.nbytes);
+
+ return 0;
+}
+
+static ssize_t gxp_spi_write(struct gxp_spi_chip *chip, const struct spi_mem_op *op)
+{
+ struct gxp_spi *spifi = chip->spifi;
+ void __iomem *reg_base = spifi->reg_base;
+ u32 write_len;
+ u32 value;
+ int ret;
+
+ write_len = op->data.nbytes;
+ if (write_len > SPILDAT_LEN)
+ write_len = SPILDAT_LEN;
+
+ value = readl(reg_base + OFFSET_SPIMCFG);
+ value &= ~(1 << 24);
+ value |= (chip->cs << 24);
+ value &= ~(0x07 << 16);
+ value |= (op->addr.nbytes << 16);
+ value &= ~(0x1f << 19);
+ writel(value, reg_base + OFFSET_SPIMCFG);
+
+ writel(op->addr.val, reg_base + OFFSET_SPIADDR);
+
+ writeb(op->cmd.opcode, reg_base + OFFSET_SPICMD);
+
+ writew(write_len, reg_base + OFFSET_SPIDCNT);
+
+	memcpy_toio(spifi->dat_base, op->data.buf.out, write_len);
+
+ value = readb(reg_base + OFFSET_SPIMCTRL);
+ value |= SPIMCTRL_DIR;
+ value |= SPIMCTRL_START;
+
+ writeb(value, reg_base + OFFSET_SPIMCTRL);
+
+ ret = readb_poll_timeout(reg_base + OFFSET_SPIMCTRL, value,
+ !(value & SPIMCTRL_BUSY),
+ GXP_SPI_SLEEP_TIME, GXP_SPI_TIMEOUT);
+ if (ret) {
+ dev_warn(spifi->dev, "write busy time out\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int do_gxp_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ struct gxp_spi *spifi = spi_controller_get_devdata(mem->spi->master);
+ struct gxp_spi_chip *chip = &spifi->chips[mem->spi->chip_select];
+ int ret;
+
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ if (!op->addr.nbytes)
+ ret = gxp_spi_read_reg(chip, op);
+ else
+ ret = gxp_spi_read(chip, op);
+ } else {
+ if (!op->addr.nbytes)
+ ret = gxp_spi_write_reg(chip, op);
+ else
+ ret = gxp_spi_write(chip, op);
+ }
+
+ return ret;
+}
+
+static int gxp_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ int ret;
+
+ ret = do_gxp_exec_mem_op(mem, op);
+ if (ret)
+ dev_err(&mem->spi->dev, "operation failed: %d", ret);
+
+ return ret;
+}
+
+static const struct spi_controller_mem_ops gxp_spi_mem_ops = {
+ .exec_op = gxp_exec_mem_op,
+};
+
+static int gxp_spi_setup(struct spi_device *spi)
+{
+ struct gxp_spi *spifi = spi_controller_get_devdata(spi->master);
+ unsigned int cs = spi->chip_select;
+ struct gxp_spi_chip *chip = &spifi->chips[cs];
+
+ chip->spifi = spifi;
+ chip->cs = cs;
+
+ gxp_spi_set_mode(spifi, MANUAL_MODE);
+
+ return 0;
+}
+
+static int gxp_spifi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct gxp_spi_data *data;
+ struct spi_controller *ctlr;
+ struct gxp_spi *spifi;
+ int ret;
+
+ data = of_device_get_match_data(&pdev->dev);
+
+ ctlr = devm_spi_alloc_master(dev, sizeof(*spifi));
+ if (!ctlr)
+ return -ENOMEM;
+
+ spifi = spi_controller_get_devdata(ctlr);
+
+ platform_set_drvdata(pdev, spifi);
+ spifi->data = data;
+ spifi->dev = dev;
+
+ spifi->reg_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(spifi->reg_base))
+ return PTR_ERR(spifi->reg_base);
+
+ spifi->dat_base = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(spifi->dat_base))
+ return PTR_ERR(spifi->dat_base);
+
+ spifi->dir_base = devm_platform_ioremap_resource(pdev, 2);
+ if (IS_ERR(spifi->dir_base))
+ return PTR_ERR(spifi->dir_base);
+
+ ctlr->mode_bits = data->mode_bits;
+ ctlr->bus_num = pdev->id;
+ ctlr->mem_ops = &gxp_spi_mem_ops;
+ ctlr->setup = gxp_spi_setup;
+ ctlr->num_chipselect = data->max_cs;
+ ctlr->dev.of_node = dev->of_node;
+
+ ret = devm_spi_register_controller(dev, ctlr);
+ if (ret) {
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to register spi controller\n");
+ }
+
+ return 0;
+}
+
+static const struct gxp_spi_data gxp_spifi_data = {
+ .max_cs = 2,
+ .mode_bits = 0,
+};
+
+static const struct of_device_id gxp_spifi_match[] = {
+ {.compatible = "hpe,gxp-spifi", .data = &gxp_spifi_data },
+ { /* null */ }
+};
+MODULE_DEVICE_TABLE(of, gxp_spifi_match);
+
+static struct platform_driver gxp_spifi_driver = {
+ .probe = gxp_spifi_probe,
+ .driver = {
+ .name = "gxp-spifi",
+ .of_match_table = gxp_spifi_match,
+ },
+};
+module_platform_driver(gxp_spifi_driver);
+
+MODULE_DESCRIPTION("HPE GXP SPI Flash Interface driver");
+MODULE_AUTHOR("Nick Hawkins <nick.hawkins@hpe.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-hisi-kunpeng.c b/drivers/spi/spi-hisi-kunpeng.c
new file mode 100644
index 000000000..525cc0143
--- /dev/null
+++ b/drivers/spi/spi-hisi-kunpeng.c
@@ -0,0 +1,555 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// HiSilicon SPI Controller Driver for Kunpeng SoCs
+//
+// Copyright (c) 2021 HiSilicon Technologies Co., Ltd.
+// Author: Jay Fang <f.fangjian@huawei.com>
+//
+// This code is based on spi-dw-core.c.
+
+#include <linux/acpi.h>
+#include <linux/bitfield.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+
+/* Register offsets */
+#define HISI_SPI_CSCR 0x00 /* cs control register */
+#define HISI_SPI_CR 0x04 /* spi common control register */
+#define HISI_SPI_ENR 0x08 /* spi enable register */
+#define HISI_SPI_FIFOC 0x0c /* fifo level control register */
+#define HISI_SPI_IMR 0x10 /* interrupt mask register */
+#define HISI_SPI_DIN 0x14 /* data in register */
+#define HISI_SPI_DOUT 0x18 /* data out register */
+#define HISI_SPI_SR 0x1c /* status register */
+#define HISI_SPI_RISR 0x20 /* raw interrupt status register */
+#define HISI_SPI_ISR 0x24 /* interrupt status register */
+#define HISI_SPI_ICR 0x28 /* interrupt clear register */
+#define HISI_SPI_VERSION 0xe0 /* version register */
+
+/* Bit fields in HISI_SPI_CR */
+#define CR_LOOP_MASK GENMASK(1, 1)
+#define CR_CPOL_MASK GENMASK(2, 2)
+#define CR_CPHA_MASK GENMASK(3, 3)
+#define CR_DIV_PRE_MASK GENMASK(11, 4)
+#define CR_DIV_POST_MASK GENMASK(19, 12)
+#define CR_BPW_MASK GENMASK(24, 20)
+#define CR_SPD_MODE_MASK GENMASK(25, 25)
+
+/* Bit fields in HISI_SPI_FIFOC */
+#define FIFOC_TX_MASK GENMASK(5, 3)
+#define FIFOC_RX_MASK GENMASK(11, 9)
+
+/* Bit fields in HISI_SPI_IMR, 4 bits */
+#define IMR_RXOF BIT(0) /* Receive Overflow */
+#define IMR_RXTO BIT(1) /* Receive Timeout */
+#define IMR_RX BIT(2) /* Receive */
+#define IMR_TX BIT(3) /* Transmit */
+#define IMR_MASK (IMR_RXOF | IMR_RXTO | IMR_RX | IMR_TX)
+
+/* Bit fields in HISI_SPI_SR, 5 bits */
+#define SR_TXE BIT(0) /* Transmit FIFO empty */
+#define SR_TXNF BIT(1) /* Transmit FIFO not full */
+#define SR_RXNE BIT(2) /* Receive FIFO not empty */
+#define SR_RXF BIT(3) /* Receive FIFO full */
+#define SR_BUSY BIT(4) /* Busy Flag */
+
+/* Bit fields in HISI_SPI_ISR, 4 bits */
+#define ISR_RXOF BIT(0) /* Receive Overflow */
+#define ISR_RXTO BIT(1) /* Receive Timeout */
+#define ISR_RX BIT(2) /* Receive */
+#define ISR_TX BIT(3) /* Transmit */
+#define ISR_MASK (ISR_RXOF | ISR_RXTO | ISR_RX | ISR_TX)
+
+/* Bit fields in HISI_SPI_ICR, 2 bits */
+#define ICR_RXOF BIT(0) /* Receive Overflow */
+#define ICR_RXTO BIT(1) /* Receive Timeout */
+#define ICR_MASK (ICR_RXOF | ICR_RXTO)
+
+#define DIV_POST_MAX 0xFF
+#define DIV_POST_MIN 0x00
+#define DIV_PRE_MAX 0xFE
+#define DIV_PRE_MIN 0x02
+#define CLK_DIV_MAX ((1 + DIV_POST_MAX) * DIV_PRE_MAX)
+#define CLK_DIV_MIN ((1 + DIV_POST_MIN) * DIV_PRE_MIN)
+
+#define DEFAULT_NUM_CS 1
+
+#define HISI_SPI_WAIT_TIMEOUT_MS 10UL
+
+enum hisi_spi_rx_level_trig {
+ HISI_SPI_RX_1,
+ HISI_SPI_RX_4,
+ HISI_SPI_RX_8,
+ HISI_SPI_RX_16,
+ HISI_SPI_RX_32,
+ HISI_SPI_RX_64,
+ HISI_SPI_RX_128
+};
+
+enum hisi_spi_tx_level_trig {
+ HISI_SPI_TX_1_OR_LESS,
+ HISI_SPI_TX_4_OR_LESS,
+ HISI_SPI_TX_8_OR_LESS,
+ HISI_SPI_TX_16_OR_LESS,
+ HISI_SPI_TX_32_OR_LESS,
+ HISI_SPI_TX_64_OR_LESS,
+ HISI_SPI_TX_128_OR_LESS
+};
+
+enum hisi_spi_frame_n_bytes {
+ HISI_SPI_N_BYTES_NULL,
+ HISI_SPI_N_BYTES_U8,
+ HISI_SPI_N_BYTES_U16,
+ HISI_SPI_N_BYTES_U32 = 4
+};
+
+/* Slave spi_dev related */
+struct hisi_chip_data {
+ u32 cr;
+ u32 speed_hz; /* baud rate */
+ u16 clk_div; /* baud rate divider */
+
+ /* clk_div = (1 + div_post) * div_pre */
+ u8 div_post; /* value from 0 to 255 */
+ u8 div_pre; /* value from 2 to 254 (even only!) */
+};
+
+struct hisi_spi {
+ struct device *dev;
+
+ void __iomem *regs;
+ int irq;
+ u32 fifo_len; /* depth of the FIFO buffer */
+
+ /* Current message transfer state info */
+ const void *tx;
+ unsigned int tx_len;
+ void *rx;
+ unsigned int rx_len;
+ u8 n_bytes; /* current is a 1/2/4 bytes op */
+
+ struct dentry *debugfs;
+ struct debugfs_regset32 regset;
+};
+
+#define HISI_SPI_DBGFS_REG(_name, _off) \
+{ \
+ .name = _name, \
+ .offset = _off, \
+}
+
+static const struct debugfs_reg32 hisi_spi_regs[] = {
+ HISI_SPI_DBGFS_REG("CSCR", HISI_SPI_CSCR),
+ HISI_SPI_DBGFS_REG("CR", HISI_SPI_CR),
+ HISI_SPI_DBGFS_REG("ENR", HISI_SPI_ENR),
+ HISI_SPI_DBGFS_REG("FIFOC", HISI_SPI_FIFOC),
+ HISI_SPI_DBGFS_REG("IMR", HISI_SPI_IMR),
+ HISI_SPI_DBGFS_REG("DIN", HISI_SPI_DIN),
+ HISI_SPI_DBGFS_REG("DOUT", HISI_SPI_DOUT),
+ HISI_SPI_DBGFS_REG("SR", HISI_SPI_SR),
+ HISI_SPI_DBGFS_REG("RISR", HISI_SPI_RISR),
+ HISI_SPI_DBGFS_REG("ISR", HISI_SPI_ISR),
+ HISI_SPI_DBGFS_REG("ICR", HISI_SPI_ICR),
+ HISI_SPI_DBGFS_REG("VERSION", HISI_SPI_VERSION),
+};
+
+static int hisi_spi_debugfs_init(struct hisi_spi *hs)
+{
+ char name[32];
+
+ struct spi_controller *master;
+
+ master = container_of(hs->dev, struct spi_controller, dev);
+ snprintf(name, 32, "hisi_spi%d", master->bus_num);
+ hs->debugfs = debugfs_create_dir(name, NULL);
+ if (!hs->debugfs)
+ return -ENOMEM;
+
+ hs->regset.regs = hisi_spi_regs;
+ hs->regset.nregs = ARRAY_SIZE(hisi_spi_regs);
+ hs->regset.base = hs->regs;
+ debugfs_create_regset32("registers", 0400, hs->debugfs, &hs->regset);
+
+ return 0;
+}
+
+static u32 hisi_spi_busy(struct hisi_spi *hs)
+{
+ return readl(hs->regs + HISI_SPI_SR) & SR_BUSY;
+}
+
+static u32 hisi_spi_rx_not_empty(struct hisi_spi *hs)
+{
+ return readl(hs->regs + HISI_SPI_SR) & SR_RXNE;
+}
+
+static u32 hisi_spi_tx_not_full(struct hisi_spi *hs)
+{
+ return readl(hs->regs + HISI_SPI_SR) & SR_TXNF;
+}
+
+static void hisi_spi_flush_fifo(struct hisi_spi *hs)
+{
+ unsigned long limit = loops_per_jiffy << 1;
+
+ do {
+ while (hisi_spi_rx_not_empty(hs))
+ readl(hs->regs + HISI_SPI_DOUT);
+ } while (hisi_spi_busy(hs) && limit--);
+}
+
+/* Disable the controller and all interrupts */
+static void hisi_spi_disable(struct hisi_spi *hs)
+{
+ writel(0, hs->regs + HISI_SPI_ENR);
+ writel(IMR_MASK, hs->regs + HISI_SPI_IMR);
+ writel(ICR_MASK, hs->regs + HISI_SPI_ICR);
+}
+
+static u8 hisi_spi_n_bytes(struct spi_transfer *transfer)
+{
+ if (transfer->bits_per_word <= 8)
+ return HISI_SPI_N_BYTES_U8;
+ else if (transfer->bits_per_word <= 16)
+ return HISI_SPI_N_BYTES_U16;
+ else
+ return HISI_SPI_N_BYTES_U32;
+}
+
+static void hisi_spi_reader(struct hisi_spi *hs)
+{
+ u32 max = min_t(u32, hs->rx_len, hs->fifo_len);
+ u32 rxw;
+
+ while (hisi_spi_rx_not_empty(hs) && max--) {
+ rxw = readl(hs->regs + HISI_SPI_DOUT);
+		/* Check that the transfer's original "rx" is not null */
+ if (hs->rx) {
+ switch (hs->n_bytes) {
+ case HISI_SPI_N_BYTES_U8:
+ *(u8 *)(hs->rx) = rxw;
+ break;
+ case HISI_SPI_N_BYTES_U16:
+ *(u16 *)(hs->rx) = rxw;
+ break;
+ case HISI_SPI_N_BYTES_U32:
+ *(u32 *)(hs->rx) = rxw;
+ break;
+ }
+ hs->rx += hs->n_bytes;
+ }
+ --hs->rx_len;
+ }
+}
+
+static void hisi_spi_writer(struct hisi_spi *hs)
+{
+ u32 max = min_t(u32, hs->tx_len, hs->fifo_len);
+ u32 txw = 0;
+
+ while (hisi_spi_tx_not_full(hs) && max--) {
+		/* Check that the transfer's original "tx" is not null */
+ if (hs->tx) {
+ switch (hs->n_bytes) {
+ case HISI_SPI_N_BYTES_U8:
+ txw = *(u8 *)(hs->tx);
+ break;
+ case HISI_SPI_N_BYTES_U16:
+ txw = *(u16 *)(hs->tx);
+ break;
+ case HISI_SPI_N_BYTES_U32:
+ txw = *(u32 *)(hs->tx);
+ break;
+ }
+ hs->tx += hs->n_bytes;
+ }
+ writel(txw, hs->regs + HISI_SPI_DIN);
+ --hs->tx_len;
+ }
+}
+
+static void __hisi_calc_div_reg(struct hisi_chip_data *chip)
+{
+ chip->div_pre = DIV_PRE_MAX;
+ while (chip->div_pre >= DIV_PRE_MIN) {
+ if (chip->clk_div % chip->div_pre == 0)
+ break;
+
+ chip->div_pre -= 2;
+ }
+
+ if (chip->div_pre > chip->clk_div)
+ chip->div_pre = chip->clk_div;
+
+ chip->div_post = (chip->clk_div / chip->div_pre) - 1;
+}
+
+static u32 hisi_calc_effective_speed(struct spi_controller *master,
+ struct hisi_chip_data *chip, u32 speed_hz)
+{
+ u32 effective_speed;
+
+	/* Note the clock divider doesn't support odd numbers */
+ chip->clk_div = DIV_ROUND_UP(master->max_speed_hz, speed_hz) + 1;
+ chip->clk_div &= 0xfffe;
+ if (chip->clk_div > CLK_DIV_MAX)
+ chip->clk_div = CLK_DIV_MAX;
+
+ effective_speed = master->max_speed_hz / chip->clk_div;
+ if (chip->speed_hz != effective_speed) {
+ __hisi_calc_div_reg(chip);
+ chip->speed_hz = effective_speed;
+ }
+
+ return effective_speed;
+}
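+
+/*
+ * Illustrative example (not part of the original driver; the numbers are
+ * hypothetical): with master->max_speed_hz = 250 MHz and a requested speed
+ * of 10 MHz, DIV_ROUND_UP(250 MHz, 10 MHz) + 1 = 26, which is already even,
+ * so clk_div = 26 and the effective speed is 250 MHz / 26 ~= 9.6 MHz.
+ * __hisi_calc_div_reg() then walks down from DIV_PRE_MAX and stops at
+ * div_pre = 26 (26 % 26 == 0), giving div_post = 0, which satisfies
+ * clk_div = (1 + div_post) * div_pre.
+ */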
+
+static u32 hisi_spi_prepare_cr(struct spi_device *spi)
+{
+ u32 cr = FIELD_PREP(CR_SPD_MODE_MASK, 1);
+
+ cr |= FIELD_PREP(CR_CPHA_MASK, (spi->mode & SPI_CPHA) ? 1 : 0);
+ cr |= FIELD_PREP(CR_CPOL_MASK, (spi->mode & SPI_CPOL) ? 1 : 0);
+ cr |= FIELD_PREP(CR_LOOP_MASK, (spi->mode & SPI_LOOP) ? 1 : 0);
+
+ return cr;
+}
+
+static void hisi_spi_hw_init(struct hisi_spi *hs)
+{
+ hisi_spi_disable(hs);
+
+ /* FIFO default config */
+ writel(FIELD_PREP(FIFOC_TX_MASK, HISI_SPI_TX_64_OR_LESS) |
+ FIELD_PREP(FIFOC_RX_MASK, HISI_SPI_RX_16),
+ hs->regs + HISI_SPI_FIFOC);
+
+ hs->fifo_len = 256;
+}
+
+static irqreturn_t hisi_spi_irq(int irq, void *dev_id)
+{
+ struct spi_controller *master = dev_id;
+ struct hisi_spi *hs = spi_controller_get_devdata(master);
+ u32 irq_status = readl(hs->regs + HISI_SPI_ISR) & ISR_MASK;
+
+ if (!irq_status)
+ return IRQ_NONE;
+
+ if (!master->cur_msg)
+ return IRQ_HANDLED;
+
+ /* Error handling */
+ if (irq_status & ISR_RXOF) {
+ dev_err(hs->dev, "interrupt_transfer: fifo overflow\n");
+ master->cur_msg->status = -EIO;
+ goto finalize_transfer;
+ }
+
+ /*
+ * Read data from the Rx FIFO every time. If there is
+ * nothing left to receive, finalize the transfer.
+ */
+ hisi_spi_reader(hs);
+ if (!hs->rx_len)
+ goto finalize_transfer;
+
+ /* Send data out when Tx FIFO IRQ triggered */
+ if (irq_status & ISR_TX)
+ hisi_spi_writer(hs);
+
+ return IRQ_HANDLED;
+
+finalize_transfer:
+ hisi_spi_disable(hs);
+ spi_finalize_current_transfer(master);
+ return IRQ_HANDLED;
+}
+
+static int hisi_spi_transfer_one(struct spi_controller *master,
+ struct spi_device *spi, struct spi_transfer *transfer)
+{
+ struct hisi_spi *hs = spi_controller_get_devdata(master);
+ struct hisi_chip_data *chip = spi_get_ctldata(spi);
+ u32 cr = chip->cr;
+
+ /* Update per transfer options for speed and bpw */
+ transfer->effective_speed_hz =
+ hisi_calc_effective_speed(master, chip, transfer->speed_hz);
+ cr |= FIELD_PREP(CR_DIV_PRE_MASK, chip->div_pre);
+ cr |= FIELD_PREP(CR_DIV_POST_MASK, chip->div_post);
+ cr |= FIELD_PREP(CR_BPW_MASK, transfer->bits_per_word - 1);
+ writel(cr, hs->regs + HISI_SPI_CR);
+
+ hisi_spi_flush_fifo(hs);
+
+ hs->n_bytes = hisi_spi_n_bytes(transfer);
+ hs->tx = transfer->tx_buf;
+ hs->tx_len = transfer->len / hs->n_bytes;
+ hs->rx = transfer->rx_buf;
+ hs->rx_len = hs->tx_len;
+
+ /*
+	 * Ensure that the transfer data above has been updated
+	 * before the interrupts are enabled to start the transfer.
+ */
+ smp_mb();
+
+ /* Enable all interrupts and the controller */
+ writel(~(u32)IMR_MASK, hs->regs + HISI_SPI_IMR);
+ writel(1, hs->regs + HISI_SPI_ENR);
+
+ return 1;
+}
+
+static void hisi_spi_handle_err(struct spi_controller *master,
+ struct spi_message *msg)
+{
+ struct hisi_spi *hs = spi_controller_get_devdata(master);
+
+ hisi_spi_disable(hs);
+
+ /*
+	 * Wait for any interrupt handler that is
+	 * still running after the timeout to complete.
+ */
+ msleep(HISI_SPI_WAIT_TIMEOUT_MS);
+}
+
+static int hisi_spi_setup(struct spi_device *spi)
+{
+ struct hisi_chip_data *chip;
+
+ /* Only alloc on first setup */
+ chip = spi_get_ctldata(spi);
+ if (!chip) {
+ chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+ spi_set_ctldata(spi, chip);
+ }
+
+ chip->cr = hisi_spi_prepare_cr(spi);
+
+ return 0;
+}
+
+static void hisi_spi_cleanup(struct spi_device *spi)
+{
+ struct hisi_chip_data *chip = spi_get_ctldata(spi);
+
+ kfree(chip);
+ spi_set_ctldata(spi, NULL);
+}
+
+static int hisi_spi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct spi_controller *master;
+ struct hisi_spi *hs;
+ int ret, irq;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ master = devm_spi_alloc_master(dev, sizeof(*hs));
+ if (!master)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, master);
+
+ hs = spi_controller_get_devdata(master);
+ hs->dev = dev;
+ hs->irq = irq;
+
+ hs->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(hs->regs))
+ return PTR_ERR(hs->regs);
+
+	/* The maximum SPI clocking speed (master only) is set by firmware */
+ ret = device_property_read_u32(dev, "spi-max-frequency",
+ &master->max_speed_hz);
+ if (ret) {
+ dev_err(dev, "failed to get max SPI clocking speed, ret=%d\n",
+ ret);
+ return -EINVAL;
+ }
+
+ ret = device_property_read_u16(dev, "num-cs",
+ &master->num_chipselect);
+ if (ret)
+ master->num_chipselect = DEFAULT_NUM_CS;
+
+ master->use_gpio_descriptors = true;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
+ master->bus_num = pdev->id;
+ master->setup = hisi_spi_setup;
+ master->cleanup = hisi_spi_cleanup;
+ master->transfer_one = hisi_spi_transfer_one;
+ master->handle_err = hisi_spi_handle_err;
+ master->dev.fwnode = dev->fwnode;
+
+ hisi_spi_hw_init(hs);
+
+ ret = devm_request_irq(dev, hs->irq, hisi_spi_irq, 0, dev_name(dev),
+ master);
+ if (ret < 0) {
+ dev_err(dev, "failed to get IRQ=%d, ret=%d\n", hs->irq, ret);
+ return ret;
+ }
+
+ ret = spi_register_controller(master);
+ if (ret) {
+ dev_err(dev, "failed to register spi master, ret=%d\n", ret);
+ return ret;
+ }
+
+ if (hisi_spi_debugfs_init(hs))
+ dev_info(dev, "failed to create debugfs dir\n");
+
+ dev_info(dev, "hw version:0x%x max-freq:%u kHz\n",
+ readl(hs->regs + HISI_SPI_VERSION),
+ master->max_speed_hz / 1000);
+
+ return 0;
+}
+
+static int hisi_spi_remove(struct platform_device *pdev)
+{
+ struct spi_controller *master = platform_get_drvdata(pdev);
+ struct hisi_spi *hs = spi_controller_get_devdata(master);
+
+ debugfs_remove_recursive(hs->debugfs);
+ spi_unregister_controller(master);
+
+ return 0;
+}
+
+static const struct acpi_device_id hisi_spi_acpi_match[] = {
+ {"HISI03E1", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, hisi_spi_acpi_match);
+
+static struct platform_driver hisi_spi_driver = {
+ .probe = hisi_spi_probe,
+ .remove = hisi_spi_remove,
+ .driver = {
+ .name = "hisi-kunpeng-spi",
+ .acpi_match_table = hisi_spi_acpi_match,
+ },
+};
+module_platform_driver(hisi_spi_driver);
+
+MODULE_AUTHOR("Jay Fang <f.fangjian@huawei.com>");
+MODULE_DESCRIPTION("HiSilicon SPI Controller Driver for Kunpeng SoCs");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-hisi-sfc-v3xx.c b/drivers/spi/spi-hisi-sfc-v3xx.c
new file mode 100644
index 000000000..d3a23b1c2
--- /dev/null
+++ b/drivers/spi/spi-hisi-sfc-v3xx.c
@@ -0,0 +1,542 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// HiSilicon SPI NOR V3XX Flash Controller Driver for hi16xx chipsets
+//
+// Copyright (c) 2019 HiSilicon Technologies Co., Ltd.
+// Author: John Garry <john.garry@huawei.com>
+
+#include <linux/bitops.h>
+#include <linux/completion.h>
+#include <linux/dmi.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+
+#define HISI_SFC_V3XX_VERSION (0x1f8)
+
+#define HISI_SFC_V3XX_GLB_CFG (0x100)
+#define HISI_SFC_V3XX_GLB_CFG_CS0_ADDR_MODE BIT(2)
+#define HISI_SFC_V3XX_RAW_INT_STAT (0x120)
+#define HISI_SFC_V3XX_INT_STAT (0x124)
+#define HISI_SFC_V3XX_INT_MASK (0x128)
+#define HISI_SFC_V3XX_INT_CLR (0x12c)
+#define HISI_SFC_V3XX_CMD_CFG (0x300)
+#define HISI_SFC_V3XX_CMD_CFG_DATA_CNT_OFF 9
+#define HISI_SFC_V3XX_CMD_CFG_RW_MSK BIT(8)
+#define HISI_SFC_V3XX_CMD_CFG_DATA_EN_MSK BIT(7)
+#define HISI_SFC_V3XX_CMD_CFG_DUMMY_CNT_OFF 4
+#define HISI_SFC_V3XX_CMD_CFG_ADDR_EN_MSK BIT(3)
+#define HISI_SFC_V3XX_CMD_CFG_CS_SEL_OFF 1
+#define HISI_SFC_V3XX_CMD_CFG_START_MSK BIT(0)
+#define HISI_SFC_V3XX_CMD_INS (0x308)
+#define HISI_SFC_V3XX_CMD_ADDR (0x30c)
+#define HISI_SFC_V3XX_CMD_DATABUF0 (0x400)
+
+/* Common definition of interrupt bit masks */
+#define HISI_SFC_V3XX_INT_MASK_ALL (0x1ff) /* all the masks */
+#define HISI_SFC_V3XX_INT_MASK_CPLT BIT(0) /* command execution complete */
+#define HISI_SFC_V3XX_INT_MASK_PP_ERR	BIT(2)	/* page program error */
+#define HISI_SFC_V3XX_INT_MASK_IACCES BIT(5) /* error visiting inaccessible/
+ * protected address
+ */
+
+/* IO Mode definition in HISI_SFC_V3XX_CMD_CFG */
+#define HISI_SFC_V3XX_STD (0 << 17)
+#define HISI_SFC_V3XX_DIDO (1 << 17)
+#define HISI_SFC_V3XX_DIO (2 << 17)
+#define HISI_SFC_V3XX_FULL_DIO (3 << 17)
+#define HISI_SFC_V3XX_QIQO (5 << 17)
+#define HISI_SFC_V3XX_QIO (6 << 17)
+#define HISI_SFC_V3XX_FULL_QIO (7 << 17)
+
+/*
+ * The IO modes lookup table. hisi_sfc_v3xx_io_modes[(z - 1) / 2][y / 2][x / 2]
+ * stands for x-y-z mode, as described in SFDP terminology. -EIO indicates
+ * an invalid mode.
+ */
+static const int hisi_sfc_v3xx_io_modes[2][3][3] = {
+ {
+ { HISI_SFC_V3XX_DIDO, HISI_SFC_V3XX_DIDO, HISI_SFC_V3XX_DIDO },
+ { HISI_SFC_V3XX_DIO, HISI_SFC_V3XX_FULL_DIO, -EIO },
+ { -EIO, -EIO, -EIO },
+ },
+ {
+ { HISI_SFC_V3XX_QIQO, HISI_SFC_V3XX_QIQO, HISI_SFC_V3XX_QIQO },
+ { -EIO, -EIO, -EIO },
+ { HISI_SFC_V3XX_QIO, -EIO, HISI_SFC_V3XX_FULL_QIO },
+ },
+};
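+
+/*
+ * Example lookups (illustrative only, not from the original source): a
+ * 1-1-4 operation (cmd/addr/data buswidths of 1, 1 and 4, e.g. a quad
+ * output read) indexes [(4 - 1) / 2][1 / 2][1 / 2] = [1][0][0], i.e.
+ * HISI_SFC_V3XX_QIQO, while a 1-4-4 operation indexes [1][2][0], i.e.
+ * HISI_SFC_V3XX_QIO. Any combination that maps to -EIO is rejected.
+ */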
+
+struct hisi_sfc_v3xx_host {
+ struct device *dev;
+ void __iomem *regbase;
+ int max_cmd_dword;
+ struct completion *completion;
+ u8 address_mode;
+ int irq;
+};
+
+static void hisi_sfc_v3xx_disable_int(struct hisi_sfc_v3xx_host *host)
+{
+ writel(0, host->regbase + HISI_SFC_V3XX_INT_MASK);
+}
+
+static void hisi_sfc_v3xx_enable_int(struct hisi_sfc_v3xx_host *host)
+{
+ writel(HISI_SFC_V3XX_INT_MASK_ALL, host->regbase + HISI_SFC_V3XX_INT_MASK);
+}
+
+static void hisi_sfc_v3xx_clear_int(struct hisi_sfc_v3xx_host *host)
+{
+ writel(HISI_SFC_V3XX_INT_MASK_ALL, host->regbase + HISI_SFC_V3XX_INT_CLR);
+}
+
+/*
+ * The interrupt status register indicates whether an error occurred
+ * during the operation. Check it, and clear the interrupts so that
+ * the next check starts from a clean state.
+ */
+static int hisi_sfc_v3xx_handle_completion(struct hisi_sfc_v3xx_host *host)
+{
+ u32 reg;
+
+ reg = readl(host->regbase + HISI_SFC_V3XX_RAW_INT_STAT);
+ hisi_sfc_v3xx_clear_int(host);
+
+ if (reg & HISI_SFC_V3XX_INT_MASK_IACCES) {
+ dev_err(host->dev, "fail to access protected address\n");
+ return -EIO;
+ }
+
+ if (reg & HISI_SFC_V3XX_INT_MASK_PP_ERR) {
+ dev_err(host->dev, "page program operation failed\n");
+ return -EIO;
+ }
+
+ /*
+	 * The other bits of the interrupt registers are not currently
+	 * used and will probably not be triggered in this driver. If one
+	 * does fire, we regard it as an unsupported error here.
+ */
+ if (!(reg & HISI_SFC_V3XX_INT_MASK_CPLT)) {
+ dev_err(host->dev, "unsupported error occurred, status=0x%x\n", reg);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+#define HISI_SFC_V3XX_WAIT_TIMEOUT_US 1000000
+#define HISI_SFC_V3XX_WAIT_POLL_INTERVAL_US 10
+
+static int hisi_sfc_v3xx_wait_cmd_idle(struct hisi_sfc_v3xx_host *host)
+{
+ u32 reg;
+
+ return readl_poll_timeout(host->regbase + HISI_SFC_V3XX_CMD_CFG, reg,
+ !(reg & HISI_SFC_V3XX_CMD_CFG_START_MSK),
+ HISI_SFC_V3XX_WAIT_POLL_INTERVAL_US,
+ HISI_SFC_V3XX_WAIT_TIMEOUT_US);
+}
+
+static int hisi_sfc_v3xx_adjust_op_size(struct spi_mem *mem,
+ struct spi_mem_op *op)
+{
+ struct spi_device *spi = mem->spi;
+ struct hisi_sfc_v3xx_host *host;
+ uintptr_t addr = (uintptr_t)op->data.buf.in;
+ int max_byte_count;
+
+ host = spi_controller_get_devdata(spi->master);
+
+ max_byte_count = host->max_cmd_dword * 4;
+
+ if (!IS_ALIGNED(addr, 4) && op->data.nbytes >= 4)
+ op->data.nbytes = 4 - (addr % 4);
+ else if (op->data.nbytes > max_byte_count)
+ op->data.nbytes = max_byte_count;
+
+ return 0;
+}
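+
+/*
+ * Illustrative example (not part of the original driver): for a read into
+ * a buffer starting at an address with (addr % 4) == 2 and nbytes = 16,
+ * the op is trimmed to 4 - 2 = 2 bytes, so the unaligned head becomes its
+ * own transaction and the follow-up ops start 32-bit aligned; an already
+ * aligned op is only clamped to max_cmd_dword * 4 bytes.
+ */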
+
+/*
+ * The controller only supports Standard SPI mode, Dual mode and
+ * Quad mode. Double sanitize the ops here to avoid OOB access.
+ */
+static bool hisi_sfc_v3xx_supports_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ struct spi_device *spi = mem->spi;
+ struct hisi_sfc_v3xx_host *host;
+
+ host = spi_controller_get_devdata(spi->master);
+
+ if (op->data.buswidth > 4 || op->dummy.buswidth > 4 ||
+ op->addr.buswidth > 4 || op->cmd.buswidth > 4)
+ return false;
+
+ if (op->addr.nbytes != host->address_mode && op->addr.nbytes)
+ return false;
+
+ return spi_mem_default_supports_op(mem, op);
+}
+
+/*
+ * memcpy_{to,from}io doesn't guarantee 32b accesses - which we require for
+ * the DATABUF registers - so use __io{read,write}32_copy when possible. For
+ * trailing bytes, copy them byte-by-byte from the DATABUF register, as we
+ * can't clobber outside the source/dest buffer.
+ *
+ * For efficient data read/write, we try to put any leading 32b-unaligned data
+ * into a separate transaction in hisi_sfc_v3xx_adjust_op_size().
+ */
+static void hisi_sfc_v3xx_read_databuf(struct hisi_sfc_v3xx_host *host,
+ u8 *to, unsigned int len)
+{
+ void __iomem *from;
+ int i;
+
+ from = host->regbase + HISI_SFC_V3XX_CMD_DATABUF0;
+
+ if (IS_ALIGNED((uintptr_t)to, 4)) {
+ int words = len / 4;
+
+ __ioread32_copy(to, from, words);
+
+ len -= words * 4;
+ if (len) {
+ u32 val;
+
+ to += words * 4;
+ from += words * 4;
+
+ val = __raw_readl(from);
+
+ for (i = 0; i < len; i++, val >>= 8, to++)
+ *to = (u8)val;
+ }
+ } else {
+ for (i = 0; i < DIV_ROUND_UP(len, 4); i++, from += 4) {
+ u32 val = __raw_readl(from);
+ int j;
+
+ for (j = 0; j < 4 && (j + (i * 4) < len);
+ to++, val >>= 8, j++)
+ *to = (u8)val;
+ }
+ }
+}
+
+static void hisi_sfc_v3xx_write_databuf(struct hisi_sfc_v3xx_host *host,
+ const u8 *from, unsigned int len)
+{
+ void __iomem *to;
+ int i;
+
+ to = host->regbase + HISI_SFC_V3XX_CMD_DATABUF0;
+
+ if (IS_ALIGNED((uintptr_t)from, 4)) {
+ int words = len / 4;
+
+ __iowrite32_copy(to, from, words);
+
+ len -= words * 4;
+ if (len) {
+ u32 val = 0;
+
+ to += words * 4;
+ from += words * 4;
+
+ for (i = 0; i < len; i++, from++)
+ val |= *from << i * 8;
+ __raw_writel(val, to);
+ }
+
+ } else {
+ for (i = 0; i < DIV_ROUND_UP(len, 4); i++, to += 4) {
+ u32 val = 0;
+ int j;
+
+ for (j = 0; j < 4 && (j + (i * 4) < len);
+ from++, j++)
+ val |= *from << j * 8;
+ __raw_writel(val, to);
+ }
+ }
+}
+
+static int hisi_sfc_v3xx_start_bus(struct hisi_sfc_v3xx_host *host,
+ const struct spi_mem_op *op,
+ u8 chip_select)
+{
+ int len = op->data.nbytes, buswidth_mode;
+ u32 config = 0;
+
+ if (op->addr.nbytes)
+ config |= HISI_SFC_V3XX_CMD_CFG_ADDR_EN_MSK;
+
+ if (op->data.buswidth == 0 || op->data.buswidth == 1) {
+ buswidth_mode = HISI_SFC_V3XX_STD;
+ } else {
+ int data_idx, addr_idx, cmd_idx;
+
+ data_idx = (op->data.buswidth - 1) / 2;
+ addr_idx = op->addr.buswidth / 2;
+ cmd_idx = op->cmd.buswidth / 2;
+ buswidth_mode = hisi_sfc_v3xx_io_modes[data_idx][addr_idx][cmd_idx];
+ }
+ if (buswidth_mode < 0)
+ return buswidth_mode;
+ config |= buswidth_mode;
+
+ if (op->data.dir != SPI_MEM_NO_DATA) {
+ config |= (len - 1) << HISI_SFC_V3XX_CMD_CFG_DATA_CNT_OFF;
+ config |= HISI_SFC_V3XX_CMD_CFG_DATA_EN_MSK;
+ }
+
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ config |= HISI_SFC_V3XX_CMD_CFG_RW_MSK;
+
+ config |= op->dummy.nbytes << HISI_SFC_V3XX_CMD_CFG_DUMMY_CNT_OFF |
+ chip_select << HISI_SFC_V3XX_CMD_CFG_CS_SEL_OFF |
+ HISI_SFC_V3XX_CMD_CFG_START_MSK;
+
+ writel(op->addr.val, host->regbase + HISI_SFC_V3XX_CMD_ADDR);
+ writel(op->cmd.opcode, host->regbase + HISI_SFC_V3XX_CMD_INS);
+
+ writel(config, host->regbase + HISI_SFC_V3XX_CMD_CFG);
+
+ return 0;
+}
+
+static int hisi_sfc_v3xx_generic_exec_op(struct hisi_sfc_v3xx_host *host,
+ const struct spi_mem_op *op,
+ u8 chip_select)
+{
+ DECLARE_COMPLETION_ONSTACK(done);
+ int ret;
+
+ if (host->irq) {
+ host->completion = &done;
+ hisi_sfc_v3xx_enable_int(host);
+ }
+
+ if (op->data.dir == SPI_MEM_DATA_OUT)
+ hisi_sfc_v3xx_write_databuf(host, op->data.buf.out, op->data.nbytes);
+
+ ret = hisi_sfc_v3xx_start_bus(host, op, chip_select);
+ if (ret)
+ return ret;
+
+ if (host->irq) {
+ ret = wait_for_completion_timeout(host->completion,
+ usecs_to_jiffies(HISI_SFC_V3XX_WAIT_TIMEOUT_US));
+ if (!ret)
+ ret = -ETIMEDOUT;
+ else
+ ret = 0;
+
+ hisi_sfc_v3xx_disable_int(host);
+ synchronize_irq(host->irq);
+ host->completion = NULL;
+ } else {
+ ret = hisi_sfc_v3xx_wait_cmd_idle(host);
+ }
+ if (hisi_sfc_v3xx_handle_completion(host) || ret)
+ return -EIO;
+
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ hisi_sfc_v3xx_read_databuf(host, op->data.buf.in, op->data.nbytes);
+
+ return 0;
+}
+
+static int hisi_sfc_v3xx_exec_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ struct hisi_sfc_v3xx_host *host;
+ struct spi_device *spi = mem->spi;
+ u8 chip_select = spi->chip_select;
+
+ host = spi_controller_get_devdata(spi->master);
+
+ return hisi_sfc_v3xx_generic_exec_op(host, op, chip_select);
+}
+
+static const struct spi_controller_mem_ops hisi_sfc_v3xx_mem_ops = {
+ .adjust_op_size = hisi_sfc_v3xx_adjust_op_size,
+ .supports_op = hisi_sfc_v3xx_supports_op,
+ .exec_op = hisi_sfc_v3xx_exec_op,
+};
+
+static irqreturn_t hisi_sfc_v3xx_isr(int irq, void *data)
+{
+ struct hisi_sfc_v3xx_host *host = data;
+
+ hisi_sfc_v3xx_disable_int(host);
+
+ complete(host->completion);
+
+ return IRQ_HANDLED;
+}
+
+static int hisi_sfc_v3xx_buswidth_override_bits;
+
+/*
+ * ACPI FW does not currently allow us to set the device buswidth, so quirk it
+ * depending on the board.
+ */
+static int __init hisi_sfc_v3xx_dmi_quirk(const struct dmi_system_id *d)
+{
+ hisi_sfc_v3xx_buswidth_override_bits = SPI_RX_QUAD | SPI_TX_QUAD;
+
+ return 0;
+}
+
+static const struct dmi_system_id hisi_sfc_v3xx_dmi_quirk_table[] = {
+ {
+ .callback = hisi_sfc_v3xx_dmi_quirk,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Huawei"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "D06"),
+ },
+ },
+ {
+ .callback = hisi_sfc_v3xx_dmi_quirk,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Huawei"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TaiShan 2280 V2"),
+ },
+ },
+ {
+ .callback = hisi_sfc_v3xx_dmi_quirk,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Huawei"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TaiShan 200 (Model 2280)"),
+ },
+ },
+ {}
+};
+
+static int hisi_sfc_v3xx_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct hisi_sfc_v3xx_host *host;
+ struct spi_controller *ctlr;
+ u32 version, glb_config;
+ int ret;
+
+ ctlr = spi_alloc_master(&pdev->dev, sizeof(*host));
+ if (!ctlr)
+ return -ENOMEM;
+
+ ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD |
+ SPI_TX_DUAL | SPI_TX_QUAD;
+
+ ctlr->buswidth_override_bits = hisi_sfc_v3xx_buswidth_override_bits;
+
+ host = spi_controller_get_devdata(ctlr);
+ host->dev = dev;
+
+ platform_set_drvdata(pdev, host);
+
+ host->regbase = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(host->regbase)) {
+ ret = PTR_ERR(host->regbase);
+ goto err_put_master;
+ }
+
+ host->irq = platform_get_irq_optional(pdev, 0);
+ if (host->irq == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto err_put_master;
+ }
+
+ hisi_sfc_v3xx_disable_int(host);
+
+ if (host->irq > 0) {
+ ret = devm_request_irq(dev, host->irq, hisi_sfc_v3xx_isr, 0,
+ "hisi-sfc-v3xx", host);
+
+ if (ret) {
+ dev_err(dev, "failed to request irq%d, ret = %d\n", host->irq, ret);
+ host->irq = 0;
+ }
+ } else {
+ host->irq = 0;
+ }
+
+ ctlr->bus_num = -1;
+ ctlr->num_chipselect = 1;
+ ctlr->mem_ops = &hisi_sfc_v3xx_mem_ops;
+
+ /*
+ * The address mode of the controller is either 3 or 4,
+ * which is indicated by the address mode bit in
+ * the global config register. The register is read only
+ * for the OS driver.
+ */
+ glb_config = readl(host->regbase + HISI_SFC_V3XX_GLB_CFG);
+ if (glb_config & HISI_SFC_V3XX_GLB_CFG_CS0_ADDR_MODE)
+ host->address_mode = 4;
+ else
+ host->address_mode = 3;
+
+ version = readl(host->regbase + HISI_SFC_V3XX_VERSION);
+
+ if (version >= 0x351)
+ host->max_cmd_dword = 64;
+ else
+ host->max_cmd_dword = 16;
+
+ ret = devm_spi_register_controller(dev, ctlr);
+ if (ret)
+ goto err_put_master;
+
+ dev_info(&pdev->dev, "hw version 0x%x, %s mode.\n",
+ version, host->irq ? "irq" : "polling");
+
+ return 0;
+
+err_put_master:
+ spi_master_put(ctlr);
+ return ret;
+}
+
+static const struct acpi_device_id hisi_sfc_v3xx_acpi_ids[] = {
+ {"HISI0341", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, hisi_sfc_v3xx_acpi_ids);
+
+static struct platform_driver hisi_sfc_v3xx_spi_driver = {
+ .driver = {
+ .name = "hisi-sfc-v3xx",
+ .acpi_match_table = hisi_sfc_v3xx_acpi_ids,
+ },
+ .probe = hisi_sfc_v3xx_probe,
+};
+
+static int __init hisi_sfc_v3xx_spi_init(void)
+{
+ dmi_check_system(hisi_sfc_v3xx_dmi_quirk_table);
+
+ return platform_driver_register(&hisi_sfc_v3xx_spi_driver);
+}
+
+static void __exit hisi_sfc_v3xx_spi_exit(void)
+{
+ platform_driver_unregister(&hisi_sfc_v3xx_spi_driver);
+}
+
+module_init(hisi_sfc_v3xx_spi_init);
+module_exit(hisi_sfc_v3xx_spi_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
+MODULE_DESCRIPTION("HiSilicon SPI NOR V3XX Flash Controller Driver for hi16xx chipsets");
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
new file mode 100644
index 000000000..bfd12247f
--- /dev/null
+++ b/drivers/spi/spi-img-spfi.c
@@ -0,0 +1,768 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * IMG SPFI controller driver
+ *
+ * Copyright (C) 2007,2008,2013 Imagination Technologies Ltd.
+ * Copyright (C) 2014 Google, Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/spinlock.h>
+
+#define SPFI_DEVICE_PARAMETER(x) (0x00 + 0x4 * (x))
+#define SPFI_DEVICE_PARAMETER_BITCLK_SHIFT 24
+#define SPFI_DEVICE_PARAMETER_BITCLK_MASK 0xff
+#define SPFI_DEVICE_PARAMETER_CSSETUP_SHIFT 16
+#define SPFI_DEVICE_PARAMETER_CSSETUP_MASK 0xff
+#define SPFI_DEVICE_PARAMETER_CSHOLD_SHIFT 8
+#define SPFI_DEVICE_PARAMETER_CSHOLD_MASK 0xff
+#define SPFI_DEVICE_PARAMETER_CSDELAY_SHIFT 0
+#define SPFI_DEVICE_PARAMETER_CSDELAY_MASK 0xff
+
+#define SPFI_CONTROL 0x14
+#define SPFI_CONTROL_CONTINUE BIT(12)
+#define SPFI_CONTROL_SOFT_RESET BIT(11)
+#define SPFI_CONTROL_SEND_DMA BIT(10)
+#define SPFI_CONTROL_GET_DMA BIT(9)
+#define SPFI_CONTROL_SE BIT(8)
+#define SPFI_CONTROL_TMODE_SHIFT 5
+#define SPFI_CONTROL_TMODE_MASK 0x7
+#define SPFI_CONTROL_TMODE_SINGLE 0
+#define SPFI_CONTROL_TMODE_DUAL 1
+#define SPFI_CONTROL_TMODE_QUAD 2
+#define SPFI_CONTROL_SPFI_EN BIT(0)
+
+#define SPFI_TRANSACTION 0x18
+#define SPFI_TRANSACTION_TSIZE_SHIFT 16
+#define SPFI_TRANSACTION_TSIZE_MASK 0xffff
+
+#define SPFI_PORT_STATE 0x1c
+#define SPFI_PORT_STATE_DEV_SEL_SHIFT 20
+#define SPFI_PORT_STATE_DEV_SEL_MASK 0x7
+#define SPFI_PORT_STATE_CK_POL(x) BIT(19 - (x))
+#define SPFI_PORT_STATE_CK_PHASE(x) BIT(14 - (x))
+
+#define SPFI_TX_32BIT_VALID_DATA 0x20
+#define SPFI_TX_8BIT_VALID_DATA 0x24
+#define SPFI_RX_32BIT_VALID_DATA 0x28
+#define SPFI_RX_8BIT_VALID_DATA 0x2c
+
+#define SPFI_INTERRUPT_STATUS 0x30
+#define SPFI_INTERRUPT_ENABLE 0x34
+#define SPFI_INTERRUPT_CLEAR 0x38
+#define SPFI_INTERRUPT_IACCESS BIT(12)
+#define SPFI_INTERRUPT_GDEX8BIT BIT(11)
+#define SPFI_INTERRUPT_ALLDONETRIG BIT(9)
+#define SPFI_INTERRUPT_GDFUL BIT(8)
+#define SPFI_INTERRUPT_GDHF BIT(7)
+#define SPFI_INTERRUPT_GDEX32BIT BIT(6)
+#define SPFI_INTERRUPT_GDTRIG BIT(5)
+#define SPFI_INTERRUPT_SDFUL BIT(3)
+#define SPFI_INTERRUPT_SDHF BIT(2)
+#define SPFI_INTERRUPT_SDE BIT(1)
+#define SPFI_INTERRUPT_SDTRIG BIT(0)
+
+/*
+ * There are four parallel FIFOs of 16 bytes each. The word buffer
+ * (*_32BIT_VALID_DATA) accesses all four FIFOs at once, resulting in an
+ * effective FIFO size of 64 bytes. The byte buffer (*_8BIT_VALID_DATA)
+ * accesses only a single FIFO, resulting in an effective FIFO size of
+ * 16 bytes.
+ */
+#define SPFI_32BIT_FIFO_SIZE 64
+#define SPFI_8BIT_FIFO_SIZE 16
+
+struct img_spfi {
+ struct device *dev;
+ struct spi_master *master;
+ spinlock_t lock;
+
+ void __iomem *regs;
+ phys_addr_t phys;
+ int irq;
+ struct clk *spfi_clk;
+ struct clk *sys_clk;
+
+ struct dma_chan *rx_ch;
+ struct dma_chan *tx_ch;
+ bool tx_dma_busy;
+ bool rx_dma_busy;
+};
+
+static inline u32 spfi_readl(struct img_spfi *spfi, u32 reg)
+{
+ return readl(spfi->regs + reg);
+}
+
+static inline void spfi_writel(struct img_spfi *spfi, u32 val, u32 reg)
+{
+ writel(val, spfi->regs + reg);
+}
+
+static inline void spfi_start(struct img_spfi *spfi)
+{
+ u32 val;
+
+ val = spfi_readl(spfi, SPFI_CONTROL);
+ val |= SPFI_CONTROL_SPFI_EN;
+ spfi_writel(spfi, val, SPFI_CONTROL);
+}
+
+static inline void spfi_reset(struct img_spfi *spfi)
+{
+ spfi_writel(spfi, SPFI_CONTROL_SOFT_RESET, SPFI_CONTROL);
+ spfi_writel(spfi, 0, SPFI_CONTROL);
+}
+
+static int spfi_wait_all_done(struct img_spfi *spfi)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(50);
+
+ while (time_before(jiffies, timeout)) {
+ u32 status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
+
+ if (status & SPFI_INTERRUPT_ALLDONETRIG) {
+ spfi_writel(spfi, SPFI_INTERRUPT_ALLDONETRIG,
+ SPFI_INTERRUPT_CLEAR);
+ return 0;
+ }
+ cpu_relax();
+ }
+
+ dev_err(spfi->dev, "Timed out waiting for transaction to complete\n");
+ spfi_reset(spfi);
+
+ return -ETIMEDOUT;
+}
+
+static unsigned int spfi_pio_write32(struct img_spfi *spfi, const u32 *buf,
+ unsigned int max)
+{
+ unsigned int count = 0;
+ u32 status;
+
+ while (count < max / 4) {
+ spfi_writel(spfi, SPFI_INTERRUPT_SDFUL, SPFI_INTERRUPT_CLEAR);
+ status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
+ if (status & SPFI_INTERRUPT_SDFUL)
+ break;
+ spfi_writel(spfi, buf[count], SPFI_TX_32BIT_VALID_DATA);
+ count++;
+ }
+
+ return count * 4;
+}
+
+static unsigned int spfi_pio_write8(struct img_spfi *spfi, const u8 *buf,
+ unsigned int max)
+{
+ unsigned int count = 0;
+ u32 status;
+
+ while (count < max) {
+ spfi_writel(spfi, SPFI_INTERRUPT_SDFUL, SPFI_INTERRUPT_CLEAR);
+ status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
+ if (status & SPFI_INTERRUPT_SDFUL)
+ break;
+ spfi_writel(spfi, buf[count], SPFI_TX_8BIT_VALID_DATA);
+ count++;
+ }
+
+ return count;
+}
+
+static unsigned int spfi_pio_read32(struct img_spfi *spfi, u32 *buf,
+ unsigned int max)
+{
+ unsigned int count = 0;
+ u32 status;
+
+ while (count < max / 4) {
+ spfi_writel(spfi, SPFI_INTERRUPT_GDEX32BIT,
+ SPFI_INTERRUPT_CLEAR);
+ status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
+ if (!(status & SPFI_INTERRUPT_GDEX32BIT))
+ break;
+ buf[count] = spfi_readl(spfi, SPFI_RX_32BIT_VALID_DATA);
+ count++;
+ }
+
+ return count * 4;
+}
+
+static unsigned int spfi_pio_read8(struct img_spfi *spfi, u8 *buf,
+ unsigned int max)
+{
+ unsigned int count = 0;
+ u32 status;
+
+ while (count < max) {
+ spfi_writel(spfi, SPFI_INTERRUPT_GDEX8BIT,
+ SPFI_INTERRUPT_CLEAR);
+ status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
+ if (!(status & SPFI_INTERRUPT_GDEX8BIT))
+ break;
+ buf[count] = spfi_readl(spfi, SPFI_RX_8BIT_VALID_DATA);
+ count++;
+ }
+
+ return count;
+}
+
+static int img_spfi_start_pio(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct img_spfi *spfi = spi_master_get_devdata(spi->master);
+ unsigned int tx_bytes = 0, rx_bytes = 0;
+ const void *tx_buf = xfer->tx_buf;
+ void *rx_buf = xfer->rx_buf;
+ unsigned long timeout;
+ int ret;
+
+ if (tx_buf)
+ tx_bytes = xfer->len;
+ if (rx_buf)
+ rx_bytes = xfer->len;
+
+ spfi_start(spfi);
+
+ timeout = jiffies +
+ msecs_to_jiffies(xfer->len * 8 * 1000 / xfer->speed_hz + 100);
+ while ((tx_bytes > 0 || rx_bytes > 0) &&
+ time_before(jiffies, timeout)) {
+ unsigned int tx_count, rx_count;
+
+ if (tx_bytes >= 4)
+ tx_count = spfi_pio_write32(spfi, tx_buf, tx_bytes);
+ else
+ tx_count = spfi_pio_write8(spfi, tx_buf, tx_bytes);
+
+ if (rx_bytes >= 4)
+ rx_count = spfi_pio_read32(spfi, rx_buf, rx_bytes);
+ else
+ rx_count = spfi_pio_read8(spfi, rx_buf, rx_bytes);
+
+ tx_buf += tx_count;
+ rx_buf += rx_count;
+ tx_bytes -= tx_count;
+ rx_bytes -= rx_count;
+
+ cpu_relax();
+ }
+
+ if (rx_bytes > 0 || tx_bytes > 0) {
+ dev_err(spfi->dev, "PIO transfer timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ ret = spfi_wait_all_done(spfi);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void img_spfi_dma_rx_cb(void *data)
+{
+ struct img_spfi *spfi = data;
+ unsigned long flags;
+
+ spfi_wait_all_done(spfi);
+
+ spin_lock_irqsave(&spfi->lock, flags);
+ spfi->rx_dma_busy = false;
+ if (!spfi->tx_dma_busy)
+ spi_finalize_current_transfer(spfi->master);
+ spin_unlock_irqrestore(&spfi->lock, flags);
+}
+
+static void img_spfi_dma_tx_cb(void *data)
+{
+ struct img_spfi *spfi = data;
+ unsigned long flags;
+
+ spfi_wait_all_done(spfi);
+
+ spin_lock_irqsave(&spfi->lock, flags);
+ spfi->tx_dma_busy = false;
+ if (!spfi->rx_dma_busy)
+ spi_finalize_current_transfer(spfi->master);
+ spin_unlock_irqrestore(&spfi->lock, flags);
+}
+
+static int img_spfi_start_dma(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct img_spfi *spfi = spi_master_get_devdata(spi->master);
+ struct dma_async_tx_descriptor *rxdesc = NULL, *txdesc = NULL;
+ struct dma_slave_config rxconf, txconf;
+
+ spfi->rx_dma_busy = false;
+ spfi->tx_dma_busy = false;
+
+ if (xfer->rx_buf) {
+ rxconf.direction = DMA_DEV_TO_MEM;
+ if (xfer->len % 4 == 0) {
+ rxconf.src_addr = spfi->phys + SPFI_RX_32BIT_VALID_DATA;
+ rxconf.src_addr_width = 4;
+ rxconf.src_maxburst = 4;
+ } else {
+ rxconf.src_addr = spfi->phys + SPFI_RX_8BIT_VALID_DATA;
+ rxconf.src_addr_width = 1;
+ rxconf.src_maxburst = 4;
+ }
+ dmaengine_slave_config(spfi->rx_ch, &rxconf);
+
+ rxdesc = dmaengine_prep_slave_sg(spfi->rx_ch, xfer->rx_sg.sgl,
+ xfer->rx_sg.nents,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT);
+ if (!rxdesc)
+ goto stop_dma;
+
+ rxdesc->callback = img_spfi_dma_rx_cb;
+ rxdesc->callback_param = spfi;
+ }
+
+ if (xfer->tx_buf) {
+ txconf.direction = DMA_MEM_TO_DEV;
+ if (xfer->len % 4 == 0) {
+ txconf.dst_addr = spfi->phys + SPFI_TX_32BIT_VALID_DATA;
+ txconf.dst_addr_width = 4;
+ txconf.dst_maxburst = 4;
+ } else {
+ txconf.dst_addr = spfi->phys + SPFI_TX_8BIT_VALID_DATA;
+ txconf.dst_addr_width = 1;
+ txconf.dst_maxburst = 4;
+ }
+ dmaengine_slave_config(spfi->tx_ch, &txconf);
+
+ txdesc = dmaengine_prep_slave_sg(spfi->tx_ch, xfer->tx_sg.sgl,
+ xfer->tx_sg.nents,
+ DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT);
+ if (!txdesc)
+ goto stop_dma;
+
+ txdesc->callback = img_spfi_dma_tx_cb;
+ txdesc->callback_param = spfi;
+ }
+
+ if (xfer->rx_buf) {
+ spfi->rx_dma_busy = true;
+ dmaengine_submit(rxdesc);
+ dma_async_issue_pending(spfi->rx_ch);
+ }
+
+ spfi_start(spfi);
+
+ if (xfer->tx_buf) {
+ spfi->tx_dma_busy = true;
+ dmaengine_submit(txdesc);
+ dma_async_issue_pending(spfi->tx_ch);
+ }
+
+ return 1;
+
+stop_dma:
+ dmaengine_terminate_all(spfi->rx_ch);
+ dmaengine_terminate_all(spfi->tx_ch);
+ return -EIO;
+}
+
+static void img_spfi_handle_err(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct img_spfi *spfi = spi_master_get_devdata(master);
+ unsigned long flags;
+
+ /*
+ * Stop all DMA and reset the controller if the previous transaction
+	 * timed out and never completed its DMA.
+ */
+ spin_lock_irqsave(&spfi->lock, flags);
+ if (spfi->tx_dma_busy || spfi->rx_dma_busy) {
+ spfi->tx_dma_busy = false;
+ spfi->rx_dma_busy = false;
+
+ dmaengine_terminate_all(spfi->tx_ch);
+ dmaengine_terminate_all(spfi->rx_ch);
+ }
+ spin_unlock_irqrestore(&spfi->lock, flags);
+}
+
+static int img_spfi_prepare(struct spi_master *master, struct spi_message *msg)
+{
+ struct img_spfi *spfi = spi_master_get_devdata(master);
+ u32 val;
+
+ val = spfi_readl(spfi, SPFI_PORT_STATE);
+ val &= ~(SPFI_PORT_STATE_DEV_SEL_MASK <<
+ SPFI_PORT_STATE_DEV_SEL_SHIFT);
+ val |= msg->spi->chip_select << SPFI_PORT_STATE_DEV_SEL_SHIFT;
+ if (msg->spi->mode & SPI_CPHA)
+ val |= SPFI_PORT_STATE_CK_PHASE(msg->spi->chip_select);
+ else
+ val &= ~SPFI_PORT_STATE_CK_PHASE(msg->spi->chip_select);
+ if (msg->spi->mode & SPI_CPOL)
+ val |= SPFI_PORT_STATE_CK_POL(msg->spi->chip_select);
+ else
+ val &= ~SPFI_PORT_STATE_CK_POL(msg->spi->chip_select);
+ spfi_writel(spfi, val, SPFI_PORT_STATE);
+
+ return 0;
+}
+
+static int img_spfi_unprepare(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct img_spfi *spfi = spi_master_get_devdata(master);
+
+ spfi_reset(spfi);
+
+ return 0;
+}
+
+static void img_spfi_config(struct spi_master *master, struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct img_spfi *spfi = spi_master_get_devdata(spi->master);
+ u32 val, div;
+
+ /*
+ * output = spfi_clk * (BITCLK / 512), where BITCLK must be a
+ * power of 2 up to 128
+ */
+ div = DIV_ROUND_UP(clk_get_rate(spfi->spfi_clk), xfer->speed_hz);
+ div = clamp(512 / (1 << get_count_order(div)), 1, 128);
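+	/*
+	 * Worked example (illustrative values, not from the original code):
+	 * with spfi_clk at 50 MHz and a requested 5 MHz, div becomes
+	 * DIV_ROUND_UP(50 MHz, 5 MHz) = 10, get_count_order(10) = 4, so
+	 * BITCLK = clamp(512 / 16, 1, 128) = 32 and the output clock is
+	 * 50 MHz * 32 / 512 = 3.125 MHz, at or below the requested rate.
+	 */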
+
+ val = spfi_readl(spfi, SPFI_DEVICE_PARAMETER(spi->chip_select));
+ val &= ~(SPFI_DEVICE_PARAMETER_BITCLK_MASK <<
+ SPFI_DEVICE_PARAMETER_BITCLK_SHIFT);
+ val |= div << SPFI_DEVICE_PARAMETER_BITCLK_SHIFT;
+ spfi_writel(spfi, val, SPFI_DEVICE_PARAMETER(spi->chip_select));
+
+ spfi_writel(spfi, xfer->len << SPFI_TRANSACTION_TSIZE_SHIFT,
+ SPFI_TRANSACTION);
+
+ val = spfi_readl(spfi, SPFI_CONTROL);
+ val &= ~(SPFI_CONTROL_SEND_DMA | SPFI_CONTROL_GET_DMA);
+ if (xfer->tx_buf)
+ val |= SPFI_CONTROL_SEND_DMA;
+ if (xfer->rx_buf)
+ val |= SPFI_CONTROL_GET_DMA;
+ val &= ~(SPFI_CONTROL_TMODE_MASK << SPFI_CONTROL_TMODE_SHIFT);
+ if (xfer->tx_nbits == SPI_NBITS_DUAL &&
+ xfer->rx_nbits == SPI_NBITS_DUAL)
+ val |= SPFI_CONTROL_TMODE_DUAL << SPFI_CONTROL_TMODE_SHIFT;
+ else if (xfer->tx_nbits == SPI_NBITS_QUAD &&
+ xfer->rx_nbits == SPI_NBITS_QUAD)
+ val |= SPFI_CONTROL_TMODE_QUAD << SPFI_CONTROL_TMODE_SHIFT;
+ val |= SPFI_CONTROL_SE;
+ spfi_writel(spfi, val, SPFI_CONTROL);
+}
+
+static int img_spfi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct img_spfi *spfi = spi_master_get_devdata(spi->master);
+ int ret;
+
+ if (xfer->len > SPFI_TRANSACTION_TSIZE_MASK) {
+ dev_err(spfi->dev,
+ "Transfer length (%d) is greater than the max supported (%d)",
+ xfer->len, SPFI_TRANSACTION_TSIZE_MASK);
+ return -EINVAL;
+ }
+
+ img_spfi_config(master, spi, xfer);
+ if (master->can_dma && master->can_dma(master, spi, xfer))
+ ret = img_spfi_start_dma(master, spi, xfer);
+ else
+ ret = img_spfi_start_pio(master, spi, xfer);
+
+ return ret;
+}
+
+static bool img_spfi_can_dma(struct spi_master *master, struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ if (xfer->len > SPFI_32BIT_FIFO_SIZE)
+ return true;
+ return false;
+}
+
+static irqreturn_t img_spfi_irq(int irq, void *dev_id)
+{
+ struct img_spfi *spfi = (struct img_spfi *)dev_id;
+ u32 status;
+
+ status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
+ if (status & SPFI_INTERRUPT_IACCESS) {
+ spfi_writel(spfi, SPFI_INTERRUPT_IACCESS, SPFI_INTERRUPT_CLEAR);
+ dev_err(spfi->dev, "Illegal access interrupt");
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int img_spfi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct img_spfi *spfi;
+ struct resource *res;
+ int ret;
+ u32 max_speed_hz;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*spfi));
+ if (!master)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, master);
+
+ spfi = spi_master_get_devdata(master);
+ spfi->dev = &pdev->dev;
+ spfi->master = master;
+ spin_lock_init(&spfi->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ spfi->regs = devm_ioremap_resource(spfi->dev, res);
+ if (IS_ERR(spfi->regs)) {
+ ret = PTR_ERR(spfi->regs);
+ goto put_spi;
+ }
+ spfi->phys = res->start;
+
+ spfi->irq = platform_get_irq(pdev, 0);
+ if (spfi->irq < 0) {
+ ret = spfi->irq;
+ goto put_spi;
+ }
+ ret = devm_request_irq(spfi->dev, spfi->irq, img_spfi_irq,
+ IRQ_TYPE_LEVEL_HIGH, dev_name(spfi->dev), spfi);
+ if (ret)
+ goto put_spi;
+
+ spfi->sys_clk = devm_clk_get(spfi->dev, "sys");
+ if (IS_ERR(spfi->sys_clk)) {
+ ret = PTR_ERR(spfi->sys_clk);
+ goto put_spi;
+ }
+ spfi->spfi_clk = devm_clk_get(spfi->dev, "spfi");
+ if (IS_ERR(spfi->spfi_clk)) {
+ ret = PTR_ERR(spfi->spfi_clk);
+ goto put_spi;
+ }
+
+ ret = clk_prepare_enable(spfi->sys_clk);
+ if (ret)
+ goto put_spi;
+ ret = clk_prepare_enable(spfi->spfi_clk);
+ if (ret)
+ goto disable_pclk;
+
+ spfi_reset(spfi);
+ /*
+ * Only enable the error (IACCESS) interrupt. In PIO mode we'll
+ * poll the status of the FIFOs.
+ */
+ spfi_writel(spfi, SPFI_INTERRUPT_IACCESS, SPFI_INTERRUPT_ENABLE);
+
+ master->auto_runtime_pm = true;
+ master->bus_num = pdev->id;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_TX_DUAL | SPI_RX_DUAL;
+ if (of_property_read_bool(spfi->dev->of_node, "img,supports-quad-mode"))
+ master->mode_bits |= SPI_TX_QUAD | SPI_RX_QUAD;
+ master->dev.of_node = pdev->dev.of_node;
+ master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(8);
+ master->max_speed_hz = clk_get_rate(spfi->spfi_clk) / 4;
+ master->min_speed_hz = clk_get_rate(spfi->spfi_clk) / 512;
+
+ /*
+	 * The maximum speed supported by the SPFI is the lower of 1/4 of
+	 * the SPFI clock and the "spfi-max-frequency" value defined in
+	 * the device tree.
+	 * If no value is defined in the device tree, assume the maximum
+	 * supported speed is 1/4 of the SPFI clock.
+ */
+ if (!of_property_read_u32(spfi->dev->of_node, "spfi-max-frequency",
+ &max_speed_hz)) {
+ if (master->max_speed_hz > max_speed_hz)
+ master->max_speed_hz = max_speed_hz;
+ }
+
+ master->transfer_one = img_spfi_transfer_one;
+ master->prepare_message = img_spfi_prepare;
+ master->unprepare_message = img_spfi_unprepare;
+ master->handle_err = img_spfi_handle_err;
+ master->use_gpio_descriptors = true;
+
+ spfi->tx_ch = dma_request_chan(spfi->dev, "tx");
+ if (IS_ERR(spfi->tx_ch)) {
+ ret = PTR_ERR(spfi->tx_ch);
+ spfi->tx_ch = NULL;
+ if (ret == -EPROBE_DEFER)
+ goto disable_pm;
+ }
+
+ spfi->rx_ch = dma_request_chan(spfi->dev, "rx");
+ if (IS_ERR(spfi->rx_ch)) {
+ ret = PTR_ERR(spfi->rx_ch);
+ spfi->rx_ch = NULL;
+ if (ret == -EPROBE_DEFER)
+ goto disable_pm;
+ }
+
+ if (!spfi->tx_ch || !spfi->rx_ch) {
+ if (spfi->tx_ch)
+ dma_release_channel(spfi->tx_ch);
+ if (spfi->rx_ch)
+ dma_release_channel(spfi->rx_ch);
+ spfi->tx_ch = NULL;
+ spfi->rx_ch = NULL;
+ dev_warn(spfi->dev, "Failed to get DMA channels, falling back to PIO mode\n");
+ } else {
+ master->dma_tx = spfi->tx_ch;
+ master->dma_rx = spfi->rx_ch;
+ master->can_dma = img_spfi_can_dma;
+ }
+
+ pm_runtime_set_active(spfi->dev);
+ pm_runtime_enable(spfi->dev);
+
+ ret = devm_spi_register_master(spfi->dev, master);
+ if (ret)
+ goto disable_pm;
+
+ return 0;
+
+disable_pm:
+ pm_runtime_disable(spfi->dev);
+ if (spfi->rx_ch)
+ dma_release_channel(spfi->rx_ch);
+ if (spfi->tx_ch)
+ dma_release_channel(spfi->tx_ch);
+ clk_disable_unprepare(spfi->spfi_clk);
+disable_pclk:
+ clk_disable_unprepare(spfi->sys_clk);
+put_spi:
+ spi_master_put(master);
+
+ return ret;
+}
+
+static int img_spfi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct img_spfi *spfi = spi_master_get_devdata(master);
+
+ if (spfi->tx_ch)
+ dma_release_channel(spfi->tx_ch);
+ if (spfi->rx_ch)
+ dma_release_channel(spfi->rx_ch);
+
+ pm_runtime_disable(spfi->dev);
+ if (!pm_runtime_status_suspended(spfi->dev)) {
+ clk_disable_unprepare(spfi->spfi_clk);
+ clk_disable_unprepare(spfi->sys_clk);
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int img_spfi_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct img_spfi *spfi = spi_master_get_devdata(master);
+
+ clk_disable_unprepare(spfi->spfi_clk);
+ clk_disable_unprepare(spfi->sys_clk);
+
+ return 0;
+}
+
+static int img_spfi_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct img_spfi *spfi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_prepare_enable(spfi->sys_clk);
+ if (ret)
+ return ret;
+ ret = clk_prepare_enable(spfi->spfi_clk);
+ if (ret) {
+ clk_disable_unprepare(spfi->sys_clk);
+ return ret;
+ }
+
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+#ifdef CONFIG_PM_SLEEP
+static int img_spfi_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+
+ return spi_master_suspend(master);
+}
+
+static int img_spfi_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct img_spfi *spfi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ return ret;
+ spfi_reset(spfi);
+ pm_runtime_put(dev);
+
+ return spi_master_resume(master);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops img_spfi_pm_ops = {
+ SET_RUNTIME_PM_OPS(img_spfi_runtime_suspend, img_spfi_runtime_resume,
+ NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(img_spfi_suspend, img_spfi_resume)
+};
+
+static const struct of_device_id img_spfi_of_match[] = {
+ { .compatible = "img,spfi", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, img_spfi_of_match);
+
+static struct platform_driver img_spfi_driver = {
+ .driver = {
+ .name = "img-spfi",
+ .pm = &img_spfi_pm_ops,
+ .of_match_table = of_match_ptr(img_spfi_of_match),
+ },
+ .probe = img_spfi_probe,
+ .remove = img_spfi_remove,
+};
+module_platform_driver(img_spfi_driver);
+
+MODULE_DESCRIPTION("IMG SPFI controller driver");
+MODULE_AUTHOR("Andrew Bresticker <abrestic@chromium.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
new file mode 100644
index 000000000..2c660a95c
--- /dev/null
+++ b/drivers/spi/spi-imx.c
@@ -0,0 +1,1960 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
+// Copyright (C) 2008 Juergen Beisert
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/types.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/property.h>
+
+#include <linux/dma/imx-dma.h>
+
+#define DRIVER_NAME "spi_imx"
+
+static bool use_dma = true;
+module_param(use_dma, bool, 0644);
+MODULE_PARM_DESC(use_dma, "Enable usage of DMA when available (default)");
+
+/* define polling limits */
+static unsigned int polling_limit_us = 30;
+module_param(polling_limit_us, uint, 0664);
+MODULE_PARM_DESC(polling_limit_us,
+ "time in us to run a transfer in polling mode\n");
+
+#define MXC_RPM_TIMEOUT 2000 /* 2000ms */
+
+#define MXC_CSPIRXDATA 0x00
+#define MXC_CSPITXDATA 0x04
+#define MXC_CSPICTRL 0x08
+#define MXC_CSPIINT 0x0c
+#define MXC_RESET 0x1c
+
+/* generic defines to abstract from the different register layouts */
+#define MXC_INT_RR (1 << 0) /* Receive data ready interrupt */
+#define MXC_INT_TE (1 << 1) /* Transmit FIFO empty interrupt */
+#define MXC_INT_RDR	BIT(4)		/* Receive data threshold interrupt */
+
+/* The maximum bytes that a sdma BD can transfer. */
+#define MAX_SDMA_BD_BYTES (1 << 15)
+#define MX51_ECSPI_CTRL_MAX_BURST 512
+/* The maximum bytes that IMX53_ECSPI can transfer in slave mode. */
+#define MX53_MAX_TRANSFER_BYTES 512
+
+enum spi_imx_devtype {
+ IMX1_CSPI,
+ IMX21_CSPI,
+ IMX27_CSPI,
+ IMX31_CSPI,
+ IMX35_CSPI, /* CSPI on all i.mx except above */
+ IMX51_ECSPI, /* ECSPI on i.mx51 */
+ IMX53_ECSPI, /* ECSPI on i.mx53 and later */
+};
+
+struct spi_imx_data;
+
+struct spi_imx_devtype_data {
+ void (*intctrl)(struct spi_imx_data *spi_imx, int enable);
+ int (*prepare_message)(struct spi_imx_data *spi_imx, struct spi_message *msg);
+ int (*prepare_transfer)(struct spi_imx_data *spi_imx, struct spi_device *spi);
+ void (*trigger)(struct spi_imx_data *spi_imx);
+ int (*rx_available)(struct spi_imx_data *spi_imx);
+ void (*reset)(struct spi_imx_data *spi_imx);
+ void (*setup_wml)(struct spi_imx_data *spi_imx);
+ void (*disable)(struct spi_imx_data *spi_imx);
+ void (*disable_dma)(struct spi_imx_data *spi_imx);
+ bool has_dmamode;
+ bool has_slavemode;
+ unsigned int fifo_size;
+ bool dynamic_burst;
+ /*
+ * ERR009165 fixed or not:
+ * https://www.nxp.com/docs/en/errata/IMX6DQCE.pdf
+ */
+ bool tx_glitch_fixed;
+ enum spi_imx_devtype devtype;
+};
+
+struct spi_imx_data {
+ struct spi_controller *controller;
+ struct device *dev;
+
+ struct completion xfer_done;
+ void __iomem *base;
+ unsigned long base_phys;
+
+ struct clk *clk_per;
+ struct clk *clk_ipg;
+ unsigned long spi_clk;
+ unsigned int spi_bus_clk;
+
+ unsigned int bits_per_word;
+ unsigned int spi_drctl;
+
+ unsigned int count, remainder;
+ void (*tx)(struct spi_imx_data *spi_imx);
+ void (*rx)(struct spi_imx_data *spi_imx);
+ void *rx_buf;
+ const void *tx_buf;
+ unsigned int txfifo; /* number of words pushed in tx FIFO */
+ unsigned int dynamic_burst;
+ bool rx_only;
+
+ /* Slave mode */
+ bool slave_mode;
+ bool slave_aborted;
+ unsigned int slave_burst;
+
+ /* DMA */
+ bool usedma;
+ u32 wml;
+ struct completion dma_rx_completion;
+ struct completion dma_tx_completion;
+
+ const struct spi_imx_devtype_data *devtype_data;
+};
+
+static inline int is_imx27_cspi(struct spi_imx_data *d)
+{
+ return d->devtype_data->devtype == IMX27_CSPI;
+}
+
+static inline int is_imx35_cspi(struct spi_imx_data *d)
+{
+ return d->devtype_data->devtype == IMX35_CSPI;
+}
+
+static inline int is_imx51_ecspi(struct spi_imx_data *d)
+{
+ return d->devtype_data->devtype == IMX51_ECSPI;
+}
+
+static inline int is_imx53_ecspi(struct spi_imx_data *d)
+{
+ return d->devtype_data->devtype == IMX53_ECSPI;
+}
+
+#define MXC_SPI_BUF_RX(type) \
+static void spi_imx_buf_rx_##type(struct spi_imx_data *spi_imx) \
+{ \
+ unsigned int val = readl(spi_imx->base + MXC_CSPIRXDATA); \
+ \
+ if (spi_imx->rx_buf) { \
+ *(type *)spi_imx->rx_buf = val; \
+ spi_imx->rx_buf += sizeof(type); \
+ } \
+ \
+ spi_imx->remainder -= sizeof(type); \
+}
+
+#define MXC_SPI_BUF_TX(type) \
+static void spi_imx_buf_tx_##type(struct spi_imx_data *spi_imx) \
+{ \
+ type val = 0; \
+ \
+ if (spi_imx->tx_buf) { \
+ val = *(type *)spi_imx->tx_buf; \
+ spi_imx->tx_buf += sizeof(type); \
+ } \
+ \
+ spi_imx->count -= sizeof(type); \
+ \
+ writel(val, spi_imx->base + MXC_CSPITXDATA); \
+}
+
+MXC_SPI_BUF_RX(u8)
+MXC_SPI_BUF_TX(u8)
+MXC_SPI_BUF_RX(u16)
+MXC_SPI_BUF_TX(u16)
+MXC_SPI_BUF_RX(u32)
+MXC_SPI_BUF_TX(u32)
+
+/* First entry is reserved, second entry is valid only if SDHC_SPIEN is set
+ * (which is currently not the case in this driver)
+ */
+static int mxc_clkdivs[] = {0, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
+ 256, 384, 512, 768, 1024};
+
+/* MX21, MX27 */
+static unsigned int spi_imx_clkdiv_1(unsigned int fin,
+ unsigned int fspi, unsigned int max, unsigned int *fres)
+{
+ int i;
+
+ for (i = 2; i < max; i++)
+ if (fspi * mxc_clkdivs[i] >= fin)
+ break;
+
+ *fres = fin / mxc_clkdivs[i];
+ return i;
+}
+
+/* MX1, MX31, MX35, MX51 CSPI */
+static unsigned int spi_imx_clkdiv_2(unsigned int fin,
+ unsigned int fspi, unsigned int *fres)
+{
+ int i, div = 4;
+
+ for (i = 0; i < 7; i++) {
+ if (fspi * div >= fin)
+ goto out;
+ div <<= 1;
+ }
+
+out:
+ *fres = fin / div;
+ return i;
+}
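+
+/*
+ * Illustrative example (hypothetical numbers, not from the original code):
+ * for fin = 48 MHz and fspi = 1 MHz the divider doubles from 4 until
+ * fspi * div >= fin, stopping at div = 64 on the fifth iteration (i = 4),
+ * so the function returns 4 and reports *fres = 48 MHz / 64 = 750 kHz.
+ */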
+
+static int spi_imx_bytes_per_word(const int bits_per_word)
+{
+ if (bits_per_word <= 8)
+ return 1;
+ else if (bits_per_word <= 16)
+ return 2;
+ else
+ return 4;
+}
+
+static bool spi_imx_can_dma(struct spi_controller *controller, struct spi_device *spi,
+ struct spi_transfer *transfer)
+{
+ struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller);
+
+ if (!use_dma || controller->fallback)
+ return false;
+
+ if (!controller->dma_rx)
+ return false;
+
+ if (spi_imx->slave_mode)
+ return false;
+
+ if (transfer->len < spi_imx->devtype_data->fifo_size)
+ return false;
+
+ spi_imx->dynamic_burst = 0;
+
+ return true;
+}
+
+/*
+ * Note the number of natively supported chip selects for MX51 is 4. Some
+ * devices may have fewer actual SS pins but the register map supports 4. When
+ * using gpio chip selects the cs values passed into the macros below can go
+ * outside the range 0 - 3. We therefore need to limit the cs value to avoid
+ * corrupting bits outside the allocated locations.
+ *
+ * The simplest way to do this is to just mask the cs bits to 2 bits. This
+ * still allows all 4 native chip selects to work as well as gpio chip selects
+ * (which can use any of the 4 chip select configurations).
+ */
+
+#define MX51_ECSPI_CTRL 0x08
+#define MX51_ECSPI_CTRL_ENABLE (1 << 0)
+#define MX51_ECSPI_CTRL_XCH (1 << 2)
+#define MX51_ECSPI_CTRL_SMC (1 << 3)
+#define MX51_ECSPI_CTRL_MODE_MASK (0xf << 4)
+#define MX51_ECSPI_CTRL_DRCTL(drctl) ((drctl) << 16)
+#define MX51_ECSPI_CTRL_POSTDIV_OFFSET 8
+#define MX51_ECSPI_CTRL_PREDIV_OFFSET 12
+#define MX51_ECSPI_CTRL_CS(cs) ((cs & 3) << 18)
+#define MX51_ECSPI_CTRL_BL_OFFSET 20
+#define MX51_ECSPI_CTRL_BL_MASK (0xfff << 20)
+
+#define MX51_ECSPI_CONFIG 0x0c
+#define MX51_ECSPI_CONFIG_SCLKPHA(cs) (1 << ((cs & 3) + 0))
+#define MX51_ECSPI_CONFIG_SCLKPOL(cs) (1 << ((cs & 3) + 4))
+#define MX51_ECSPI_CONFIG_SBBCTRL(cs) (1 << ((cs & 3) + 8))
+#define MX51_ECSPI_CONFIG_SSBPOL(cs) (1 << ((cs & 3) + 12))
+#define MX51_ECSPI_CONFIG_SCLKCTL(cs) (1 << ((cs & 3) + 20))
+
+#define MX51_ECSPI_INT 0x10
+#define MX51_ECSPI_INT_TEEN (1 << 0)
+#define MX51_ECSPI_INT_RREN (1 << 3)
+#define MX51_ECSPI_INT_RDREN (1 << 4)
+
+#define MX51_ECSPI_DMA 0x14
+#define MX51_ECSPI_DMA_TX_WML(wml) ((wml) & 0x3f)
+#define MX51_ECSPI_DMA_RX_WML(wml) (((wml) & 0x3f) << 16)
+#define MX51_ECSPI_DMA_RXT_WML(wml) (((wml) & 0x3f) << 24)
+
+#define MX51_ECSPI_DMA_TEDEN (1 << 7)
+#define MX51_ECSPI_DMA_RXDEN (1 << 23)
+#define MX51_ECSPI_DMA_RXTDEN (1 << 31)
+
+#define MX51_ECSPI_STAT 0x18
+#define MX51_ECSPI_STAT_RR (1 << 3)
+
+#define MX51_ECSPI_TESTREG 0x20
+#define MX51_ECSPI_TESTREG_LBC BIT(31)
+
+static void spi_imx_buf_rx_swap_u32(struct spi_imx_data *spi_imx)
+{
+ unsigned int val = readl(spi_imx->base + MXC_CSPIRXDATA);
+
+ if (spi_imx->rx_buf) {
+#ifdef __LITTLE_ENDIAN
+ unsigned int bytes_per_word;
+
+ bytes_per_word = spi_imx_bytes_per_word(spi_imx->bits_per_word);
+ if (bytes_per_word == 1)
+ swab32s(&val);
+ else if (bytes_per_word == 2)
+ swahw32s(&val);
+#endif
+ *(u32 *)spi_imx->rx_buf = val;
+ spi_imx->rx_buf += sizeof(u32);
+ }
+
+ spi_imx->remainder -= sizeof(u32);
+}
+
+static void spi_imx_buf_rx_swap(struct spi_imx_data *spi_imx)
+{
+ int unaligned;
+ u32 val;
+
+ unaligned = spi_imx->remainder % 4;
+
+ if (!unaligned) {
+ spi_imx_buf_rx_swap_u32(spi_imx);
+ return;
+ }
+
+ if (spi_imx_bytes_per_word(spi_imx->bits_per_word) == 2) {
+ spi_imx_buf_rx_u16(spi_imx);
+ return;
+ }
+
+ val = readl(spi_imx->base + MXC_CSPIRXDATA);
+
+ while (unaligned--) {
+ if (spi_imx->rx_buf) {
+ *(u8 *)spi_imx->rx_buf = (val >> (8 * unaligned)) & 0xff;
+ spi_imx->rx_buf++;
+ }
+ spi_imx->remainder--;
+ }
+}
+
+static void spi_imx_buf_tx_swap_u32(struct spi_imx_data *spi_imx)
+{
+ u32 val = 0;
+#ifdef __LITTLE_ENDIAN
+ unsigned int bytes_per_word;
+#endif
+
+ if (spi_imx->tx_buf) {
+ val = *(u32 *)spi_imx->tx_buf;
+ spi_imx->tx_buf += sizeof(u32);
+ }
+
+ spi_imx->count -= sizeof(u32);
+#ifdef __LITTLE_ENDIAN
+ bytes_per_word = spi_imx_bytes_per_word(spi_imx->bits_per_word);
+
+ if (bytes_per_word == 1)
+ swab32s(&val);
+ else if (bytes_per_word == 2)
+ swahw32s(&val);
+#endif
+ writel(val, spi_imx->base + MXC_CSPITXDATA);
+}
+
+static void spi_imx_buf_tx_swap(struct spi_imx_data *spi_imx)
+{
+ int unaligned;
+ u32 val = 0;
+
+ unaligned = spi_imx->count % 4;
+
+ if (!unaligned) {
+ spi_imx_buf_tx_swap_u32(spi_imx);
+ return;
+ }
+
+ if (spi_imx_bytes_per_word(spi_imx->bits_per_word) == 2) {
+ spi_imx_buf_tx_u16(spi_imx);
+ return;
+ }
+
+ while (unaligned--) {
+ if (spi_imx->tx_buf) {
+ val |= *(u8 *)spi_imx->tx_buf << (8 * unaligned);
+ spi_imx->tx_buf++;
+ }
+ spi_imx->count--;
+ }
+
+ writel(val, spi_imx->base + MXC_CSPITXDATA);
+}
+
+static void mx53_ecspi_rx_slave(struct spi_imx_data *spi_imx)
+{
+ u32 val = be32_to_cpu(readl(spi_imx->base + MXC_CSPIRXDATA));
+
+ if (spi_imx->rx_buf) {
+ int n_bytes = spi_imx->slave_burst % sizeof(val);
+
+ if (!n_bytes)
+ n_bytes = sizeof(val);
+
+ memcpy(spi_imx->rx_buf,
+ ((u8 *)&val) + sizeof(val) - n_bytes, n_bytes);
+
+ spi_imx->rx_buf += n_bytes;
+ spi_imx->slave_burst -= n_bytes;
+ }
+
+ spi_imx->remainder -= sizeof(u32);
+}
+
+static void mx53_ecspi_tx_slave(struct spi_imx_data *spi_imx)
+{
+ u32 val = 0;
+ int n_bytes = spi_imx->count % sizeof(val);
+
+ if (!n_bytes)
+ n_bytes = sizeof(val);
+
+ if (spi_imx->tx_buf) {
+ memcpy(((u8 *)&val) + sizeof(val) - n_bytes,
+ spi_imx->tx_buf, n_bytes);
+ val = cpu_to_be32(val);
+ spi_imx->tx_buf += n_bytes;
+ }
+
+ spi_imx->count -= n_bytes;
+
+ writel(val, spi_imx->base + MXC_CSPITXDATA);
+}
+
+/* MX51 eCSPI */
+static unsigned int mx51_ecspi_clkdiv(struct spi_imx_data *spi_imx,
+ unsigned int fspi, unsigned int *fres)
+{
+ /*
+ * there are two 4-bit dividers, the pre-divider divides by
+ * $pre, the post-divider by 2^$post
+ */
+ unsigned int pre, post;
+ unsigned int fin = spi_imx->spi_clk;
+
+ fspi = min(fspi, fin);
+
+ post = fls(fin) - fls(fspi);
+ if (fin > fspi << post)
+ post++;
+
+ /* now we have: (fin <= fspi << post) with post being minimal */
+
+ post = max(4U, post) - 4;
+ if (unlikely(post > 0xf)) {
+ dev_err(spi_imx->dev, "cannot set clock freq: %u (base freq: %u)\n",
+ fspi, fin);
+ return 0xff;
+ }
+
+ pre = DIV_ROUND_UP(fin, fspi << post) - 1;
+
+ dev_dbg(spi_imx->dev, "%s: fin: %u, fspi: %u, post: %u, pre: %u\n",
+ __func__, fin, fspi, post, pre);
+
+ /* Resulting frequency for the SCLK line. */
+ *fres = (fin / (pre + 1)) >> post;
+
+ return (pre << MX51_ECSPI_CTRL_PREDIV_OFFSET) |
+ (post << MX51_ECSPI_CTRL_POSTDIV_OFFSET);
+}
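+
+/*
+ * Illustrative example (hypothetical numbers, not from the original code):
+ * for fin = 60 MHz and fspi = 10 MHz, fls(fin) - fls(fspi) = 2 and
+ * 10 MHz << 2 < 60 MHz, so post is bumped to 3 and then reduced to
+ * max(4, 3) - 4 = 0; pre = DIV_ROUND_UP(60 MHz, 10 MHz) - 1 = 5, giving
+ * an SCLK of (60 MHz / 6) >> 0 = 10 MHz exactly.
+ */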
+
+static void mx51_ecspi_intctrl(struct spi_imx_data *spi_imx, int enable)
+{
+ unsigned int val = 0;
+
+ if (enable & MXC_INT_TE)
+ val |= MX51_ECSPI_INT_TEEN;
+
+ if (enable & MXC_INT_RR)
+ val |= MX51_ECSPI_INT_RREN;
+
+ if (enable & MXC_INT_RDR)
+ val |= MX51_ECSPI_INT_RDREN;
+
+ writel(val, spi_imx->base + MX51_ECSPI_INT);
+}
+
+static void mx51_ecspi_trigger(struct spi_imx_data *spi_imx)
+{
+ u32 reg;
+
+ reg = readl(spi_imx->base + MX51_ECSPI_CTRL);
+ reg |= MX51_ECSPI_CTRL_XCH;
+ writel(reg, spi_imx->base + MX51_ECSPI_CTRL);
+}
+
+static void mx51_disable_dma(struct spi_imx_data *spi_imx)
+{
+ writel(0, spi_imx->base + MX51_ECSPI_DMA);
+}
+
+static void mx51_ecspi_disable(struct spi_imx_data *spi_imx)
+{
+ u32 ctrl;
+
+ ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
+ ctrl &= ~MX51_ECSPI_CTRL_ENABLE;
+ writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
+}
+
+static int mx51_ecspi_prepare_message(struct spi_imx_data *spi_imx,
+ struct spi_message *msg)
+{
+ struct spi_device *spi = msg->spi;
+ struct spi_transfer *xfer;
+ u32 ctrl = MX51_ECSPI_CTRL_ENABLE;
+ u32 min_speed_hz = ~0U;
+ u32 testreg, delay;
+ u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG);
+ u32 current_cfg = cfg;
+
+ /* set Master or Slave mode */
+ if (spi_imx->slave_mode)
+ ctrl &= ~MX51_ECSPI_CTRL_MODE_MASK;
+ else
+ ctrl |= MX51_ECSPI_CTRL_MODE_MASK;
+
+ /*
+ * Enable SPI_RDY handling (falling edge/level triggered).
+ */
+ if (spi->mode & SPI_READY)
+ ctrl |= MX51_ECSPI_CTRL_DRCTL(spi_imx->spi_drctl);
+
+ /* set chip select to use */
+ ctrl |= MX51_ECSPI_CTRL_CS(spi->chip_select);
+
+ /*
+	 * The ctrl register must be written first, with the EN bit set;
+	 * until then the other registers must not be written to.
+ */
+ writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
+
+ testreg = readl(spi_imx->base + MX51_ECSPI_TESTREG);
+ if (spi->mode & SPI_LOOP)
+ testreg |= MX51_ECSPI_TESTREG_LBC;
+ else
+ testreg &= ~MX51_ECSPI_TESTREG_LBC;
+ writel(testreg, spi_imx->base + MX51_ECSPI_TESTREG);
+
+ /*
+	 * eCSPI burst completion by the chip select signal in slave mode
+	 * is not functional on the i.MX53 SoC, so configure the SPI burst to
+	 * complete when BURST_LENGTH + 1 bits have been received.
+ */
+ if (spi_imx->slave_mode && is_imx53_ecspi(spi_imx))
+ cfg &= ~MX51_ECSPI_CONFIG_SBBCTRL(spi->chip_select);
+ else
+ cfg |= MX51_ECSPI_CONFIG_SBBCTRL(spi->chip_select);
+
+ if (spi->mode & SPI_CPOL) {
+ cfg |= MX51_ECSPI_CONFIG_SCLKPOL(spi->chip_select);
+ cfg |= MX51_ECSPI_CONFIG_SCLKCTL(spi->chip_select);
+ } else {
+ cfg &= ~MX51_ECSPI_CONFIG_SCLKPOL(spi->chip_select);
+ cfg &= ~MX51_ECSPI_CONFIG_SCLKCTL(spi->chip_select);
+ }
+
+ if (spi->mode & SPI_CS_HIGH)
+ cfg |= MX51_ECSPI_CONFIG_SSBPOL(spi->chip_select);
+ else
+ cfg &= ~MX51_ECSPI_CONFIG_SSBPOL(spi->chip_select);
+
+ if (cfg == current_cfg)
+ return 0;
+
+ writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG);
+
+ /*
+	 * Wait until the changes in the configuration register CONFIGREG
+	 * propagate into the hardware. It takes exactly one tick of the
+	 * SCLK clock, but we wait for two SCLK ticks just to be sure. The
+	 * delay it takes for the hardware to apply the changes is
+	 * noticeable if the SCLK clock runs very slowly. In such a case, if
+	 * the polarity of SCLK is to be inverted, the GPIO chip select might
+	 * be asserted before the SCLK polarity changes, which would disrupt
+	 * the SPI communication, as the device on the other end would treat
+	 * the change of SCLK polarity as a clock tick already.
+	 *
+	 * Because spi_imx->spi_bus_clk is only set in the prepare_message
+	 * callback, iterate over all the transfers in the spi_message, find
+	 * the one with the lowest bus frequency, and use that bus frequency
+	 * for the delay calculation. In case all transfers have
+	 * speed_hz == 0, min_speed_hz is ~0 and the resulting delay is zero.
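+	 *
+	 * For example (assumed numbers): at min_speed_hz = 100 kHz the delay
+	 * below is 2 * 1000000 / 100000 = 20 us and usleep_range() is used,
+	 * while at 1 MHz it is only 2 us and udelay() suffices.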
+ */
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ if (!xfer->speed_hz)
+ continue;
+ min_speed_hz = min(xfer->speed_hz, min_speed_hz);
+ }
+
+ delay = (2 * 1000000) / min_speed_hz;
+ if (likely(delay < 10)) /* SCLK is faster than 200 kHz */
+ udelay(delay);
+ else /* SCLK is _very_ slow */
+ usleep_range(delay, delay + 10);
+
+ return 0;
+}
+
+static void mx51_configure_cpha(struct spi_imx_data *spi_imx,
+ struct spi_device *spi)
+{
+ bool cpha = (spi->mode & SPI_CPHA);
+ bool flip_cpha = (spi->mode & SPI_RX_CPHA_FLIP) && spi_imx->rx_only;
+ u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG);
+
+ /* Flip cpha logical value iff flip_cpha */
+ cpha ^= flip_cpha;
+
+ if (cpha)
+ cfg |= MX51_ECSPI_CONFIG_SCLKPHA(spi->chip_select);
+ else
+ cfg &= ~MX51_ECSPI_CONFIG_SCLKPHA(spi->chip_select);
+
+ writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG);
+}
+
+static int mx51_ecspi_prepare_transfer(struct spi_imx_data *spi_imx,
+ struct spi_device *spi)
+{
+ u32 ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
+ u32 clk;
+
+ /* Clear BL field and set the right value */
+ ctrl &= ~MX51_ECSPI_CTRL_BL_MASK;
+ if (spi_imx->slave_mode && is_imx53_ecspi(spi_imx))
+ ctrl |= (spi_imx->slave_burst * 8 - 1)
+ << MX51_ECSPI_CTRL_BL_OFFSET;
+ else
+ ctrl |= (spi_imx->bits_per_word - 1)
+ << MX51_ECSPI_CTRL_BL_OFFSET;
+
+ /* set clock speed */
+ ctrl &= ~(0xf << MX51_ECSPI_CTRL_POSTDIV_OFFSET |
+ 0xf << MX51_ECSPI_CTRL_PREDIV_OFFSET);
+ ctrl |= mx51_ecspi_clkdiv(spi_imx, spi_imx->spi_bus_clk, &clk);
+ spi_imx->spi_bus_clk = clk;
+
+ mx51_configure_cpha(spi_imx, spi);
+
+ /*
+	 * ERR009165: on chips before the i.MX6UL, work in XCH mode instead
+	 * of SMC, as is done for PIO.
+ */
+ if (spi_imx->usedma && spi_imx->devtype_data->tx_glitch_fixed)
+ ctrl |= MX51_ECSPI_CTRL_SMC;
+ else
+ ctrl &= ~MX51_ECSPI_CTRL_SMC;
+
+ writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
+
+ return 0;
+}
+
+static void mx51_setup_wml(struct spi_imx_data *spi_imx)
+{
+ u32 tx_wml = 0;
+
+ if (spi_imx->devtype_data->tx_glitch_fixed)
+ tx_wml = spi_imx->wml;
+ /*
+	 * Configure the DMA register: set up the watermarks
+	 * and enable the DMA requests.
+ */
+ writel(MX51_ECSPI_DMA_RX_WML(spi_imx->wml - 1) |
+ MX51_ECSPI_DMA_TX_WML(tx_wml) |
+ MX51_ECSPI_DMA_RXT_WML(spi_imx->wml) |
+ MX51_ECSPI_DMA_TEDEN | MX51_ECSPI_DMA_RXDEN |
+ MX51_ECSPI_DMA_RXTDEN, spi_imx->base + MX51_ECSPI_DMA);
+}
+
+static int mx51_ecspi_rx_available(struct spi_imx_data *spi_imx)
+{
+ return readl(spi_imx->base + MX51_ECSPI_STAT) & MX51_ECSPI_STAT_RR;
+}
+
+static void mx51_ecspi_reset(struct spi_imx_data *spi_imx)
+{
+ /* drain receive buffer */
+ while (mx51_ecspi_rx_available(spi_imx))
+ readl(spi_imx->base + MXC_CSPIRXDATA);
+}
+
+#define MX31_INTREG_TEEN (1 << 0)
+#define MX31_INTREG_RREN (1 << 3)
+
+#define MX31_CSPICTRL_ENABLE (1 << 0)
+#define MX31_CSPICTRL_MASTER (1 << 1)
+#define MX31_CSPICTRL_XCH (1 << 2)
+#define MX31_CSPICTRL_SMC (1 << 3)
+#define MX31_CSPICTRL_POL (1 << 4)
+#define MX31_CSPICTRL_PHA (1 << 5)
+#define MX31_CSPICTRL_SSCTL (1 << 6)
+#define MX31_CSPICTRL_SSPOL (1 << 7)
+#define MX31_CSPICTRL_BC_SHIFT 8
+#define MX35_CSPICTRL_BL_SHIFT 20
+#define MX31_CSPICTRL_CS_SHIFT 24
+#define MX35_CSPICTRL_CS_SHIFT 12
+#define MX31_CSPICTRL_DR_SHIFT 16
+
+#define MX31_CSPI_DMAREG 0x10
+#define MX31_DMAREG_RH_DEN (1<<4)
+#define MX31_DMAREG_TH_DEN (1<<1)
+
+#define MX31_CSPISTATUS 0x14
+#define MX31_STATUS_RR (1 << 3)
+
+#define MX31_CSPI_TESTREG 0x1C
+#define MX31_TEST_LBC (1 << 14)
+
+/* These functions also work for the i.MX35, but be aware that
+ * the i.MX35 has a slightly different register layout for bits
+ * we do not use here.
+ */
+static void mx31_intctrl(struct spi_imx_data *spi_imx, int enable)
+{
+ unsigned int val = 0;
+
+ if (enable & MXC_INT_TE)
+ val |= MX31_INTREG_TEEN;
+ if (enable & MXC_INT_RR)
+ val |= MX31_INTREG_RREN;
+
+ writel(val, spi_imx->base + MXC_CSPIINT);
+}
+
+static void mx31_trigger(struct spi_imx_data *spi_imx)
+{
+ unsigned int reg;
+
+ reg = readl(spi_imx->base + MXC_CSPICTRL);
+ reg |= MX31_CSPICTRL_XCH;
+ writel(reg, spi_imx->base + MXC_CSPICTRL);
+}
+
+static int mx31_prepare_message(struct spi_imx_data *spi_imx,
+ struct spi_message *msg)
+{
+ return 0;
+}
+
+static int mx31_prepare_transfer(struct spi_imx_data *spi_imx,
+ struct spi_device *spi)
+{
+ unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER;
+ unsigned int clk;
+
+ reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, spi_imx->spi_bus_clk, &clk) <<
+ MX31_CSPICTRL_DR_SHIFT;
+ spi_imx->spi_bus_clk = clk;
+
+ if (is_imx35_cspi(spi_imx)) {
+ reg |= (spi_imx->bits_per_word - 1) << MX35_CSPICTRL_BL_SHIFT;
+ reg |= MX31_CSPICTRL_SSCTL;
+ } else {
+ reg |= (spi_imx->bits_per_word - 1) << MX31_CSPICTRL_BC_SHIFT;
+ }
+
+ if (spi->mode & SPI_CPHA)
+ reg |= MX31_CSPICTRL_PHA;
+ if (spi->mode & SPI_CPOL)
+ reg |= MX31_CSPICTRL_POL;
+ if (spi->mode & SPI_CS_HIGH)
+ reg |= MX31_CSPICTRL_SSPOL;
+ if (!spi->cs_gpiod)
+ reg |= (spi->chip_select) <<
+ (is_imx35_cspi(spi_imx) ? MX35_CSPICTRL_CS_SHIFT :
+ MX31_CSPICTRL_CS_SHIFT);
+
+ if (spi_imx->usedma)
+ reg |= MX31_CSPICTRL_SMC;
+
+ writel(reg, spi_imx->base + MXC_CSPICTRL);
+
+ reg = readl(spi_imx->base + MX31_CSPI_TESTREG);
+ if (spi->mode & SPI_LOOP)
+ reg |= MX31_TEST_LBC;
+ else
+ reg &= ~MX31_TEST_LBC;
+ writel(reg, spi_imx->base + MX31_CSPI_TESTREG);
+
+ if (spi_imx->usedma) {
+ /*
+ * configure DMA requests when RXFIFO is half full and
+ * when TXFIFO is half empty
+ */
+ writel(MX31_DMAREG_RH_DEN | MX31_DMAREG_TH_DEN,
+ spi_imx->base + MX31_CSPI_DMAREG);
+ }
+
+ return 0;
+}
+
+static int mx31_rx_available(struct spi_imx_data *spi_imx)
+{
+ return readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR;
+}
+
+static void mx31_reset(struct spi_imx_data *spi_imx)
+{
+ /* drain receive buffer */
+ while (readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR)
+ readl(spi_imx->base + MXC_CSPIRXDATA);
+}
+
+#define MX21_INTREG_RR (1 << 4)
+#define MX21_INTREG_TEEN (1 << 9)
+#define MX21_INTREG_RREN (1 << 13)
+
+#define MX21_CSPICTRL_POL (1 << 5)
+#define MX21_CSPICTRL_PHA (1 << 6)
+#define MX21_CSPICTRL_SSPOL (1 << 8)
+#define MX21_CSPICTRL_XCH (1 << 9)
+#define MX21_CSPICTRL_ENABLE (1 << 10)
+#define MX21_CSPICTRL_MASTER (1 << 11)
+#define MX21_CSPICTRL_DR_SHIFT 14
+#define MX21_CSPICTRL_CS_SHIFT 19
+
+static void mx21_intctrl(struct spi_imx_data *spi_imx, int enable)
+{
+ unsigned int val = 0;
+
+ if (enable & MXC_INT_TE)
+ val |= MX21_INTREG_TEEN;
+ if (enable & MXC_INT_RR)
+ val |= MX21_INTREG_RREN;
+
+ writel(val, spi_imx->base + MXC_CSPIINT);
+}
+
+static void mx21_trigger(struct spi_imx_data *spi_imx)
+{
+ unsigned int reg;
+
+ reg = readl(spi_imx->base + MXC_CSPICTRL);
+ reg |= MX21_CSPICTRL_XCH;
+ writel(reg, spi_imx->base + MXC_CSPICTRL);
+}
+
+static int mx21_prepare_message(struct spi_imx_data *spi_imx,
+ struct spi_message *msg)
+{
+ return 0;
+}
+
+static int mx21_prepare_transfer(struct spi_imx_data *spi_imx,
+ struct spi_device *spi)
+{
+ unsigned int reg = MX21_CSPICTRL_ENABLE | MX21_CSPICTRL_MASTER;
+ unsigned int max = is_imx27_cspi(spi_imx) ? 16 : 18;
+ unsigned int clk;
+
+ reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, spi_imx->spi_bus_clk, max, &clk)
+ << MX21_CSPICTRL_DR_SHIFT;
+ spi_imx->spi_bus_clk = clk;
+
+ reg |= spi_imx->bits_per_word - 1;
+
+ if (spi->mode & SPI_CPHA)
+ reg |= MX21_CSPICTRL_PHA;
+ if (spi->mode & SPI_CPOL)
+ reg |= MX21_CSPICTRL_POL;
+ if (spi->mode & SPI_CS_HIGH)
+ reg |= MX21_CSPICTRL_SSPOL;
+ if (!spi->cs_gpiod)
+ reg |= spi->chip_select << MX21_CSPICTRL_CS_SHIFT;
+
+ writel(reg, spi_imx->base + MXC_CSPICTRL);
+
+ return 0;
+}
+
+static int mx21_rx_available(struct spi_imx_data *spi_imx)
+{
+ return readl(spi_imx->base + MXC_CSPIINT) & MX21_INTREG_RR;
+}
+
+static void mx21_reset(struct spi_imx_data *spi_imx)
+{
+ writel(1, spi_imx->base + MXC_RESET);
+}
+
+#define MX1_INTREG_RR (1 << 3)
+#define MX1_INTREG_TEEN (1 << 8)
+#define MX1_INTREG_RREN (1 << 11)
+
+#define MX1_CSPICTRL_POL (1 << 4)
+#define MX1_CSPICTRL_PHA (1 << 5)
+#define MX1_CSPICTRL_XCH (1 << 8)
+#define MX1_CSPICTRL_ENABLE (1 << 9)
+#define MX1_CSPICTRL_MASTER (1 << 10)
+#define MX1_CSPICTRL_DR_SHIFT 13
+
+static void mx1_intctrl(struct spi_imx_data *spi_imx, int enable)
+{
+ unsigned int val = 0;
+
+ if (enable & MXC_INT_TE)
+ val |= MX1_INTREG_TEEN;
+ if (enable & MXC_INT_RR)
+ val |= MX1_INTREG_RREN;
+
+ writel(val, spi_imx->base + MXC_CSPIINT);
+}
+
+static void mx1_trigger(struct spi_imx_data *spi_imx)
+{
+ unsigned int reg;
+
+ reg = readl(spi_imx->base + MXC_CSPICTRL);
+ reg |= MX1_CSPICTRL_XCH;
+ writel(reg, spi_imx->base + MXC_CSPICTRL);
+}
+
+static int mx1_prepare_message(struct spi_imx_data *spi_imx,
+ struct spi_message *msg)
+{
+ return 0;
+}
+
+static int mx1_prepare_transfer(struct spi_imx_data *spi_imx,
+ struct spi_device *spi)
+{
+ unsigned int reg = MX1_CSPICTRL_ENABLE | MX1_CSPICTRL_MASTER;
+ unsigned int clk;
+
+ reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, spi_imx->spi_bus_clk, &clk) <<
+ MX1_CSPICTRL_DR_SHIFT;
+ spi_imx->spi_bus_clk = clk;
+
+ reg |= spi_imx->bits_per_word - 1;
+
+ if (spi->mode & SPI_CPHA)
+ reg |= MX1_CSPICTRL_PHA;
+ if (spi->mode & SPI_CPOL)
+ reg |= MX1_CSPICTRL_POL;
+
+ writel(reg, spi_imx->base + MXC_CSPICTRL);
+
+ return 0;
+}
+
+static int mx1_rx_available(struct spi_imx_data *spi_imx)
+{
+ return readl(spi_imx->base + MXC_CSPIINT) & MX1_INTREG_RR;
+}
+
+static void mx1_reset(struct spi_imx_data *spi_imx)
+{
+ writel(1, spi_imx->base + MXC_RESET);
+}
+
+static struct spi_imx_devtype_data imx1_cspi_devtype_data = {
+ .intctrl = mx1_intctrl,
+ .prepare_message = mx1_prepare_message,
+ .prepare_transfer = mx1_prepare_transfer,
+ .trigger = mx1_trigger,
+ .rx_available = mx1_rx_available,
+ .reset = mx1_reset,
+ .fifo_size = 8,
+ .has_dmamode = false,
+ .dynamic_burst = false,
+ .has_slavemode = false,
+ .devtype = IMX1_CSPI,
+};
+
+static struct spi_imx_devtype_data imx21_cspi_devtype_data = {
+ .intctrl = mx21_intctrl,
+ .prepare_message = mx21_prepare_message,
+ .prepare_transfer = mx21_prepare_transfer,
+ .trigger = mx21_trigger,
+ .rx_available = mx21_rx_available,
+ .reset = mx21_reset,
+ .fifo_size = 8,
+ .has_dmamode = false,
+ .dynamic_burst = false,
+ .has_slavemode = false,
+ .devtype = IMX21_CSPI,
+};
+
+static struct spi_imx_devtype_data imx27_cspi_devtype_data = {
+	/* The i.MX27 CSPI shares its functions with the i.MX21 one */
+ .intctrl = mx21_intctrl,
+ .prepare_message = mx21_prepare_message,
+ .prepare_transfer = mx21_prepare_transfer,
+ .trigger = mx21_trigger,
+ .rx_available = mx21_rx_available,
+ .reset = mx21_reset,
+ .fifo_size = 8,
+ .has_dmamode = false,
+ .dynamic_burst = false,
+ .has_slavemode = false,
+ .devtype = IMX27_CSPI,
+};
+
+static struct spi_imx_devtype_data imx31_cspi_devtype_data = {
+ .intctrl = mx31_intctrl,
+ .prepare_message = mx31_prepare_message,
+ .prepare_transfer = mx31_prepare_transfer,
+ .trigger = mx31_trigger,
+ .rx_available = mx31_rx_available,
+ .reset = mx31_reset,
+ .fifo_size = 8,
+ .has_dmamode = false,
+ .dynamic_burst = false,
+ .has_slavemode = false,
+ .devtype = IMX31_CSPI,
+};
+
+static struct spi_imx_devtype_data imx35_cspi_devtype_data = {
+	/* The i.MX35 and later CSPI share their functions with the i.MX31 one */
+ .intctrl = mx31_intctrl,
+ .prepare_message = mx31_prepare_message,
+ .prepare_transfer = mx31_prepare_transfer,
+ .trigger = mx31_trigger,
+ .rx_available = mx31_rx_available,
+ .reset = mx31_reset,
+ .fifo_size = 8,
+ .has_dmamode = true,
+ .dynamic_burst = false,
+ .has_slavemode = false,
+ .devtype = IMX35_CSPI,
+};
+
+static struct spi_imx_devtype_data imx51_ecspi_devtype_data = {
+ .intctrl = mx51_ecspi_intctrl,
+ .prepare_message = mx51_ecspi_prepare_message,
+ .prepare_transfer = mx51_ecspi_prepare_transfer,
+ .trigger = mx51_ecspi_trigger,
+ .rx_available = mx51_ecspi_rx_available,
+ .reset = mx51_ecspi_reset,
+ .setup_wml = mx51_setup_wml,
+ .disable_dma = mx51_disable_dma,
+ .fifo_size = 64,
+ .has_dmamode = true,
+ .dynamic_burst = true,
+ .has_slavemode = true,
+ .disable = mx51_ecspi_disable,
+ .devtype = IMX51_ECSPI,
+};
+
+static struct spi_imx_devtype_data imx53_ecspi_devtype_data = {
+ .intctrl = mx51_ecspi_intctrl,
+ .prepare_message = mx51_ecspi_prepare_message,
+ .prepare_transfer = mx51_ecspi_prepare_transfer,
+ .trigger = mx51_ecspi_trigger,
+ .rx_available = mx51_ecspi_rx_available,
+ .disable_dma = mx51_disable_dma,
+ .reset = mx51_ecspi_reset,
+ .fifo_size = 64,
+ .has_dmamode = true,
+ .has_slavemode = true,
+ .disable = mx51_ecspi_disable,
+ .devtype = IMX53_ECSPI,
+};
+
+static struct spi_imx_devtype_data imx6ul_ecspi_devtype_data = {
+ .intctrl = mx51_ecspi_intctrl,
+ .prepare_message = mx51_ecspi_prepare_message,
+ .prepare_transfer = mx51_ecspi_prepare_transfer,
+ .trigger = mx51_ecspi_trigger,
+ .rx_available = mx51_ecspi_rx_available,
+ .reset = mx51_ecspi_reset,
+ .setup_wml = mx51_setup_wml,
+ .fifo_size = 64,
+ .has_dmamode = true,
+ .dynamic_burst = true,
+ .has_slavemode = true,
+ .tx_glitch_fixed = true,
+ .disable = mx51_ecspi_disable,
+ .devtype = IMX51_ECSPI,
+};
+
+static const struct of_device_id spi_imx_dt_ids[] = {
+ { .compatible = "fsl,imx1-cspi", .data = &imx1_cspi_devtype_data, },
+ { .compatible = "fsl,imx21-cspi", .data = &imx21_cspi_devtype_data, },
+ { .compatible = "fsl,imx27-cspi", .data = &imx27_cspi_devtype_data, },
+ { .compatible = "fsl,imx31-cspi", .data = &imx31_cspi_devtype_data, },
+ { .compatible = "fsl,imx35-cspi", .data = &imx35_cspi_devtype_data, },
+ { .compatible = "fsl,imx51-ecspi", .data = &imx51_ecspi_devtype_data, },
+ { .compatible = "fsl,imx53-ecspi", .data = &imx53_ecspi_devtype_data, },
+ { .compatible = "fsl,imx6ul-ecspi", .data = &imx6ul_ecspi_devtype_data, },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, spi_imx_dt_ids);
+
+static void spi_imx_set_burst_len(struct spi_imx_data *spi_imx, int n_bits)
+{
+ u32 ctrl;
+
+ ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
+ ctrl &= ~MX51_ECSPI_CTRL_BL_MASK;
+ ctrl |= ((n_bits - 1) << MX51_ECSPI_CTRL_BL_OFFSET);
+ writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
+}
+
+static void spi_imx_push(struct spi_imx_data *spi_imx)
+{
+ unsigned int burst_len;
+
+ /*
+	 * Reload the FIFO when the number of bytes remaining in the
+	 * current burst is 0. This only applies when bits_per_word is a
+ * multiple of 8.
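+	 *
+	 * For example (assuming MX51_ECSPI_CTRL_MAX_BURST is 512 bytes), a
+	 * 600-byte transfer is first sent as an 88-byte burst (600 % 512)
+	 * and then as a full 512-byte burst.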
+ */
+ if (!spi_imx->remainder) {
+ if (spi_imx->dynamic_burst) {
+
+			/* We need to deal with the unaligned data first */
+ burst_len = spi_imx->count % MX51_ECSPI_CTRL_MAX_BURST;
+
+ if (!burst_len)
+ burst_len = MX51_ECSPI_CTRL_MAX_BURST;
+
+ spi_imx_set_burst_len(spi_imx, burst_len * 8);
+
+ spi_imx->remainder = burst_len;
+ } else {
+ spi_imx->remainder = spi_imx_bytes_per_word(spi_imx->bits_per_word);
+ }
+ }
+
+ while (spi_imx->txfifo < spi_imx->devtype_data->fifo_size) {
+ if (!spi_imx->count)
+ break;
+ if (spi_imx->dynamic_burst &&
+ spi_imx->txfifo >= DIV_ROUND_UP(spi_imx->remainder, 4))
+ break;
+ spi_imx->tx(spi_imx);
+ spi_imx->txfifo++;
+ }
+
+ if (!spi_imx->slave_mode)
+ spi_imx->devtype_data->trigger(spi_imx);
+}
+
+static irqreturn_t spi_imx_isr(int irq, void *dev_id)
+{
+ struct spi_imx_data *spi_imx = dev_id;
+
+ while (spi_imx->txfifo &&
+ spi_imx->devtype_data->rx_available(spi_imx)) {
+ spi_imx->rx(spi_imx);
+ spi_imx->txfifo--;
+ }
+
+ if (spi_imx->count) {
+ spi_imx_push(spi_imx);
+ return IRQ_HANDLED;
+ }
+
+ if (spi_imx->txfifo) {
+		/* No data left to push, but still waiting for RX data;
+		 * enable the receive data available interrupt.
+ */
+ spi_imx->devtype_data->intctrl(
+ spi_imx, MXC_INT_RR);
+ return IRQ_HANDLED;
+ }
+
+ spi_imx->devtype_data->intctrl(spi_imx, 0);
+ complete(&spi_imx->xfer_done);
+
+ return IRQ_HANDLED;
+}
+
+static int spi_imx_dma_configure(struct spi_controller *controller)
+{
+ int ret;
+ enum dma_slave_buswidth buswidth;
+ struct dma_slave_config rx = {}, tx = {};
+ struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller);
+
+ switch (spi_imx_bytes_per_word(spi_imx->bits_per_word)) {
+ case 4:
+ buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ break;
+ case 2:
+ buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ break;
+ case 1:
+ buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ tx.direction = DMA_MEM_TO_DEV;
+ tx.dst_addr = spi_imx->base_phys + MXC_CSPITXDATA;
+ tx.dst_addr_width = buswidth;
+ tx.dst_maxburst = spi_imx->wml;
+ ret = dmaengine_slave_config(controller->dma_tx, &tx);
+ if (ret) {
+ dev_err(spi_imx->dev, "TX dma configuration failed with %d\n", ret);
+ return ret;
+ }
+
+ rx.direction = DMA_DEV_TO_MEM;
+ rx.src_addr = spi_imx->base_phys + MXC_CSPIRXDATA;
+ rx.src_addr_width = buswidth;
+ rx.src_maxburst = spi_imx->wml;
+ ret = dmaengine_slave_config(controller->dma_rx, &rx);
+ if (ret) {
+ dev_err(spi_imx->dev, "RX dma configuration failed with %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int spi_imx_setupxfer(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller);
+
+ if (!t)
+ return 0;
+
+ if (!t->speed_hz) {
+ if (!spi->max_speed_hz) {
+ dev_err(&spi->dev, "no speed_hz provided!\n");
+ return -EINVAL;
+ }
+ dev_dbg(&spi->dev, "using spi->max_speed_hz!\n");
+ spi_imx->spi_bus_clk = spi->max_speed_hz;
+ } else
+ spi_imx->spi_bus_clk = t->speed_hz;
+
+ spi_imx->bits_per_word = t->bits_per_word;
+
+ /*
+	 * Initialize the functions for the transfer. To transfer
+	 * non-byte-aligned words, we have to use multiple word-size bursts;
+	 * we can't use dynamic_burst in that case.
+ */
+ if (spi_imx->devtype_data->dynamic_burst && !spi_imx->slave_mode &&
+ !(spi->mode & SPI_CS_WORD) &&
+ (spi_imx->bits_per_word == 8 ||
+ spi_imx->bits_per_word == 16 ||
+ spi_imx->bits_per_word == 32)) {
+
+ spi_imx->rx = spi_imx_buf_rx_swap;
+ spi_imx->tx = spi_imx_buf_tx_swap;
+ spi_imx->dynamic_burst = 1;
+
+ } else {
+ if (spi_imx->bits_per_word <= 8) {
+ spi_imx->rx = spi_imx_buf_rx_u8;
+ spi_imx->tx = spi_imx_buf_tx_u8;
+ } else if (spi_imx->bits_per_word <= 16) {
+ spi_imx->rx = spi_imx_buf_rx_u16;
+ spi_imx->tx = spi_imx_buf_tx_u16;
+ } else {
+ spi_imx->rx = spi_imx_buf_rx_u32;
+ spi_imx->tx = spi_imx_buf_tx_u32;
+ }
+ spi_imx->dynamic_burst = 0;
+ }
+
+ if (spi_imx_can_dma(spi_imx->controller, spi, t))
+ spi_imx->usedma = true;
+ else
+ spi_imx->usedma = false;
+
+ spi_imx->rx_only = ((t->tx_buf == NULL)
+ || (t->tx_buf == spi->controller->dummy_tx));
+
+ if (is_imx53_ecspi(spi_imx) && spi_imx->slave_mode) {
+ spi_imx->rx = mx53_ecspi_rx_slave;
+ spi_imx->tx = mx53_ecspi_tx_slave;
+ spi_imx->slave_burst = t->len;
+ }
+
+ spi_imx->devtype_data->prepare_transfer(spi_imx, spi);
+
+ return 0;
+}
+
+static void spi_imx_sdma_exit(struct spi_imx_data *spi_imx)
+{
+ struct spi_controller *controller = spi_imx->controller;
+
+ if (controller->dma_rx) {
+ dma_release_channel(controller->dma_rx);
+ controller->dma_rx = NULL;
+ }
+
+ if (controller->dma_tx) {
+ dma_release_channel(controller->dma_tx);
+ controller->dma_tx = NULL;
+ }
+}
+
+static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
+ struct spi_controller *controller)
+{
+ int ret;
+
+ spi_imx->wml = spi_imx->devtype_data->fifo_size / 2;
+
+ /* Prepare for TX DMA: */
+ controller->dma_tx = dma_request_chan(dev, "tx");
+ if (IS_ERR(controller->dma_tx)) {
+ ret = PTR_ERR(controller->dma_tx);
+ dev_dbg(dev, "can't get the TX DMA channel, error %d!\n", ret);
+ controller->dma_tx = NULL;
+ goto err;
+ }
+
+	/* Prepare for RX DMA: */
+ controller->dma_rx = dma_request_chan(dev, "rx");
+ if (IS_ERR(controller->dma_rx)) {
+ ret = PTR_ERR(controller->dma_rx);
+ dev_dbg(dev, "can't get the RX DMA channel, error %d\n", ret);
+ controller->dma_rx = NULL;
+ goto err;
+ }
+
+ init_completion(&spi_imx->dma_rx_completion);
+ init_completion(&spi_imx->dma_tx_completion);
+ controller->can_dma = spi_imx_can_dma;
+ controller->max_dma_len = MAX_SDMA_BD_BYTES;
+ spi_imx->controller->flags = SPI_CONTROLLER_MUST_RX |
+ SPI_CONTROLLER_MUST_TX;
+
+ return 0;
+err:
+ spi_imx_sdma_exit(spi_imx);
+ return ret;
+}
+
+static void spi_imx_dma_rx_callback(void *cookie)
+{
+ struct spi_imx_data *spi_imx = (struct spi_imx_data *)cookie;
+
+ complete(&spi_imx->dma_rx_completion);
+}
+
+static void spi_imx_dma_tx_callback(void *cookie)
+{
+ struct spi_imx_data *spi_imx = (struct spi_imx_data *)cookie;
+
+ complete(&spi_imx->dma_tx_completion);
+}
+
+static int spi_imx_calculate_timeout(struct spi_imx_data *spi_imx, int size)
+{
+ unsigned long timeout = 0;
+
+	/* Time for the actual data transfer plus the HW-related CS change delay */
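+	/*
+	 * For example (assumed numbers): a 4096-byte transfer at 1 MHz gives
+	 * (8 + 4) * 4096 / 1000000 = 0 s here, so the extra second and the
+	 * final doubling below make the effective timeout two seconds.
+	 */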
+ timeout = (8 + 4) * size / spi_imx->spi_bus_clk;
+
+	/* Add an extra second for scheduler-related activities */
+ timeout += 1;
+
+ /* Double calculated timeout */
+ return msecs_to_jiffies(2 * timeout * MSEC_PER_SEC);
+}
+
+static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
+ struct spi_transfer *transfer)
+{
+ struct dma_async_tx_descriptor *desc_tx, *desc_rx;
+ unsigned long transfer_timeout;
+ unsigned long timeout;
+ struct spi_controller *controller = spi_imx->controller;
+ struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;
+ struct scatterlist *last_sg = sg_last(rx->sgl, rx->nents);
+ unsigned int bytes_per_word, i;
+ int ret;
+
+ /* Get the right burst length from the last sg to ensure no tail data */
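+	/*
+	 * For example (assumed sizes): with one byte per word, a 64-word FIFO
+	 * (so i starts at 32) and a last sg entry of 24 bytes, the largest
+	 * value dividing the sg length evenly is 24, so the watermark becomes
+	 * 24 words and no tail data is left behind.
+	 */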
+ bytes_per_word = spi_imx_bytes_per_word(transfer->bits_per_word);
+ for (i = spi_imx->devtype_data->fifo_size / 2; i > 0; i--) {
+ if (!(sg_dma_len(last_sg) % (i * bytes_per_word)))
+ break;
+ }
+	/* Use 1 as the wml in case no suitable burst length was found */
+ if (i == 0)
+ i = 1;
+
+ spi_imx->wml = i;
+
+ ret = spi_imx_dma_configure(controller);
+ if (ret)
+ goto dma_failure_no_start;
+
+ if (!spi_imx->devtype_data->setup_wml) {
+ dev_err(spi_imx->dev, "No setup_wml()?\n");
+ ret = -EINVAL;
+ goto dma_failure_no_start;
+ }
+ spi_imx->devtype_data->setup_wml(spi_imx);
+
+ /*
+ * The TX DMA setup starts the transfer, so make sure RX is configured
+ * before TX.
+ */
+ desc_rx = dmaengine_prep_slave_sg(controller->dma_rx,
+ rx->sgl, rx->nents, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc_rx) {
+ ret = -EINVAL;
+ goto dma_failure_no_start;
+ }
+
+ desc_rx->callback = spi_imx_dma_rx_callback;
+ desc_rx->callback_param = (void *)spi_imx;
+ dmaengine_submit(desc_rx);
+ reinit_completion(&spi_imx->dma_rx_completion);
+ dma_async_issue_pending(controller->dma_rx);
+
+ desc_tx = dmaengine_prep_slave_sg(controller->dma_tx,
+ tx->sgl, tx->nents, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc_tx) {
+ dmaengine_terminate_all(controller->dma_tx);
+ dmaengine_terminate_all(controller->dma_rx);
+ return -EINVAL;
+ }
+
+ desc_tx->callback = spi_imx_dma_tx_callback;
+ desc_tx->callback_param = (void *)spi_imx;
+ dmaengine_submit(desc_tx);
+ reinit_completion(&spi_imx->dma_tx_completion);
+ dma_async_issue_pending(controller->dma_tx);
+
+ transfer_timeout = spi_imx_calculate_timeout(spi_imx, transfer->len);
+
+	/* Wait for the SDMA to finish the data transfer. */
+ timeout = wait_for_completion_timeout(&spi_imx->dma_tx_completion,
+ transfer_timeout);
+ if (!timeout) {
+ dev_err(spi_imx->dev, "I/O Error in DMA TX\n");
+ dmaengine_terminate_all(controller->dma_tx);
+ dmaengine_terminate_all(controller->dma_rx);
+ return -ETIMEDOUT;
+ }
+
+ timeout = wait_for_completion_timeout(&spi_imx->dma_rx_completion,
+ transfer_timeout);
+ if (!timeout) {
+ dev_err(&controller->dev, "I/O Error in DMA RX\n");
+ spi_imx->devtype_data->reset(spi_imx);
+ dmaengine_terminate_all(controller->dma_rx);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+/* fallback to pio */
+dma_failure_no_start:
+ transfer->error |= SPI_TRANS_FAIL_NO_START;
+ return ret;
+}
+
+static int spi_imx_pio_transfer(struct spi_device *spi,
+ struct spi_transfer *transfer)
+{
+ struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller);
+ unsigned long transfer_timeout;
+ unsigned long timeout;
+
+ spi_imx->tx_buf = transfer->tx_buf;
+ spi_imx->rx_buf = transfer->rx_buf;
+ spi_imx->count = transfer->len;
+ spi_imx->txfifo = 0;
+ spi_imx->remainder = 0;
+
+ reinit_completion(&spi_imx->xfer_done);
+
+ spi_imx_push(spi_imx);
+
+ spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TE);
+
+ transfer_timeout = spi_imx_calculate_timeout(spi_imx, transfer->len);
+
+ timeout = wait_for_completion_timeout(&spi_imx->xfer_done,
+ transfer_timeout);
+ if (!timeout) {
+ dev_err(&spi->dev, "I/O Error in PIO\n");
+ spi_imx->devtype_data->reset(spi_imx);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int spi_imx_poll_transfer(struct spi_device *spi,
+ struct spi_transfer *transfer)
+{
+ struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller);
+ unsigned long timeout;
+
+ spi_imx->tx_buf = transfer->tx_buf;
+ spi_imx->rx_buf = transfer->rx_buf;
+ spi_imx->count = transfer->len;
+ spi_imx->txfifo = 0;
+ spi_imx->remainder = 0;
+
+	/* Fill in the FIFO before the timeout calculations: if we are
+	 * interrupted here, the data is being transferred by
+	 * the HW while we are interrupted.
+ */
+ spi_imx_push(spi_imx);
+
+ timeout = spi_imx_calculate_timeout(spi_imx, transfer->len) + jiffies;
+ while (spi_imx->txfifo) {
+ /* RX */
+ while (spi_imx->txfifo &&
+ spi_imx->devtype_data->rx_available(spi_imx)) {
+ spi_imx->rx(spi_imx);
+ spi_imx->txfifo--;
+ }
+
+ /* TX */
+ if (spi_imx->count) {
+ spi_imx_push(spi_imx);
+ continue;
+ }
+
+ if (spi_imx->txfifo &&
+ time_after(jiffies, timeout)) {
+
+ dev_err_ratelimited(&spi->dev,
+ "timeout period reached: jiffies: %lu- falling back to interrupt mode\n",
+ jiffies - timeout);
+
+ /* fall back to interrupt mode */
+ return spi_imx_pio_transfer(spi, transfer);
+ }
+ }
+
+ return 0;
+}
+
+static int spi_imx_pio_transfer_slave(struct spi_device *spi,
+ struct spi_transfer *transfer)
+{
+ struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller);
+ int ret = 0;
+
+ if (is_imx53_ecspi(spi_imx) &&
+ transfer->len > MX53_MAX_TRANSFER_BYTES) {
+ dev_err(&spi->dev, "Transaction too big, max size is %d bytes\n",
+ MX53_MAX_TRANSFER_BYTES);
+ return -EMSGSIZE;
+ }
+
+ spi_imx->tx_buf = transfer->tx_buf;
+ spi_imx->rx_buf = transfer->rx_buf;
+ spi_imx->count = transfer->len;
+ spi_imx->txfifo = 0;
+ spi_imx->remainder = 0;
+
+ reinit_completion(&spi_imx->xfer_done);
+ spi_imx->slave_aborted = false;
+
+ spi_imx_push(spi_imx);
+
+ spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TE | MXC_INT_RDR);
+
+ if (wait_for_completion_interruptible(&spi_imx->xfer_done) ||
+ spi_imx->slave_aborted) {
+ dev_dbg(&spi->dev, "interrupted\n");
+ ret = -EINTR;
+ }
+
+	/* The eCSPI has a HW issue when working in slave mode:
+	 * after 64 words are written to the TXFIFO, even once the TXFIFO
+	 * becomes empty, ECSPI_TXDATA keeps shifting out the last word of
+	 * data, so we have to disable the eCSPI in slave mode after the
+	 * transfer completes.
+ */
+ if (spi_imx->devtype_data->disable)
+ spi_imx->devtype_data->disable(spi_imx);
+
+ return ret;
+}
+
+static int spi_imx_transfer_one(struct spi_controller *controller,
+ struct spi_device *spi,
+ struct spi_transfer *transfer)
+{
+ struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller);
+ unsigned long hz_per_byte, byte_limit;
+
+ spi_imx_setupxfer(spi, transfer);
+ transfer->effective_speed_hz = spi_imx->spi_bus_clk;
+
+ /* flush rxfifo before transfer */
+ while (spi_imx->devtype_data->rx_available(spi_imx))
+ readl(spi_imx->base + MXC_CSPIRXDATA);
+
+ if (spi_imx->slave_mode)
+ return spi_imx_pio_transfer_slave(spi, transfer);
+
+ /*
+ * If we decided in spi_imx_can_dma() that we want to do a DMA
+ * transfer, the SPI transfer has already been mapped, so we
+ * have to do the DMA transfer here.
+ */
+ if (spi_imx->usedma)
+ return spi_imx_dma_transfer(spi_imx, transfer);
+ /*
+ * Calculate the estimated time in us the transfer runs. Find
+ * the number of Hz per byte per polling limit.
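+	 *
+	 * For example (assuming polling_limit_us = 30): at 10 MHz this gives
+	 * hz_per_byte = 12000000 / 30 = 400000 and byte_limit = 25, so
+	 * transfers shorter than 25 bytes are polled.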
+ */
+ hz_per_byte = polling_limit_us ? ((8 + 4) * USEC_PER_SEC) / polling_limit_us : 0;
+ byte_limit = hz_per_byte ? transfer->effective_speed_hz / hz_per_byte : 1;
+
+ /* run in polling mode for short transfers */
+ if (transfer->len < byte_limit)
+ return spi_imx_poll_transfer(spi, transfer);
+
+ return spi_imx_pio_transfer(spi, transfer);
+}
+
+static int spi_imx_setup(struct spi_device *spi)
+{
+ dev_dbg(&spi->dev, "%s: mode %d, %u bpw, %d hz\n", __func__,
+ spi->mode, spi->bits_per_word, spi->max_speed_hz);
+
+ return 0;
+}
+
+static void spi_imx_cleanup(struct spi_device *spi)
+{
+}
+
+static int
+spi_imx_prepare_message(struct spi_controller *controller, struct spi_message *msg)
+{
+ struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(spi_imx->dev);
+ if (ret < 0) {
+ dev_err(spi_imx->dev, "failed to enable clock\n");
+ return ret;
+ }
+
+ ret = spi_imx->devtype_data->prepare_message(spi_imx, msg);
+ if (ret) {
+ pm_runtime_mark_last_busy(spi_imx->dev);
+ pm_runtime_put_autosuspend(spi_imx->dev);
+ }
+
+ return ret;
+}
+
+static int
+spi_imx_unprepare_message(struct spi_controller *controller, struct spi_message *msg)
+{
+ struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller);
+
+ pm_runtime_mark_last_busy(spi_imx->dev);
+ pm_runtime_put_autosuspend(spi_imx->dev);
+ return 0;
+}
+
+static int spi_imx_slave_abort(struct spi_controller *controller)
+{
+ struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller);
+
+ spi_imx->slave_aborted = true;
+ complete(&spi_imx->xfer_done);
+
+ return 0;
+}
+
+static int spi_imx_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct spi_controller *controller;
+ struct spi_imx_data *spi_imx;
+ struct resource *res;
+ int ret, irq, spi_drctl;
+ const struct spi_imx_devtype_data *devtype_data =
+ of_device_get_match_data(&pdev->dev);
+ bool slave_mode;
+ u32 val;
+
+ slave_mode = devtype_data->has_slavemode &&
+ of_property_read_bool(np, "spi-slave");
+ if (slave_mode)
+ controller = spi_alloc_slave(&pdev->dev,
+ sizeof(struct spi_imx_data));
+ else
+ controller = spi_alloc_master(&pdev->dev,
+ sizeof(struct spi_imx_data));
+ if (!controller)
+ return -ENOMEM;
+
+ ret = of_property_read_u32(np, "fsl,spi-rdy-drctl", &spi_drctl);
+ if ((ret < 0) || (spi_drctl >= 0x3)) {
+ /* '11' is reserved */
+ spi_drctl = 0;
+ }
+
+ platform_set_drvdata(pdev, controller);
+
+ controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
+ controller->bus_num = np ? -1 : pdev->id;
+ controller->use_gpio_descriptors = true;
+
+ spi_imx = spi_controller_get_devdata(controller);
+ spi_imx->controller = controller;
+ spi_imx->dev = &pdev->dev;
+ spi_imx->slave_mode = slave_mode;
+
+ spi_imx->devtype_data = devtype_data;
+
+ /*
+	 * Get the number of chip selects from the device properties. This can
+	 * come from the device tree or from board files; if it is not
+	 * defined, a default value of 3 chip selects will be used, as all the
+	 * legacy board files have <= 3 chip selects.
+ */
+ if (!device_property_read_u32(&pdev->dev, "num-cs", &val))
+ controller->num_chipselect = val;
+ else
+ controller->num_chipselect = 3;
+
+ spi_imx->controller->transfer_one = spi_imx_transfer_one;
+ spi_imx->controller->setup = spi_imx_setup;
+ spi_imx->controller->cleanup = spi_imx_cleanup;
+ spi_imx->controller->prepare_message = spi_imx_prepare_message;
+ spi_imx->controller->unprepare_message = spi_imx_unprepare_message;
+ spi_imx->controller->slave_abort = spi_imx_slave_abort;
+ spi_imx->controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_NO_CS;
+
+ if (is_imx35_cspi(spi_imx) || is_imx51_ecspi(spi_imx) ||
+ is_imx53_ecspi(spi_imx))
+ spi_imx->controller->mode_bits |= SPI_LOOP | SPI_READY;
+
+ if (is_imx51_ecspi(spi_imx) || is_imx53_ecspi(spi_imx))
+ spi_imx->controller->mode_bits |= SPI_RX_CPHA_FLIP;
+
+ if (is_imx51_ecspi(spi_imx) &&
+ device_property_read_u32(&pdev->dev, "cs-gpios", NULL))
+ /*
+		 * When using HW-CS, implementing SPI_CS_WORD can be done by just
+ * setting the burst length to the word size. This is
+ * considerably faster than manually controlling the CS.
+ */
+ spi_imx->controller->mode_bits |= SPI_CS_WORD;
+
+ spi_imx->spi_drctl = spi_drctl;
+
+ init_completion(&spi_imx->xfer_done);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ spi_imx->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(spi_imx->base)) {
+ ret = PTR_ERR(spi_imx->base);
+ goto out_controller_put;
+ }
+ spi_imx->base_phys = res->start;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto out_controller_put;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, spi_imx_isr, 0,
+ dev_name(&pdev->dev), spi_imx);
+ if (ret) {
+ dev_err(&pdev->dev, "can't get irq%d: %d\n", irq, ret);
+ goto out_controller_put;
+ }
+
+ spi_imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+ if (IS_ERR(spi_imx->clk_ipg)) {
+ ret = PTR_ERR(spi_imx->clk_ipg);
+ goto out_controller_put;
+ }
+
+ spi_imx->clk_per = devm_clk_get(&pdev->dev, "per");
+ if (IS_ERR(spi_imx->clk_per)) {
+ ret = PTR_ERR(spi_imx->clk_per);
+ goto out_controller_put;
+ }
+
+ ret = clk_prepare_enable(spi_imx->clk_per);
+ if (ret)
+ goto out_controller_put;
+
+ ret = clk_prepare_enable(spi_imx->clk_ipg);
+ if (ret)
+ goto out_put_per;
+
+ pm_runtime_set_autosuspend_delay(spi_imx->dev, MXC_RPM_TIMEOUT);
+ pm_runtime_use_autosuspend(spi_imx->dev);
+ pm_runtime_get_noresume(spi_imx->dev);
+ pm_runtime_set_active(spi_imx->dev);
+ pm_runtime_enable(spi_imx->dev);
+
+ spi_imx->spi_clk = clk_get_rate(spi_imx->clk_per);
+ /*
+	 * Only validated on the i.MX35 and i.MX6 for now; the constraint can
+	 * be removed once validated on other chips.
+ */
+ if (spi_imx->devtype_data->has_dmamode) {
+ ret = spi_imx_sdma_init(&pdev->dev, spi_imx, controller);
+ if (ret == -EPROBE_DEFER)
+ goto out_runtime_pm_put;
+
+ if (ret < 0)
+ dev_dbg(&pdev->dev, "dma setup error %d, use pio\n",
+ ret);
+ }
+
+ spi_imx->devtype_data->reset(spi_imx);
+
+ spi_imx->devtype_data->intctrl(spi_imx, 0);
+
+ controller->dev.of_node = pdev->dev.of_node;
+ ret = spi_register_controller(controller);
+ if (ret) {
+ dev_err_probe(&pdev->dev, ret, "register controller failed\n");
+ goto out_register_controller;
+ }
+
+ pm_runtime_mark_last_busy(spi_imx->dev);
+ pm_runtime_put_autosuspend(spi_imx->dev);
+
+ return ret;
+
+out_register_controller:
+ if (spi_imx->devtype_data->has_dmamode)
+ spi_imx_sdma_exit(spi_imx);
+out_runtime_pm_put:
+ pm_runtime_dont_use_autosuspend(spi_imx->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_disable(spi_imx->dev);
+
+ clk_disable_unprepare(spi_imx->clk_ipg);
+out_put_per:
+ clk_disable_unprepare(spi_imx->clk_per);
+out_controller_put:
+ spi_controller_put(controller);
+
+ return ret;
+}
+
+static int spi_imx_remove(struct platform_device *pdev)
+{
+ struct spi_controller *controller = platform_get_drvdata(pdev);
+ struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller);
+ int ret;
+
+ spi_unregister_controller(controller);
+
+ ret = pm_runtime_get_sync(spi_imx->dev);
+ if (ret >= 0)
+ writel(0, spi_imx->base + MXC_CSPICTRL);
+ else
+ dev_warn(spi_imx->dev, "failed to enable clock, skip hw disable\n");
+
+ pm_runtime_dont_use_autosuspend(spi_imx->dev);
+ pm_runtime_put_sync(spi_imx->dev);
+ pm_runtime_disable(spi_imx->dev);
+
+ spi_imx_sdma_exit(spi_imx);
+
+ return 0;
+}
+
+static int __maybe_unused spi_imx_runtime_resume(struct device *dev)
+{
+ struct spi_controller *controller = dev_get_drvdata(dev);
+ struct spi_imx_data *spi_imx;
+ int ret;
+
+ spi_imx = spi_controller_get_devdata(controller);
+
+ ret = clk_prepare_enable(spi_imx->clk_per);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(spi_imx->clk_ipg);
+ if (ret) {
+ clk_disable_unprepare(spi_imx->clk_per);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused spi_imx_runtime_suspend(struct device *dev)
+{
+ struct spi_controller *controller = dev_get_drvdata(dev);
+ struct spi_imx_data *spi_imx;
+
+ spi_imx = spi_controller_get_devdata(controller);
+
+ clk_disable_unprepare(spi_imx->clk_per);
+ clk_disable_unprepare(spi_imx->clk_ipg);
+
+ return 0;
+}
+
+static int __maybe_unused spi_imx_suspend(struct device *dev)
+{
+ pinctrl_pm_select_sleep_state(dev);
+ return 0;
+}
+
+static int __maybe_unused spi_imx_resume(struct device *dev)
+{
+ pinctrl_pm_select_default_state(dev);
+ return 0;
+}
+
+static const struct dev_pm_ops imx_spi_pm = {
+ SET_RUNTIME_PM_OPS(spi_imx_runtime_suspend,
+ spi_imx_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(spi_imx_suspend, spi_imx_resume)
+};
+
+static struct platform_driver spi_imx_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = spi_imx_dt_ids,
+ .pm = &imx_spi_pm,
+ },
+ .probe = spi_imx_probe,
+ .remove = spi_imx_remove,
+};
+module_platform_driver(spi_imx_driver);
+
+MODULE_DESCRIPTION("i.MX SPI Controller driver");
+MODULE_AUTHOR("Sascha Hauer, Pengutronix");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/spi/spi-ingenic.c b/drivers/spi/spi-ingenic.c
new file mode 100644
index 000000000..713a238be
--- /dev/null
+++ b/drivers/spi/spi-ingenic.c
@@ -0,0 +1,519 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SPI bus driver for the Ingenic SoCs
+ * Copyright (c) 2017-2021 Artur Rojek <contact@artur-rojek.eu>
+ * Copyright (c) 2017-2021 Paul Cercueil <paul@crapouillou.net>
+ * Copyright (c) 2022 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+
+#define REG_SSIDR 0x0
+#define REG_SSICR0 0x4
+#define REG_SSICR1 0x8
+#define REG_SSISR 0xc
+#define REG_SSIGR 0x18
+
+#define REG_SSICR0_TENDIAN_LSB BIT(19)
+#define REG_SSICR0_RENDIAN_LSB BIT(17)
+#define REG_SSICR0_SSIE BIT(15)
+#define REG_SSICR0_LOOP BIT(10)
+#define REG_SSICR0_EACLRUN BIT(7)
+#define REG_SSICR0_FSEL BIT(6)
+#define REG_SSICR0_TFLUSH BIT(2)
+#define REG_SSICR0_RFLUSH BIT(1)
+
+#define REG_SSICR1_FRMHL_MASK (BIT(31) | BIT(30))
+#define REG_SSICR1_FRMHL BIT(30)
+#define REG_SSICR1_LFST BIT(25)
+#define REG_SSICR1_UNFIN BIT(23)
+#define REG_SSICR1_PHA BIT(1)
+#define REG_SSICR1_POL BIT(0)
+
+#define REG_SSISR_END BIT(7)
+#define REG_SSISR_BUSY BIT(6)
+#define REG_SSISR_TFF BIT(5)
+#define REG_SSISR_RFE BIT(4)
+#define REG_SSISR_RFHF BIT(2)
+#define REG_SSISR_UNDR BIT(1)
+#define REG_SSISR_OVER BIT(0)
+
+#define SPI_INGENIC_FIFO_SIZE 128u
+
+struct jz_soc_info {
+ u32 bits_per_word_mask;
+ struct reg_field flen_field;
+ bool has_trendian;
+
+ unsigned int max_speed_hz;
+ unsigned int max_native_cs;
+};
+
+struct ingenic_spi {
+ const struct jz_soc_info *soc_info;
+ struct clk *clk;
+ struct resource *mem_res;
+
+ struct regmap *map;
+ struct regmap_field *flen_field;
+};
+
+static int spi_ingenic_wait(struct ingenic_spi *priv,
+ unsigned long mask,
+ bool condition)
+{
+ unsigned int val;
+
+ return regmap_read_poll_timeout(priv->map, REG_SSISR, val,
+ !!(val & mask) == condition,
+ 100, 10000);
+}
+
+static void spi_ingenic_set_cs(struct spi_device *spi, bool disable)
+{
+ struct ingenic_spi *priv = spi_controller_get_devdata(spi->controller);
+
+ if (disable) {
+ regmap_clear_bits(priv->map, REG_SSICR1, REG_SSICR1_UNFIN);
+ regmap_clear_bits(priv->map, REG_SSISR,
+ REG_SSISR_UNDR | REG_SSISR_OVER);
+
+ spi_ingenic_wait(priv, REG_SSISR_END, true);
+ } else {
+ regmap_set_bits(priv->map, REG_SSICR1, REG_SSICR1_UNFIN);
+ }
+
+ regmap_set_bits(priv->map, REG_SSICR0,
+ REG_SSICR0_RFLUSH | REG_SSICR0_TFLUSH);
+}
+
+static void spi_ingenic_prepare_transfer(struct ingenic_spi *priv,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ unsigned long clk_hz = clk_get_rate(priv->clk);
+ u32 cdiv, speed_hz = xfer->speed_hz ?: spi->max_speed_hz,
+ bits_per_word = xfer->bits_per_word ?: spi->bits_per_word;
+
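+	/*
+	 * The divider below assumes SCLK = clk_hz / (2 * (SSIGR + 1)); for
+	 * example, a 48 MHz input clock and a 12 MHz transfer give cdiv = 2,
+	 * so SSIGR is written with 1.
+	 */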
+ cdiv = clk_hz / (speed_hz * 2);
+ cdiv = clamp(cdiv, 1u, 0x100u) - 1;
+
+ regmap_write(priv->map, REG_SSIGR, cdiv);
+
+ regmap_field_write(priv->flen_field, bits_per_word - 2);
+}
+
+static void spi_ingenic_finalize_transfer(void *controller)
+{
+ spi_finalize_current_transfer(controller);
+}
+
+static struct dma_async_tx_descriptor *
+spi_ingenic_prepare_dma(struct spi_controller *ctlr, struct dma_chan *chan,
+ struct sg_table *sg, enum dma_transfer_direction dir,
+ unsigned int bits)
+{
+ struct ingenic_spi *priv = spi_controller_get_devdata(ctlr);
+ struct dma_slave_config cfg = {
+ .direction = dir,
+ .src_addr = priv->mem_res->start + REG_SSIDR,
+ .dst_addr = priv->mem_res->start + REG_SSIDR,
+ };
+ struct dma_async_tx_descriptor *desc;
+ dma_cookie_t cookie;
+ int ret;
+
+ if (bits > 16) {
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.src_maxburst = cfg.dst_maxburst = 4;
+ } else if (bits > 8) {
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ cfg.src_maxburst = cfg.dst_maxburst = 2;
+ } else {
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ cfg.src_maxburst = cfg.dst_maxburst = 1;
+ }
+
+ ret = dmaengine_slave_config(chan, &cfg);
+ if (ret)
+ return ERR_PTR(ret);
+
+ desc = dmaengine_prep_slave_sg(chan, sg->sgl, sg->nents, dir,
+ DMA_PREP_INTERRUPT);
+ if (!desc)
+ return ERR_PTR(-ENOMEM);
+
+ if (dir == DMA_DEV_TO_MEM) {
+ desc->callback = spi_ingenic_finalize_transfer;
+ desc->callback_param = ctlr;
+ }
+
+ cookie = dmaengine_submit(desc);
+
+ ret = dma_submit_error(cookie);
+ if (ret) {
+ dmaengine_desc_free(desc);
+ return ERR_PTR(ret);
+ }
+
+ return desc;
+}
+
+static int spi_ingenic_dma_tx(struct spi_controller *ctlr,
+ struct spi_transfer *xfer, unsigned int bits)
+{
+ struct dma_async_tx_descriptor *rx_desc, *tx_desc;
+
+ rx_desc = spi_ingenic_prepare_dma(ctlr, ctlr->dma_rx,
+ &xfer->rx_sg, DMA_DEV_TO_MEM, bits);
+ if (IS_ERR(rx_desc))
+ return PTR_ERR(rx_desc);
+
+ tx_desc = spi_ingenic_prepare_dma(ctlr, ctlr->dma_tx,
+ &xfer->tx_sg, DMA_MEM_TO_DEV, bits);
+ if (IS_ERR(tx_desc)) {
+ dmaengine_terminate_async(ctlr->dma_rx);
+ dmaengine_desc_free(rx_desc);
+ return PTR_ERR(tx_desc);
+ }
+
+ dma_async_issue_pending(ctlr->dma_rx);
+ dma_async_issue_pending(ctlr->dma_tx);
+
+ return 1;
+}
+
+#define SPI_INGENIC_TX(x) \
+static int spi_ingenic_tx##x(struct ingenic_spi *priv, \
+ struct spi_transfer *xfer) \
+{ \
+ unsigned int count = xfer->len / (x / 8); \
+ unsigned int prefill = min(count, SPI_INGENIC_FIFO_SIZE); \
+ const u##x *tx_buf = xfer->tx_buf; \
+ u##x *rx_buf = xfer->rx_buf; \
+ unsigned int i, val; \
+ int err; \
+ \
+ /* Fill up the TX fifo */ \
+ for (i = 0; i < prefill; i++) { \
+ val = tx_buf ? tx_buf[i] : 0; \
+ \
+ regmap_write(priv->map, REG_SSIDR, val); \
+ } \
+ \
+ for (i = 0; i < count; i++) { \
+ err = spi_ingenic_wait(priv, REG_SSISR_RFE, false); \
+ if (err) \
+ return err; \
+ \
+ regmap_read(priv->map, REG_SSIDR, &val); \
+ if (rx_buf) \
+ rx_buf[i] = val; \
+ \
+ if (i < count - prefill) { \
+ val = tx_buf ? tx_buf[i + prefill] : 0; \
+ \
+ regmap_write(priv->map, REG_SSIDR, val); \
+ } \
+ } \
+ \
+ return 0; \
+}
+SPI_INGENIC_TX(8)
+SPI_INGENIC_TX(16)
+SPI_INGENIC_TX(32)
+#undef SPI_INGENIC_TX
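+
+/*
+ * The helpers above keep the TX FIFO prefilled: up to SPI_INGENIC_FIFO_SIZE
+ * words are queued before the read loop starts, and each word read back
+ * frees a slot that is immediately refilled until the transfer is drained.
+ */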
+
+static int spi_ingenic_transfer_one(struct spi_controller *ctlr,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct ingenic_spi *priv = spi_controller_get_devdata(ctlr);
+ unsigned int bits = xfer->bits_per_word ?: spi->bits_per_word;
+ bool can_dma = ctlr->can_dma && ctlr->can_dma(ctlr, spi, xfer);
+
+ spi_ingenic_prepare_transfer(priv, spi, xfer);
+
+ if (ctlr->cur_msg_mapped && can_dma)
+ return spi_ingenic_dma_tx(ctlr, xfer, bits);
+
+ if (bits > 16)
+ return spi_ingenic_tx32(priv, xfer);
+
+ if (bits > 8)
+ return spi_ingenic_tx16(priv, xfer);
+
+ return spi_ingenic_tx8(priv, xfer);
+}
+
+static int spi_ingenic_prepare_message(struct spi_controller *ctlr,
+ struct spi_message *message)
+{
+ struct ingenic_spi *priv = spi_controller_get_devdata(ctlr);
+ struct spi_device *spi = message->spi;
+ unsigned int cs = REG_SSICR1_FRMHL << spi->chip_select;
+ unsigned int ssicr0_mask = REG_SSICR0_LOOP | REG_SSICR0_FSEL;
+ unsigned int ssicr1_mask = REG_SSICR1_PHA | REG_SSICR1_POL | cs;
+ unsigned int ssicr0 = 0, ssicr1 = 0;
+
+ if (priv->soc_info->has_trendian) {
+ ssicr0_mask |= REG_SSICR0_RENDIAN_LSB | REG_SSICR0_TENDIAN_LSB;
+
+ if (spi->mode & SPI_LSB_FIRST)
+ ssicr0 |= REG_SSICR0_RENDIAN_LSB | REG_SSICR0_TENDIAN_LSB;
+ } else {
+ ssicr1_mask |= REG_SSICR1_LFST;
+
+ if (spi->mode & SPI_LSB_FIRST)
+ ssicr1 |= REG_SSICR1_LFST;
+ }
+
+ if (spi->mode & SPI_LOOP)
+ ssicr0 |= REG_SSICR0_LOOP;
+ if (spi->chip_select)
+ ssicr0 |= REG_SSICR0_FSEL;
+
+ if (spi->mode & SPI_CPHA)
+ ssicr1 |= REG_SSICR1_PHA;
+ if (spi->mode & SPI_CPOL)
+ ssicr1 |= REG_SSICR1_POL;
+ if (spi->mode & SPI_CS_HIGH)
+ ssicr1 |= cs;
+
+ regmap_update_bits(priv->map, REG_SSICR0, ssicr0_mask, ssicr0);
+ regmap_update_bits(priv->map, REG_SSICR1, ssicr1_mask, ssicr1);
+
+ return 0;
+}
+
+static int spi_ingenic_prepare_hardware(struct spi_controller *ctlr)
+{
+ struct ingenic_spi *priv = spi_controller_get_devdata(ctlr);
+ int ret;
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
+
+ regmap_write(priv->map, REG_SSICR0, REG_SSICR0_EACLRUN);
+ regmap_write(priv->map, REG_SSICR1, 0);
+ regmap_write(priv->map, REG_SSISR, 0);
+ regmap_set_bits(priv->map, REG_SSICR0, REG_SSICR0_SSIE);
+
+ return 0;
+}
+
+static int spi_ingenic_unprepare_hardware(struct spi_controller *ctlr)
+{
+ struct ingenic_spi *priv = spi_controller_get_devdata(ctlr);
+
+ regmap_clear_bits(priv->map, REG_SSICR0, REG_SSICR0_SSIE);
+
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+static bool spi_ingenic_can_dma(struct spi_controller *ctlr,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct dma_slave_caps caps;
+ int ret;
+
+ ret = dma_get_slave_caps(ctlr->dma_tx, &caps);
+ if (ret) {
+ dev_err(&spi->dev, "Unable to get slave caps: %d\n", ret);
+ return false;
+ }
+
+ return !caps.max_sg_burst ||
+ xfer->len <= caps.max_sg_burst * SPI_INGENIC_FIFO_SIZE;
+}
+
+static int spi_ingenic_request_dma(struct spi_controller *ctlr,
+ struct device *dev)
+{
+ ctlr->dma_tx = dma_request_slave_channel(dev, "tx");
+ if (!ctlr->dma_tx)
+ return -ENODEV;
+
+ ctlr->dma_rx = dma_request_slave_channel(dev, "rx");
+
+ if (!ctlr->dma_rx)
+ return -ENODEV;
+
+ ctlr->can_dma = spi_ingenic_can_dma;
+
+ return 0;
+}
+
+static void spi_ingenic_release_dma(void *data)
+{
+ struct spi_controller *ctlr = data;
+
+ if (ctlr->dma_tx)
+ dma_release_channel(ctlr->dma_tx);
+ if (ctlr->dma_rx)
+ dma_release_channel(ctlr->dma_rx);
+}
+
+static const struct regmap_config spi_ingenic_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = REG_SSIGR,
+};
+
+static int spi_ingenic_probe(struct platform_device *pdev)
+{
+ const struct jz_soc_info *pdata;
+ struct device *dev = &pdev->dev;
+ struct spi_controller *ctlr;
+ struct ingenic_spi *priv;
+ void __iomem *base;
+ int num_cs, ret;
+
+ pdata = of_device_get_match_data(dev);
+ if (!pdata) {
+ dev_err(dev, "Missing platform data.\n");
+ return -EINVAL;
+ }
+
+ ctlr = devm_spi_alloc_master(dev, sizeof(*priv));
+ if (!ctlr) {
+ dev_err(dev, "Unable to allocate SPI controller.\n");
+ return -ENOMEM;
+ }
+
+ priv = spi_controller_get_devdata(ctlr);
+ priv->soc_info = pdata;
+
+ priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ return dev_err_probe(dev, PTR_ERR(priv->clk),
+ "Unable to get clock.\n");
+ }
+
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &priv->mem_res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ priv->map = devm_regmap_init_mmio(dev, base, &spi_ingenic_regmap_config);
+ if (IS_ERR(priv->map))
+ return PTR_ERR(priv->map);
+
+ priv->flen_field = devm_regmap_field_alloc(dev, priv->map,
+ pdata->flen_field);
+ if (IS_ERR(priv->flen_field))
+ return PTR_ERR(priv->flen_field);
+
+ if (device_property_read_u32(dev, "num-cs", &num_cs))
+ num_cs = pdata->max_native_cs;
+
+ platform_set_drvdata(pdev, ctlr);
+
+ ctlr->prepare_transfer_hardware = spi_ingenic_prepare_hardware;
+ ctlr->unprepare_transfer_hardware = spi_ingenic_unprepare_hardware;
+ ctlr->prepare_message = spi_ingenic_prepare_message;
+ ctlr->set_cs = spi_ingenic_set_cs;
+ ctlr->transfer_one = spi_ingenic_transfer_one;
+ ctlr->mode_bits = SPI_MODE_3 | SPI_LSB_FIRST | SPI_LOOP | SPI_CS_HIGH;
+ ctlr->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
+ ctlr->max_dma_len = SPI_INGENIC_FIFO_SIZE;
+ ctlr->bits_per_word_mask = pdata->bits_per_word_mask;
+ ctlr->min_speed_hz = 7200;
+ ctlr->max_speed_hz = pdata->max_speed_hz;
+ ctlr->use_gpio_descriptors = true;
+ ctlr->max_native_cs = pdata->max_native_cs;
+ ctlr->num_chipselect = num_cs;
+ ctlr->dev.of_node = pdev->dev.of_node;
+
+ if (spi_ingenic_request_dma(ctlr, dev))
+ dev_warn(dev, "DMA not available.\n");
+
+ ret = devm_add_action_or_reset(dev, spi_ingenic_release_dma, ctlr);
+ if (ret) {
+ dev_err(dev, "Unable to add action.\n");
+ return ret;
+ }
+
+ ret = devm_spi_register_controller(dev, ctlr);
+ if (ret)
+ dev_err(dev, "Unable to register SPI controller.\n");
+
+ return ret;
+}
+
+static const struct jz_soc_info jz4750_soc_info = {
+ .bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 17),
+ .flen_field = REG_FIELD(REG_SSICR1, 4, 7),
+ .has_trendian = false,
+
+ .max_speed_hz = 54000000,
+ .max_native_cs = 2,
+};
+
+static const struct jz_soc_info jz4780_soc_info = {
+ .bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 32),
+ .flen_field = REG_FIELD(REG_SSICR1, 3, 7),
+ .has_trendian = true,
+
+ .max_speed_hz = 54000000,
+ .max_native_cs = 2,
+};
+
+static const struct jz_soc_info x1000_soc_info = {
+ .bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 32),
+ .flen_field = REG_FIELD(REG_SSICR1, 3, 7),
+ .has_trendian = true,
+
+ .max_speed_hz = 50000000,
+ .max_native_cs = 2,
+};
+
+static const struct jz_soc_info x2000_soc_info = {
+ .bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 32),
+ .flen_field = REG_FIELD(REG_SSICR1, 3, 7),
+ .has_trendian = true,
+
+ .max_speed_hz = 50000000,
+ .max_native_cs = 1,
+};
+
+static const struct of_device_id spi_ingenic_of_match[] = {
+ { .compatible = "ingenic,jz4750-spi", .data = &jz4750_soc_info },
+ { .compatible = "ingenic,jz4775-spi", .data = &jz4780_soc_info },
+ { .compatible = "ingenic,jz4780-spi", .data = &jz4780_soc_info },
+ { .compatible = "ingenic,x1000-spi", .data = &x1000_soc_info },
+ { .compatible = "ingenic,x2000-spi", .data = &x2000_soc_info },
+ {}
+};
+MODULE_DEVICE_TABLE(of, spi_ingenic_of_match);
+
+static struct platform_driver spi_ingenic_driver = {
+ .driver = {
+ .name = "spi-ingenic",
+ .of_match_table = spi_ingenic_of_match,
+ },
+ .probe = spi_ingenic_probe,
+};
+
+module_platform_driver(spi_ingenic_driver);
+MODULE_DESCRIPTION("SPI bus driver for the Ingenic SoCs");
+MODULE_AUTHOR("Artur Rojek <contact@artur-rojek.eu>");
+MODULE_AUTHOR("Paul Cercueil <paul@crapouillou.net>");
+MODULE_AUTHOR("周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-intel-pci.c b/drivers/spi/spi-intel-pci.c
new file mode 100644
index 000000000..b718a74fa
--- /dev/null
+++ b/drivers/spi/spi-intel-pci.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel PCH/PCU SPI flash PCI driver.
+ *
+ * Copyright (C) 2016 - 2022, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "spi-intel.h"
+
+#define BCR 0xdc
+#define BCR_WPD BIT(0)
+
+static bool intel_spi_pci_set_writeable(void __iomem *base, void *data)
+{
+ struct pci_dev *pdev = data;
+ u32 bcr;
+
+ /* Try to make the chip read/write */
+ pci_read_config_dword(pdev, BCR, &bcr);
+ if (!(bcr & BCR_WPD)) {
+ bcr |= BCR_WPD;
+ pci_write_config_dword(pdev, BCR, bcr);
+ pci_read_config_dword(pdev, BCR, &bcr);
+ }
+
+ return bcr & BCR_WPD;
+}
+
+static const struct intel_spi_boardinfo bxt_info = {
+ .type = INTEL_SPI_BXT,
+ .set_writeable = intel_spi_pci_set_writeable,
+};
+
+static const struct intel_spi_boardinfo cnl_info = {
+ .type = INTEL_SPI_CNL,
+ .set_writeable = intel_spi_pci_set_writeable,
+};
+
+static int intel_spi_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct intel_spi_boardinfo *info;
+ int ret;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ info = devm_kmemdup(&pdev->dev, (void *)id->driver_data, sizeof(*info),
+ GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->data = pdev;
+ return intel_spi_probe(&pdev->dev, &pdev->resource[0], info);
+}
+
+static const struct pci_device_id intel_spi_pci_ids[] = {
+ { PCI_VDEVICE(INTEL, 0x02a4), (unsigned long)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x06a4), (unsigned long)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x18e0), (unsigned long)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x19e0), (unsigned long)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x1bca), (unsigned long)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x34a4), (unsigned long)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x38a4), (unsigned long)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x43a4), (unsigned long)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0x4b24), (unsigned long)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x4da4), (unsigned long)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x51a4), (unsigned long)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0x54a4), (unsigned long)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0x5794), (unsigned long)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0x7a24), (unsigned long)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0x7aa4), (unsigned long)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0x7e23), (unsigned long)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0xa0a4), (unsigned long)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0xa1a4), (unsigned long)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0xa224), (unsigned long)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0xa324), (unsigned long)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0xa3a4), (unsigned long)&bxt_info },
+ { },
+};
+MODULE_DEVICE_TABLE(pci, intel_spi_pci_ids);
+
+static struct pci_driver intel_spi_pci_driver = {
+ .name = "intel-spi",
+ .id_table = intel_spi_pci_ids,
+ .probe = intel_spi_pci_probe,
+};
+
+module_pci_driver(intel_spi_pci_driver);
+
+MODULE_DESCRIPTION("Intel PCH/PCU SPI flash PCI driver");
+MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-intel-platform.c b/drivers/spi/spi-intel-platform.c
new file mode 100644
index 000000000..2ef09fa35
--- /dev/null
+++ b/drivers/spi/spi-intel-platform.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel PCH/PCU SPI flash platform driver.
+ *
+ * Copyright (C) 2016 - 2022, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "spi-intel.h"
+
+static int intel_spi_platform_probe(struct platform_device *pdev)
+{
+ struct intel_spi_boardinfo *info;
+ struct resource *mem;
+
+ info = dev_get_platdata(&pdev->dev);
+ if (!info)
+ return -EINVAL;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ return intel_spi_probe(&pdev->dev, mem, info);
+}
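+
+/*
+ * Minimal usage sketch, not part of this driver: a hypothetical parent
+ * driver (for example an LPC/eSPI bridge) would hand over the MMIO window
+ * and a struct intel_spi_boardinfo as platform data, roughly:
+ *
+ *	static const struct intel_spi_boardinfo bxt_board_info = {
+ *		.type = INTEL_SPI_BXT,
+ *	};
+ *
+ *	pdev = platform_device_register_resndata(parent, "intel-spi", -1,
+ *						 &mmio_res, 1, &bxt_board_info,
+ *						 sizeof(bxt_board_info));
+ *
+ * The names above are illustrative; only the "intel-spi" device name and
+ * the platform data layout are fixed by this file.
+ */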
+
+static struct platform_driver intel_spi_platform_driver = {
+ .probe = intel_spi_platform_probe,
+ .driver = {
+ .name = "intel-spi",
+ },
+};
+
+module_platform_driver(intel_spi_platform_driver);
+
+MODULE_DESCRIPTION("Intel PCH/PCU SPI flash platform driver");
+MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:intel-spi");
diff --git a/drivers/spi/spi-intel.c b/drivers/spi/spi-intel.c
new file mode 100644
index 000000000..54fc226e1
--- /dev/null
+++ b/drivers/spi/spi-intel.c
@@ -0,0 +1,1431 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel PCH/PCU SPI flash driver.
+ *
+ * Copyright (C) 2016 - 2022, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ */
+
+#include <linux/iopoll.h>
+#include <linux/module.h>
+
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/spi-nor.h>
+
+#include <linux/spi/flash.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+
+#include "spi-intel.h"
+
+/* Offsets are from @ispi->base */
+#define BFPREG 0x00
+
+#define HSFSTS_CTL 0x04
+#define HSFSTS_CTL_FSMIE BIT(31)
+#define HSFSTS_CTL_FDBC_SHIFT 24
+#define HSFSTS_CTL_FDBC_MASK (0x3f << HSFSTS_CTL_FDBC_SHIFT)
+
+#define HSFSTS_CTL_FCYCLE_SHIFT 17
+#define HSFSTS_CTL_FCYCLE_MASK (0x0f << HSFSTS_CTL_FCYCLE_SHIFT)
+/* HW sequencer opcodes */
+#define HSFSTS_CTL_FCYCLE_READ (0x00 << HSFSTS_CTL_FCYCLE_SHIFT)
+#define HSFSTS_CTL_FCYCLE_WRITE (0x02 << HSFSTS_CTL_FCYCLE_SHIFT)
+#define HSFSTS_CTL_FCYCLE_ERASE (0x03 << HSFSTS_CTL_FCYCLE_SHIFT)
+#define HSFSTS_CTL_FCYCLE_ERASE_64K (0x04 << HSFSTS_CTL_FCYCLE_SHIFT)
+#define HSFSTS_CTL_FCYCLE_RDID (0x06 << HSFSTS_CTL_FCYCLE_SHIFT)
+#define HSFSTS_CTL_FCYCLE_WRSR (0x07 << HSFSTS_CTL_FCYCLE_SHIFT)
+#define HSFSTS_CTL_FCYCLE_RDSR (0x08 << HSFSTS_CTL_FCYCLE_SHIFT)
+
+#define HSFSTS_CTL_FGO BIT(16)
+#define HSFSTS_CTL_FLOCKDN BIT(15)
+#define HSFSTS_CTL_FDV BIT(14)
+#define HSFSTS_CTL_SCIP BIT(5)
+#define HSFSTS_CTL_AEL BIT(2)
+#define HSFSTS_CTL_FCERR BIT(1)
+#define HSFSTS_CTL_FDONE BIT(0)
+
+#define FADDR 0x08
+#define DLOCK 0x0c
+#define FDATA(n) (0x10 + ((n) * 4))
+
+#define FRACC 0x50
+
+#define FREG(n) (0x54 + ((n) * 4))
+#define FREG_BASE_MASK GENMASK(14, 0)
+#define FREG_LIMIT_SHIFT 16
+#define FREG_LIMIT_MASK GENMASK(30, 16)
+
+/* Offset is from @ispi->pregs */
+#define PR(n) ((n) * 4)
+#define PR_WPE BIT(31)
+#define PR_LIMIT_SHIFT 16
+#define PR_LIMIT_MASK GENMASK(30, 16)
+#define PR_RPE BIT(15)
+#define PR_BASE_MASK GENMASK(14, 0)
+
+/* Offsets are from @ispi->sregs */
+#define SSFSTS_CTL 0x00
+#define SSFSTS_CTL_FSMIE BIT(23)
+#define SSFSTS_CTL_DS BIT(22)
+#define SSFSTS_CTL_DBC_SHIFT 16
+#define SSFSTS_CTL_SPOP BIT(11)
+#define SSFSTS_CTL_ACS BIT(10)
+#define SSFSTS_CTL_SCGO BIT(9)
+#define SSFSTS_CTL_COP_SHIFT 12
+#define SSFSTS_CTL_FRS BIT(7)
+#define SSFSTS_CTL_DOFRS BIT(6)
+#define SSFSTS_CTL_AEL BIT(4)
+#define SSFSTS_CTL_FCERR BIT(3)
+#define SSFSTS_CTL_FDONE BIT(2)
+#define SSFSTS_CTL_SCIP BIT(0)
+
+#define PREOP_OPTYPE 0x04
+#define OPMENU0 0x08
+#define OPMENU1 0x0c
+
+#define OPTYPE_READ_NO_ADDR 0
+#define OPTYPE_WRITE_NO_ADDR 1
+#define OPTYPE_READ_WITH_ADDR 2
+#define OPTYPE_WRITE_WITH_ADDR 3
+
+/* CPU specifics */
+#define BYT_PR 0x74
+#define BYT_SSFSTS_CTL 0x90
+#define BYT_FREG_NUM 5
+#define BYT_PR_NUM 5
+
+#define LPT_PR 0x74
+#define LPT_SSFSTS_CTL 0x90
+#define LPT_FREG_NUM 5
+#define LPT_PR_NUM 5
+
+#define BXT_PR 0x84
+#define BXT_SSFSTS_CTL 0xa0
+#define BXT_FREG_NUM 12
+#define BXT_PR_NUM 6
+
+#define CNL_PR 0x84
+#define CNL_FREG_NUM 6
+#define CNL_PR_NUM 5
+
+#define LVSCC 0xc4
+#define UVSCC 0xc8
+#define ERASE_OPCODE_SHIFT 8
+#define ERASE_OPCODE_MASK (0xff << ERASE_OPCODE_SHIFT)
+#define ERASE_64K_OPCODE_SHIFT 16
+#define ERASE_64K_OPCODE_MASK (0xff << ERASE_64K_OPCODE_SHIFT)
+
+/* Flash descriptor fields */
+#define FLVALSIG_MAGIC 0x0ff0a55a
+#define FLMAP0_NC_MASK GENMASK(9, 8)
+#define FLMAP0_NC_SHIFT 8
+#define FLMAP0_FCBA_MASK GENMASK(7, 0)
+
+#define FLCOMP_C0DEN_MASK GENMASK(3, 0)
+#define FLCOMP_C0DEN_512K 0x00
+#define FLCOMP_C0DEN_1M 0x01
+#define FLCOMP_C0DEN_2M 0x02
+#define FLCOMP_C0DEN_4M 0x03
+#define FLCOMP_C0DEN_8M 0x04
+#define FLCOMP_C0DEN_16M 0x05
+#define FLCOMP_C0DEN_32M 0x06
+#define FLCOMP_C0DEN_64M 0x07
+
+#define INTEL_SPI_TIMEOUT 5000 /* ms */
+#define INTEL_SPI_FIFO_SZ 64
+
+/**
+ * struct intel_spi - Driver private data
+ * @dev: Device pointer
+ * @info: Pointer to board specific info
+ * @base: Beginning of MMIO space
+ * @pregs: Start of protection registers
+ * @sregs: Start of software sequencer registers
+ * @master: Pointer to the SPI controller structure
+ * @nregions: Maximum number of regions
+ * @pr_num: Maximum number of protected range registers
+ * @chip0_size: Size of the first flash chip in bytes
+ * @locked: Is SPI setting locked
+ * @swseq_reg: Use SW sequencer in register reads/writes
+ * @swseq_erase: Use SW sequencer in erase operation
+ * @atomic_preopcode: Holds preopcode when atomic sequence is requested
+ * @opcodes:	Opcodes which are supported. These are programmed by the BIOS
+ * before it locks down the controller.
+ * @mem_ops: Pointer to SPI MEM ops supported by the controller
+ */
+struct intel_spi {
+ struct device *dev;
+ const struct intel_spi_boardinfo *info;
+ void __iomem *base;
+ void __iomem *pregs;
+ void __iomem *sregs;
+ struct spi_controller *master;
+ size_t nregions;
+ size_t pr_num;
+ size_t chip0_size;
+ bool locked;
+ bool swseq_reg;
+ bool swseq_erase;
+ u8 atomic_preopcode;
+ u8 opcodes[8];
+ const struct intel_spi_mem_op *mem_ops;
+};
+
+struct intel_spi_mem_op {
+ struct spi_mem_op mem_op;
+ u32 replacement_op;
+ int (*exec_op)(struct intel_spi *ispi,
+ const struct spi_mem *mem,
+ const struct intel_spi_mem_op *iop,
+ const struct spi_mem_op *op);
+};
+
+static bool writeable;
+module_param(writeable, bool, 0);
+MODULE_PARM_DESC(writeable, "Enable write access to SPI flash chip (default=0)");
+
+static void intel_spi_dump_regs(struct intel_spi *ispi)
+{
+ u32 value;
+ int i;
+
+ dev_dbg(ispi->dev, "BFPREG=0x%08x\n", readl(ispi->base + BFPREG));
+
+ value = readl(ispi->base + HSFSTS_CTL);
+ dev_dbg(ispi->dev, "HSFSTS_CTL=0x%08x\n", value);
+ if (value & HSFSTS_CTL_FLOCKDN)
+ dev_dbg(ispi->dev, "-> Locked\n");
+
+ dev_dbg(ispi->dev, "FADDR=0x%08x\n", readl(ispi->base + FADDR));
+ dev_dbg(ispi->dev, "DLOCK=0x%08x\n", readl(ispi->base + DLOCK));
+
+ for (i = 0; i < 16; i++)
+ dev_dbg(ispi->dev, "FDATA(%d)=0x%08x\n",
+ i, readl(ispi->base + FDATA(i)));
+
+ dev_dbg(ispi->dev, "FRACC=0x%08x\n", readl(ispi->base + FRACC));
+
+ for (i = 0; i < ispi->nregions; i++)
+ dev_dbg(ispi->dev, "FREG(%d)=0x%08x\n", i,
+ readl(ispi->base + FREG(i)));
+ for (i = 0; i < ispi->pr_num; i++)
+ dev_dbg(ispi->dev, "PR(%d)=0x%08x\n", i,
+ readl(ispi->pregs + PR(i)));
+
+ if (ispi->sregs) {
+ value = readl(ispi->sregs + SSFSTS_CTL);
+ dev_dbg(ispi->dev, "SSFSTS_CTL=0x%08x\n", value);
+ dev_dbg(ispi->dev, "PREOP_OPTYPE=0x%08x\n",
+ readl(ispi->sregs + PREOP_OPTYPE));
+ dev_dbg(ispi->dev, "OPMENU0=0x%08x\n",
+ readl(ispi->sregs + OPMENU0));
+ dev_dbg(ispi->dev, "OPMENU1=0x%08x\n",
+ readl(ispi->sregs + OPMENU1));
+ }
+
+ dev_dbg(ispi->dev, "LVSCC=0x%08x\n", readl(ispi->base + LVSCC));
+ dev_dbg(ispi->dev, "UVSCC=0x%08x\n", readl(ispi->base + UVSCC));
+
+ dev_dbg(ispi->dev, "Protected regions:\n");
+ for (i = 0; i < ispi->pr_num; i++) {
+ u32 base, limit;
+
+ value = readl(ispi->pregs + PR(i));
+ if (!(value & (PR_WPE | PR_RPE)))
+ continue;
+
+ limit = (value & PR_LIMIT_MASK) >> PR_LIMIT_SHIFT;
+ base = value & PR_BASE_MASK;
+
+ dev_dbg(ispi->dev, " %02d base: 0x%08x limit: 0x%08x [%c%c]\n",
+ i, base << 12, (limit << 12) | 0xfff,
+ value & PR_WPE ? 'W' : '.', value & PR_RPE ? 'R' : '.');
+ }
+
+ dev_dbg(ispi->dev, "Flash regions:\n");
+ for (i = 0; i < ispi->nregions; i++) {
+ u32 region, base, limit;
+
+ region = readl(ispi->base + FREG(i));
+ base = region & FREG_BASE_MASK;
+ limit = (region & FREG_LIMIT_MASK) >> FREG_LIMIT_SHIFT;
+
+ if (base >= limit || (i > 0 && limit == 0))
+ dev_dbg(ispi->dev, " %02d disabled\n", i);
+ else
+ dev_dbg(ispi->dev, " %02d base: 0x%08x limit: 0x%08x\n",
+ i, base << 12, (limit << 12) | 0xfff);
+ }
+
+ dev_dbg(ispi->dev, "Using %cW sequencer for register access\n",
+ ispi->swseq_reg ? 'S' : 'H');
+ dev_dbg(ispi->dev, "Using %cW sequencer for erase operation\n",
+ ispi->swseq_erase ? 'S' : 'H');
+}
+
+/* Reads max INTEL_SPI_FIFO_SZ bytes from the device fifo */
+static int intel_spi_read_block(struct intel_spi *ispi, void *buf, size_t size)
+{
+ size_t bytes;
+ int i = 0;
+
+ if (size > INTEL_SPI_FIFO_SZ)
+ return -EINVAL;
+
+ while (size > 0) {
+ bytes = min_t(size_t, size, 4);
+ memcpy_fromio(buf, ispi->base + FDATA(i), bytes);
+ size -= bytes;
+ buf += bytes;
+ i++;
+ }
+
+ return 0;
+}
+
+/* Writes max INTEL_SPI_FIFO_SZ bytes to the device fifo */
+static int intel_spi_write_block(struct intel_spi *ispi, const void *buf,
+ size_t size)
+{
+ size_t bytes;
+ int i = 0;
+
+ if (size > INTEL_SPI_FIFO_SZ)
+ return -EINVAL;
+
+ while (size > 0) {
+ bytes = min_t(size_t, size, 4);
+ memcpy_toio(ispi->base + FDATA(i), buf, bytes);
+ size -= bytes;
+ buf += bytes;
+ i++;
+ }
+
+ return 0;
+}
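+
+/*
+ * Illustrative example, not part of the driver: a hypothetical 10-byte
+ * transfer is copied as 4 + 4 + 2 bytes into FDATA(0), FDATA(1) and
+ * FDATA(2); a full 64-byte FIFO transfer spans FDATA(0)..FDATA(15).
+ */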
+
+static int intel_spi_wait_hw_busy(struct intel_spi *ispi)
+{
+ u32 val;
+
+ return readl_poll_timeout(ispi->base + HSFSTS_CTL, val,
+ !(val & HSFSTS_CTL_SCIP), 0,
+ INTEL_SPI_TIMEOUT * 1000);
+}
+
+static int intel_spi_wait_sw_busy(struct intel_spi *ispi)
+{
+ u32 val;
+
+ return readl_poll_timeout(ispi->sregs + SSFSTS_CTL, val,
+ !(val & SSFSTS_CTL_SCIP), 0,
+ INTEL_SPI_TIMEOUT * 1000);
+}
+
+static bool intel_spi_set_writeable(struct intel_spi *ispi)
+{
+ if (!ispi->info->set_writeable)
+ return false;
+
+ return ispi->info->set_writeable(ispi->base, ispi->info->data);
+}
+
+static int intel_spi_opcode_index(struct intel_spi *ispi, u8 opcode, int optype)
+{
+ int i;
+ int preop;
+
+ if (ispi->locked) {
+ for (i = 0; i < ARRAY_SIZE(ispi->opcodes); i++)
+ if (ispi->opcodes[i] == opcode)
+ return i;
+
+ return -EINVAL;
+ }
+
+ /* The lock is off, so just use index 0 */
+ writel(opcode, ispi->sregs + OPMENU0);
+ preop = readw(ispi->sregs + PREOP_OPTYPE);
+ writel(optype << 16 | preop, ispi->sregs + PREOP_OPTYPE);
+
+ return 0;
+}
+
+static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, size_t len)
+{
+ u32 val, status;
+ int ret;
+
+ val = readl(ispi->base + HSFSTS_CTL);
+ val &= ~(HSFSTS_CTL_FCYCLE_MASK | HSFSTS_CTL_FDBC_MASK);
+
+ switch (opcode) {
+ case SPINOR_OP_RDID:
+ val |= HSFSTS_CTL_FCYCLE_RDID;
+ break;
+ case SPINOR_OP_WRSR:
+ val |= HSFSTS_CTL_FCYCLE_WRSR;
+ break;
+ case SPINOR_OP_RDSR:
+ val |= HSFSTS_CTL_FCYCLE_RDSR;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (len > INTEL_SPI_FIFO_SZ)
+ return -EINVAL;
+
+ val |= (len - 1) << HSFSTS_CTL_FDBC_SHIFT;
+ val |= HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
+ val |= HSFSTS_CTL_FGO;
+ writel(val, ispi->base + HSFSTS_CTL);
+
+ ret = intel_spi_wait_hw_busy(ispi);
+ if (ret)
+ return ret;
+
+ status = readl(ispi->base + HSFSTS_CTL);
+ if (status & HSFSTS_CTL_FCERR)
+ return -EIO;
+ else if (status & HSFSTS_CTL_AEL)
+ return -EACCES;
+
+ return 0;
+}
+
+static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, size_t len,
+ int optype)
+{
+ u32 val = 0, status;
+ u8 atomic_preopcode;
+ int ret;
+
+ ret = intel_spi_opcode_index(ispi, opcode, optype);
+ if (ret < 0)
+ return ret;
+
+ if (len > INTEL_SPI_FIFO_SZ)
+ return -EINVAL;
+
+ /*
+ * Always clear it after each SW sequencer operation regardless
+ * of whether it is successful or not.
+ */
+ atomic_preopcode = ispi->atomic_preopcode;
+ ispi->atomic_preopcode = 0;
+
+ /* Only mark 'Data Cycle' bit when there is data to be transferred */
+ if (len > 0)
+ val = ((len - 1) << SSFSTS_CTL_DBC_SHIFT) | SSFSTS_CTL_DS;
+ val |= ret << SSFSTS_CTL_COP_SHIFT;
+ val |= SSFSTS_CTL_FCERR | SSFSTS_CTL_FDONE;
+ val |= SSFSTS_CTL_SCGO;
+ if (atomic_preopcode) {
+ u16 preop;
+
+ switch (optype) {
+ case OPTYPE_WRITE_NO_ADDR:
+ case OPTYPE_WRITE_WITH_ADDR:
+ /* Pick matching preopcode for the atomic sequence */
+ preop = readw(ispi->sregs + PREOP_OPTYPE);
+ if ((preop & 0xff) == atomic_preopcode)
+ ; /* Do nothing */
+ else if ((preop >> 8) == atomic_preopcode)
+ val |= SSFSTS_CTL_SPOP;
+ else
+ return -EINVAL;
+
+ /* Enable atomic sequence */
+ val |= SSFSTS_CTL_ACS;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ }
+ writel(val, ispi->sregs + SSFSTS_CTL);
+
+ ret = intel_spi_wait_sw_busy(ispi);
+ if (ret)
+ return ret;
+
+ status = readl(ispi->sregs + SSFSTS_CTL);
+ if (status & SSFSTS_CTL_FCERR)
+ return -EIO;
+ else if (status & SSFSTS_CTL_AEL)
+ return -EACCES;
+
+ return 0;
+}
+
+static u32 intel_spi_chip_addr(const struct intel_spi *ispi,
+ const struct spi_mem *mem)
+{
+ /* Pick up the correct start address */
+ if (!mem)
+ return 0;
+ return mem->spi->chip_select == 1 ? ispi->chip0_size : 0;
+}
+
+static int intel_spi_read_reg(struct intel_spi *ispi, const struct spi_mem *mem,
+ const struct intel_spi_mem_op *iop,
+ const struct spi_mem_op *op)
+{
+ size_t nbytes = op->data.nbytes;
+ u8 opcode = op->cmd.opcode;
+ int ret;
+
+ writel(intel_spi_chip_addr(ispi, mem), ispi->base + FADDR);
+
+ if (ispi->swseq_reg)
+ ret = intel_spi_sw_cycle(ispi, opcode, nbytes,
+ OPTYPE_READ_NO_ADDR);
+ else
+ ret = intel_spi_hw_cycle(ispi, opcode, nbytes);
+
+ if (ret)
+ return ret;
+
+ return intel_spi_read_block(ispi, op->data.buf.in, nbytes);
+}
+
+static int intel_spi_write_reg(struct intel_spi *ispi, const struct spi_mem *mem,
+ const struct intel_spi_mem_op *iop,
+ const struct spi_mem_op *op)
+{
+ size_t nbytes = op->data.nbytes;
+ u8 opcode = op->cmd.opcode;
+ int ret;
+
+ /*
+	 * This is handled with an atomic operation and preopcode in the
+	 * Intel controller, so we only verify that it is available. If the
+ * controller is not locked, program the opcode to the PREOP
+ * register for later use.
+ *
+ * When hardware sequencer is used there is no need to program
+ * any opcodes (it handles them automatically as part of a command).
+ */
+ if (opcode == SPINOR_OP_WREN) {
+ u16 preop;
+
+ if (!ispi->swseq_reg)
+ return 0;
+
+ preop = readw(ispi->sregs + PREOP_OPTYPE);
+ if ((preop & 0xff) != opcode && (preop >> 8) != opcode) {
+ if (ispi->locked)
+ return -EINVAL;
+ writel(opcode, ispi->sregs + PREOP_OPTYPE);
+ }
+
+ /*
+	 * This enables the atomic sequence on the next SW cycle. It will
+	 * be cleared after the next operation.
+ */
+ ispi->atomic_preopcode = opcode;
+ return 0;
+ }
+
+ /*
+ * We hope that HW sequencer will do the right thing automatically and
+ * with the SW sequencer we cannot use preopcode anyway, so just ignore
+ * the Write Disable operation and pretend it was completed
+ * successfully.
+ */
+ if (opcode == SPINOR_OP_WRDI)
+ return 0;
+
+ writel(intel_spi_chip_addr(ispi, mem), ispi->base + FADDR);
+
+ /* Write the value beforehand */
+ ret = intel_spi_write_block(ispi, op->data.buf.out, nbytes);
+ if (ret)
+ return ret;
+
+ if (ispi->swseq_reg)
+ return intel_spi_sw_cycle(ispi, opcode, nbytes,
+ OPTYPE_WRITE_NO_ADDR);
+ return intel_spi_hw_cycle(ispi, opcode, nbytes);
+}
+
+static int intel_spi_read(struct intel_spi *ispi, const struct spi_mem *mem,
+ const struct intel_spi_mem_op *iop,
+ const struct spi_mem_op *op)
+{
+ u32 addr = intel_spi_chip_addr(ispi, mem) + op->addr.val;
+ size_t block_size, nbytes = op->data.nbytes;
+ void *read_buf = op->data.buf.in;
+ u32 val, status;
+ int ret;
+
+ /*
+ * Atomic sequence is not expected with HW sequencer reads. Make
+ * sure it is cleared regardless.
+ */
+ if (WARN_ON_ONCE(ispi->atomic_preopcode))
+ ispi->atomic_preopcode = 0;
+
+ while (nbytes > 0) {
+ block_size = min_t(size_t, nbytes, INTEL_SPI_FIFO_SZ);
+
+ /* Read cannot cross 4K boundary */
+ block_size = min_t(loff_t, addr + block_size,
+ round_up(addr + 1, SZ_4K)) - addr;
+
+ writel(addr, ispi->base + FADDR);
+
+ val = readl(ispi->base + HSFSTS_CTL);
+ val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK);
+ val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
+ val |= (block_size - 1) << HSFSTS_CTL_FDBC_SHIFT;
+ val |= HSFSTS_CTL_FCYCLE_READ;
+ val |= HSFSTS_CTL_FGO;
+ writel(val, ispi->base + HSFSTS_CTL);
+
+ ret = intel_spi_wait_hw_busy(ispi);
+ if (ret)
+ return ret;
+
+ status = readl(ispi->base + HSFSTS_CTL);
+ if (status & HSFSTS_CTL_FCERR)
+ ret = -EIO;
+ else if (status & HSFSTS_CTL_AEL)
+ ret = -EACCES;
+
+ if (ret < 0) {
+ dev_err(ispi->dev, "read error: %x: %#x\n", addr, status);
+ return ret;
+ }
+
+ ret = intel_spi_read_block(ispi, read_buf, block_size);
+ if (ret)
+ return ret;
+
+ nbytes -= block_size;
+ addr += block_size;
+ read_buf += block_size;
+ }
+
+ return 0;
+}
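+
+/*
+ * Illustrative example, not part of the driver: with a hypothetical
+ * addr = 0x0ff0 and nbytes = 64, round_up(addr + 1, SZ_4K) = 0x1000, so
+ * the first chunk is clamped to 0x1000 - 0x0ff0 = 16 bytes and the
+ * remaining 48 bytes are read in a second cycle starting at the 4K
+ * boundary.
+ */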
+
+static int intel_spi_write(struct intel_spi *ispi, const struct spi_mem *mem,
+ const struct intel_spi_mem_op *iop,
+ const struct spi_mem_op *op)
+{
+ u32 addr = intel_spi_chip_addr(ispi, mem) + op->addr.val;
+ size_t block_size, nbytes = op->data.nbytes;
+ const void *write_buf = op->data.buf.out;
+ u32 val, status;
+ int ret;
+
+ /* Not needed with HW sequencer write, make sure it is cleared */
+ ispi->atomic_preopcode = 0;
+
+ while (nbytes > 0) {
+ block_size = min_t(size_t, nbytes, INTEL_SPI_FIFO_SZ);
+
+ /* Write cannot cross 4K boundary */
+ block_size = min_t(loff_t, addr + block_size,
+ round_up(addr + 1, SZ_4K)) - addr;
+
+ writel(addr, ispi->base + FADDR);
+
+ val = readl(ispi->base + HSFSTS_CTL);
+ val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK);
+ val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
+ val |= (block_size - 1) << HSFSTS_CTL_FDBC_SHIFT;
+ val |= HSFSTS_CTL_FCYCLE_WRITE;
+
+ ret = intel_spi_write_block(ispi, write_buf, block_size);
+ if (ret) {
+ dev_err(ispi->dev, "failed to write block\n");
+ return ret;
+ }
+
+ /* Start the write now */
+ val |= HSFSTS_CTL_FGO;
+ writel(val, ispi->base + HSFSTS_CTL);
+
+ ret = intel_spi_wait_hw_busy(ispi);
+ if (ret) {
+ dev_err(ispi->dev, "timeout\n");
+ return ret;
+ }
+
+ status = readl(ispi->base + HSFSTS_CTL);
+ if (status & HSFSTS_CTL_FCERR)
+ ret = -EIO;
+ else if (status & HSFSTS_CTL_AEL)
+ ret = -EACCES;
+
+ if (ret < 0) {
+ dev_err(ispi->dev, "write error: %x: %#x\n", addr, status);
+ return ret;
+ }
+
+ nbytes -= block_size;
+ addr += block_size;
+ write_buf += block_size;
+ }
+
+ return 0;
+}
+
+static int intel_spi_erase(struct intel_spi *ispi, const struct spi_mem *mem,
+ const struct intel_spi_mem_op *iop,
+ const struct spi_mem_op *op)
+{
+ u32 addr = intel_spi_chip_addr(ispi, mem) + op->addr.val;
+ u8 opcode = op->cmd.opcode;
+ u32 val, status;
+ int ret;
+
+ writel(addr, ispi->base + FADDR);
+
+ if (ispi->swseq_erase)
+ return intel_spi_sw_cycle(ispi, opcode, 0,
+ OPTYPE_WRITE_WITH_ADDR);
+
+ /* Not needed with HW sequencer erase, make sure it is cleared */
+ ispi->atomic_preopcode = 0;
+
+ val = readl(ispi->base + HSFSTS_CTL);
+ val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK);
+ val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
+ val |= HSFSTS_CTL_FGO;
+ val |= iop->replacement_op;
+ writel(val, ispi->base + HSFSTS_CTL);
+
+ ret = intel_spi_wait_hw_busy(ispi);
+ if (ret)
+ return ret;
+
+ status = readl(ispi->base + HSFSTS_CTL);
+ if (status & HSFSTS_CTL_FCERR)
+ return -EIO;
+ if (status & HSFSTS_CTL_AEL)
+ return -EACCES;
+
+ return 0;
+}
+
+static bool intel_spi_cmp_mem_op(const struct intel_spi_mem_op *iop,
+ const struct spi_mem_op *op)
+{
+ if (iop->mem_op.cmd.nbytes != op->cmd.nbytes ||
+ iop->mem_op.cmd.buswidth != op->cmd.buswidth ||
+ iop->mem_op.cmd.dtr != op->cmd.dtr ||
+ iop->mem_op.cmd.opcode != op->cmd.opcode)
+ return false;
+
+ if (iop->mem_op.addr.nbytes != op->addr.nbytes ||
+ iop->mem_op.addr.dtr != op->addr.dtr)
+ return false;
+
+ if (iop->mem_op.data.dir != op->data.dir ||
+ iop->mem_op.data.dtr != op->data.dtr)
+ return false;
+
+ if (iop->mem_op.data.dir != SPI_MEM_NO_DATA) {
+ if (iop->mem_op.data.buswidth != op->data.buswidth)
+ return false;
+ }
+
+ return true;
+}
+
+static const struct intel_spi_mem_op *
+intel_spi_match_mem_op(struct intel_spi *ispi, const struct spi_mem_op *op)
+{
+ const struct intel_spi_mem_op *iop;
+
+ for (iop = ispi->mem_ops; iop->mem_op.cmd.opcode; iop++) {
+ if (intel_spi_cmp_mem_op(iop, op))
+ break;
+ }
+
+ return iop->mem_op.cmd.opcode ? iop : NULL;
+}
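+
+/*
+ * Illustrative example, not part of the driver: a plain 1-1-1 read built
+ * as SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1),
+ *		 SPI_MEM_OP_ADDR(3, addr, 1),
+ *		 SPI_MEM_OP_NO_DUMMY,
+ *		 SPI_MEM_OP_DATA_IN(len, buf, 1))
+ * matches the corresponding INTEL_SPI_MEM_OP() entry in the tables below
+ * and is dispatched to intel_spi_read(), while an opcode missing from the
+ * tables makes intel_spi_match_mem_op() return NULL and the operation is
+ * rejected.
+ */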
+
+static bool intel_spi_supports_mem_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ struct intel_spi *ispi = spi_master_get_devdata(mem->spi->master);
+ const struct intel_spi_mem_op *iop;
+
+ iop = intel_spi_match_mem_op(ispi, op);
+ if (!iop) {
+ dev_dbg(ispi->dev, "%#x not supported\n", op->cmd.opcode);
+ return false;
+ }
+
+ /*
+ * For software sequencer check that the opcode is actually
+ * present in the opmenu if it is locked.
+ */
+ if (ispi->swseq_reg && ispi->locked) {
+ int i;
+
+ /* Check if it is in the locked opcodes list */
+ for (i = 0; i < ARRAY_SIZE(ispi->opcodes); i++) {
+ if (ispi->opcodes[i] == op->cmd.opcode)
+ return true;
+ }
+
+ dev_dbg(ispi->dev, "%#x not supported\n", op->cmd.opcode);
+ return false;
+ }
+
+ return true;
+}
+
+static int intel_spi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ struct intel_spi *ispi = spi_master_get_devdata(mem->spi->master);
+ const struct intel_spi_mem_op *iop;
+
+ iop = intel_spi_match_mem_op(ispi, op);
+ if (!iop)
+ return -EOPNOTSUPP;
+
+ return iop->exec_op(ispi, mem, iop, op);
+}
+
+static const char *intel_spi_get_name(struct spi_mem *mem)
+{
+ const struct intel_spi *ispi = spi_master_get_devdata(mem->spi->master);
+
+ /*
+ * Return name of the flash controller device to be compatible
+ * with the MTD version.
+ */
+ return dev_name(ispi->dev);
+}
+
+static int intel_spi_dirmap_create(struct spi_mem_dirmap_desc *desc)
+{
+ struct intel_spi *ispi = spi_master_get_devdata(desc->mem->spi->master);
+ const struct intel_spi_mem_op *iop;
+
+ iop = intel_spi_match_mem_op(ispi, &desc->info.op_tmpl);
+ if (!iop)
+ return -EOPNOTSUPP;
+
+ desc->priv = (void *)iop;
+ return 0;
+}
+
+static ssize_t intel_spi_dirmap_read(struct spi_mem_dirmap_desc *desc, u64 offs,
+ size_t len, void *buf)
+{
+ struct intel_spi *ispi = spi_master_get_devdata(desc->mem->spi->master);
+ const struct intel_spi_mem_op *iop = desc->priv;
+ struct spi_mem_op op = desc->info.op_tmpl;
+ int ret;
+
+ /* Fill in the gaps */
+ op.addr.val = offs;
+ op.data.nbytes = len;
+ op.data.buf.in = buf;
+
+ ret = iop->exec_op(ispi, desc->mem, iop, &op);
+ return ret ? ret : len;
+}
+
+static ssize_t intel_spi_dirmap_write(struct spi_mem_dirmap_desc *desc, u64 offs,
+ size_t len, const void *buf)
+{
+ struct intel_spi *ispi = spi_master_get_devdata(desc->mem->spi->master);
+ const struct intel_spi_mem_op *iop = desc->priv;
+ struct spi_mem_op op = desc->info.op_tmpl;
+ int ret;
+
+ op.addr.val = offs;
+ op.data.nbytes = len;
+ op.data.buf.out = buf;
+
+ ret = iop->exec_op(ispi, desc->mem, iop, &op);
+ return ret ? ret : len;
+}
+
+static const struct spi_controller_mem_ops intel_spi_mem_ops = {
+ .supports_op = intel_spi_supports_mem_op,
+ .exec_op = intel_spi_exec_mem_op,
+ .get_name = intel_spi_get_name,
+ .dirmap_create = intel_spi_dirmap_create,
+ .dirmap_read = intel_spi_dirmap_read,
+ .dirmap_write = intel_spi_dirmap_write,
+};
+
+#define INTEL_SPI_OP_ADDR(__nbytes) \
+ { \
+ .nbytes = __nbytes, \
+ }
+
+#define INTEL_SPI_OP_NO_DATA \
+ { \
+ .dir = SPI_MEM_NO_DATA, \
+ }
+
+#define INTEL_SPI_OP_DATA_IN(__buswidth) \
+ { \
+ .dir = SPI_MEM_DATA_IN, \
+ .buswidth = __buswidth, \
+ }
+
+#define INTEL_SPI_OP_DATA_OUT(__buswidth) \
+ { \
+ .dir = SPI_MEM_DATA_OUT, \
+ .buswidth = __buswidth, \
+ }
+
+#define INTEL_SPI_MEM_OP(__cmd, __addr, __data, __exec_op) \
+ { \
+ .mem_op = { \
+ .cmd = __cmd, \
+ .addr = __addr, \
+ .data = __data, \
+ }, \
+ .exec_op = __exec_op, \
+ }
+
+#define INTEL_SPI_MEM_OP_REPL(__cmd, __addr, __data, __exec_op, __repl) \
+ { \
+ .mem_op = { \
+ .cmd = __cmd, \
+ .addr = __addr, \
+ .data = __data, \
+ }, \
+ .exec_op = __exec_op, \
+ .replacement_op = __repl, \
+ }
+
+/*
+ * The controller handles pretty much everything internally based on the
+ * SFDP data but we want to make sure we only support the operations
+ * actually possible. Only check buswidth and transfer direction, the
+ * core validates data.
+ */
+#define INTEL_SPI_GENERIC_OPS \
+ /* Status register operations */ \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDID, 1), \
+ SPI_MEM_OP_NO_ADDR, \
+ INTEL_SPI_OP_DATA_IN(1), \
+ intel_spi_read_reg), \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR, 1), \
+ SPI_MEM_OP_NO_ADDR, \
+ INTEL_SPI_OP_DATA_IN(1), \
+ intel_spi_read_reg), \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 1), \
+ SPI_MEM_OP_NO_ADDR, \
+ INTEL_SPI_OP_DATA_OUT(1), \
+ intel_spi_write_reg), \
+ /* Normal read */ \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1), \
+ INTEL_SPI_OP_ADDR(3), \
+ INTEL_SPI_OP_DATA_IN(1), \
+ intel_spi_read), \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1), \
+ INTEL_SPI_OP_ADDR(3), \
+ INTEL_SPI_OP_DATA_IN(2), \
+ intel_spi_read), \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1), \
+ INTEL_SPI_OP_ADDR(3), \
+ INTEL_SPI_OP_DATA_IN(4), \
+ intel_spi_read), \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1), \
+ INTEL_SPI_OP_ADDR(4), \
+ INTEL_SPI_OP_DATA_IN(1), \
+ intel_spi_read), \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1), \
+ INTEL_SPI_OP_ADDR(4), \
+ INTEL_SPI_OP_DATA_IN(2), \
+ intel_spi_read), \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1), \
+ INTEL_SPI_OP_ADDR(4), \
+ INTEL_SPI_OP_DATA_IN(4), \
+ intel_spi_read), \
+ /* Fast read */ \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1), \
+ INTEL_SPI_OP_ADDR(3), \
+ INTEL_SPI_OP_DATA_IN(1), \
+ intel_spi_read), \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1), \
+ INTEL_SPI_OP_ADDR(3), \
+ INTEL_SPI_OP_DATA_IN(2), \
+ intel_spi_read), \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1), \
+ INTEL_SPI_OP_ADDR(3), \
+ INTEL_SPI_OP_DATA_IN(4), \
+ intel_spi_read), \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1), \
+ INTEL_SPI_OP_ADDR(4), \
+ INTEL_SPI_OP_DATA_IN(1), \
+ intel_spi_read), \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1), \
+ INTEL_SPI_OP_ADDR(4), \
+ INTEL_SPI_OP_DATA_IN(2), \
+ intel_spi_read), \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1), \
+ INTEL_SPI_OP_ADDR(4), \
+ INTEL_SPI_OP_DATA_IN(4), \
+ intel_spi_read), \
+ /* Read with 4-byte address opcode */ \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_4B, 1), \
+ INTEL_SPI_OP_ADDR(4), \
+ INTEL_SPI_OP_DATA_IN(1), \
+ intel_spi_read), \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_4B, 1), \
+ INTEL_SPI_OP_ADDR(4), \
+ INTEL_SPI_OP_DATA_IN(2), \
+ intel_spi_read), \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_4B, 1), \
+ INTEL_SPI_OP_ADDR(4), \
+ INTEL_SPI_OP_DATA_IN(4), \
+ intel_spi_read), \
+ /* Fast read with 4-byte address opcode */ \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST_4B, 1), \
+ INTEL_SPI_OP_ADDR(4), \
+ INTEL_SPI_OP_DATA_IN(1), \
+ intel_spi_read), \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST_4B, 1), \
+ INTEL_SPI_OP_ADDR(4), \
+ INTEL_SPI_OP_DATA_IN(2), \
+ intel_spi_read), \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST_4B, 1), \
+ INTEL_SPI_OP_ADDR(4), \
+ INTEL_SPI_OP_DATA_IN(4), \
+ intel_spi_read), \
+ /* Write operations */ \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_PP, 1), \
+ INTEL_SPI_OP_ADDR(3), \
+ INTEL_SPI_OP_DATA_OUT(1), \
+ intel_spi_write), \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_PP, 1), \
+ INTEL_SPI_OP_ADDR(4), \
+ INTEL_SPI_OP_DATA_OUT(1), \
+ intel_spi_write), \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_PP_4B, 1), \
+ INTEL_SPI_OP_ADDR(4), \
+ INTEL_SPI_OP_DATA_OUT(1), \
+ intel_spi_write), \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREN, 1), \
+ SPI_MEM_OP_NO_ADDR, \
+ SPI_MEM_OP_NO_DATA, \
+ intel_spi_write_reg), \
+ INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRDI, 1), \
+ SPI_MEM_OP_NO_ADDR, \
+ SPI_MEM_OP_NO_DATA, \
+ intel_spi_write_reg), \
+ /* Erase operations */ \
+ INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_BE_4K, 1), \
+ INTEL_SPI_OP_ADDR(3), \
+ SPI_MEM_OP_NO_DATA, \
+ intel_spi_erase, \
+ HSFSTS_CTL_FCYCLE_ERASE), \
+ INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_BE_4K, 1), \
+ INTEL_SPI_OP_ADDR(4), \
+ SPI_MEM_OP_NO_DATA, \
+ intel_spi_erase, \
+ HSFSTS_CTL_FCYCLE_ERASE), \
+ INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_BE_4K_4B, 1), \
+ INTEL_SPI_OP_ADDR(4), \
+ SPI_MEM_OP_NO_DATA, \
+ intel_spi_erase, \
+ HSFSTS_CTL_FCYCLE_ERASE) \
+
+static const struct intel_spi_mem_op generic_mem_ops[] = {
+ INTEL_SPI_GENERIC_OPS,
+ { },
+};
+
+static const struct intel_spi_mem_op erase_64k_mem_ops[] = {
+ INTEL_SPI_GENERIC_OPS,
+ /* 64k sector erase operations */
+ INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_SE, 1),
+ INTEL_SPI_OP_ADDR(3),
+ SPI_MEM_OP_NO_DATA,
+ intel_spi_erase,
+ HSFSTS_CTL_FCYCLE_ERASE_64K),
+ INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_SE, 1),
+ INTEL_SPI_OP_ADDR(4),
+ SPI_MEM_OP_NO_DATA,
+ intel_spi_erase,
+ HSFSTS_CTL_FCYCLE_ERASE_64K),
+ INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_SE_4B, 1),
+ INTEL_SPI_OP_ADDR(4),
+ SPI_MEM_OP_NO_DATA,
+ intel_spi_erase,
+ HSFSTS_CTL_FCYCLE_ERASE_64K),
+ { },
+};
+
+static int intel_spi_init(struct intel_spi *ispi)
+{
+ u32 opmenu0, opmenu1, lvscc, uvscc, val;
+ bool erase_64k = false;
+ int i;
+
+ switch (ispi->info->type) {
+ case INTEL_SPI_BYT:
+ ispi->sregs = ispi->base + BYT_SSFSTS_CTL;
+ ispi->pregs = ispi->base + BYT_PR;
+ ispi->nregions = BYT_FREG_NUM;
+ ispi->pr_num = BYT_PR_NUM;
+ ispi->swseq_reg = true;
+ break;
+
+ case INTEL_SPI_LPT:
+ ispi->sregs = ispi->base + LPT_SSFSTS_CTL;
+ ispi->pregs = ispi->base + LPT_PR;
+ ispi->nregions = LPT_FREG_NUM;
+ ispi->pr_num = LPT_PR_NUM;
+ ispi->swseq_reg = true;
+ break;
+
+ case INTEL_SPI_BXT:
+ ispi->sregs = ispi->base + BXT_SSFSTS_CTL;
+ ispi->pregs = ispi->base + BXT_PR;
+ ispi->nregions = BXT_FREG_NUM;
+ ispi->pr_num = BXT_PR_NUM;
+ erase_64k = true;
+ break;
+
+ case INTEL_SPI_CNL:
+ ispi->sregs = NULL;
+ ispi->pregs = ispi->base + CNL_PR;
+ ispi->nregions = CNL_FREG_NUM;
+ ispi->pr_num = CNL_PR_NUM;
+ erase_64k = true;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ /* Try to disable write protection if user asked to do so */
+ if (writeable && !intel_spi_set_writeable(ispi)) {
+ dev_warn(ispi->dev, "can't disable chip write protection\n");
+ writeable = false;
+ }
+
+ /* Disable #SMI generation from HW sequencer */
+ val = readl(ispi->base + HSFSTS_CTL);
+ val &= ~HSFSTS_CTL_FSMIE;
+ writel(val, ispi->base + HSFSTS_CTL);
+
+ /*
+ * Determine whether erase operation should use HW or SW sequencer.
+ *
+ * The HW sequencer has a predefined list of opcodes, with only the
+ * erase opcode being programmable in LVSCC and UVSCC registers.
+ * If these registers don't contain a valid erase opcode, erase
+ * cannot be done using HW sequencer.
+ */
+ lvscc = readl(ispi->base + LVSCC);
+ uvscc = readl(ispi->base + UVSCC);
+ if (!(lvscc & ERASE_OPCODE_MASK) || !(uvscc & ERASE_OPCODE_MASK))
+ ispi->swseq_erase = true;
+ /* SPI controller on Intel BXT supports 64K erase opcode */
+ if (ispi->info->type == INTEL_SPI_BXT && !ispi->swseq_erase)
+ if (!(lvscc & ERASE_64K_OPCODE_MASK) ||
+ !(uvscc & ERASE_64K_OPCODE_MASK))
+ erase_64k = false;
+
+ if (!ispi->sregs && (ispi->swseq_reg || ispi->swseq_erase)) {
+ dev_err(ispi->dev, "software sequencer not supported, but required\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Some controllers can only do basic operations using hardware
+ * sequencer. All other operations are supposed to be carried out
+ * using software sequencer.
+ */
+ if (ispi->swseq_reg) {
+ /* Disable #SMI generation from SW sequencer */
+ val = readl(ispi->sregs + SSFSTS_CTL);
+ val &= ~SSFSTS_CTL_FSMIE;
+ writel(val, ispi->sregs + SSFSTS_CTL);
+ }
+
+ /* Check controller's lock status */
+ val = readl(ispi->base + HSFSTS_CTL);
+ ispi->locked = !!(val & HSFSTS_CTL_FLOCKDN);
+
+ if (ispi->locked && ispi->sregs) {
+ /*
+ * BIOS programs allowed opcodes and then locks down the
+ * register. So read back what opcodes it decided to support.
+ * That's the set we are going to support as well.
+ */
+ opmenu0 = readl(ispi->sregs + OPMENU0);
+ opmenu1 = readl(ispi->sregs + OPMENU1);
+
+ if (opmenu0 && opmenu1) {
+ for (i = 0; i < ARRAY_SIZE(ispi->opcodes) / 2; i++) {
+ ispi->opcodes[i] = opmenu0 >> i * 8;
+ ispi->opcodes[i + 4] = opmenu1 >> i * 8;
+ }
+ }
+ }
+
+ if (erase_64k) {
+ dev_dbg(ispi->dev, "Using erase_64k memory operations");
+ ispi->mem_ops = erase_64k_mem_ops;
+ } else {
+ dev_dbg(ispi->dev, "Using generic memory operations");
+ ispi->mem_ops = generic_mem_ops;
+ }
+
+ intel_spi_dump_regs(ispi);
+ return 0;
+}
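+
+/*
+ * Illustrative example, not part of the driver: with hypothetical locked
+ * values OPMENU0 = 0x05d80302 and OPMENU1 = 0x0201039f, the byte-wise
+ * unpacking in intel_spi_init() yields
+ * opcodes[] = { 0x02, 0x03, 0xd8, 0x05, 0x9f, 0x03, 0x01, 0x02 },
+ * and the software sequencer then accepts only these opcodes.
+ */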
+
+static bool intel_spi_is_protected(const struct intel_spi *ispi,
+ unsigned int base, unsigned int limit)
+{
+ int i;
+
+ for (i = 0; i < ispi->pr_num; i++) {
+ u32 pr_base, pr_limit, pr_value;
+
+ pr_value = readl(ispi->pregs + PR(i));
+ if (!(pr_value & (PR_WPE | PR_RPE)))
+ continue;
+
+ pr_limit = (pr_value & PR_LIMIT_MASK) >> PR_LIMIT_SHIFT;
+ pr_base = pr_value & PR_BASE_MASK;
+
+ if (pr_base >= base && pr_limit <= limit)
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * There will be a single partition holding all enabled flash regions. We
+ * call this "BIOS".
+ */
+static void intel_spi_fill_partition(struct intel_spi *ispi,
+ struct mtd_partition *part)
+{
+ u64 end;
+ int i;
+
+ memset(part, 0, sizeof(*part));
+
+ /* Start from the mandatory descriptor region */
+ part->size = 4096;
+ part->name = "BIOS";
+
+ /*
+ * Now try to find where this partition ends based on the flash
+ * region registers.
+ */
+ for (i = 1; i < ispi->nregions; i++) {
+ u32 region, base, limit;
+
+ region = readl(ispi->base + FREG(i));
+ base = region & FREG_BASE_MASK;
+ limit = (region & FREG_LIMIT_MASK) >> FREG_LIMIT_SHIFT;
+
+ if (base >= limit || limit == 0)
+ continue;
+
+ /*
+ * If any of the regions have protection bits set, make the
+ * whole partition read-only to be on the safe side.
+ *
+ * Also if the user did not ask the chip to be writeable
+ * mask the bit too.
+ */
+ if (!writeable || intel_spi_is_protected(ispi, base, limit))
+ part->mask_flags |= MTD_WRITEABLE;
+
+ end = (limit << 12) + 4096;
+ if (end > part->size)
+ part->size = end;
+ }
+}
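+
+/*
+ * Illustrative example, not part of the driver: a hypothetical enabled
+ * region with base = 0x001 and limit = 0x7ff spans 0x1000..0x7fffff, so
+ * end = (0x7ff << 12) + 4096 = 0x800000 and the "BIOS" partition grows
+ * to 8 MiB.
+ */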
+
+static int intel_spi_read_desc(struct intel_spi *ispi)
+{
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 0),
+ SPI_MEM_OP_ADDR(3, 0, 0),
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_IN(0, NULL, 0));
+ u32 buf[2], nc, fcba, flcomp;
+ ssize_t ret;
+
+ op.addr.val = 0x10;
+ op.data.buf.in = buf;
+ op.data.nbytes = sizeof(buf);
+
+ ret = intel_spi_read(ispi, NULL, NULL, &op);
+ if (ret) {
+ dev_warn(ispi->dev, "failed to read descriptor\n");
+ return ret;
+ }
+
+ dev_dbg(ispi->dev, "FLVALSIG=0x%08x\n", buf[0]);
+ dev_dbg(ispi->dev, "FLMAP0=0x%08x\n", buf[1]);
+
+ if (buf[0] != FLVALSIG_MAGIC) {
+ dev_warn(ispi->dev, "descriptor signature not valid\n");
+ return -ENODEV;
+ }
+
+ fcba = (buf[1] & FLMAP0_FCBA_MASK) << 4;
+ dev_dbg(ispi->dev, "FCBA=%#x\n", fcba);
+
+ op.addr.val = fcba;
+ op.data.buf.in = &flcomp;
+ op.data.nbytes = sizeof(flcomp);
+
+ ret = intel_spi_read(ispi, NULL, NULL, &op);
+ if (ret) {
+ dev_warn(ispi->dev, "failed to read FLCOMP\n");
+ return -ENODEV;
+ }
+
+ dev_dbg(ispi->dev, "FLCOMP=0x%08x\n", flcomp);
+
+ switch (flcomp & FLCOMP_C0DEN_MASK) {
+ case FLCOMP_C0DEN_512K:
+ ispi->chip0_size = SZ_512K;
+ break;
+ case FLCOMP_C0DEN_1M:
+ ispi->chip0_size = SZ_1M;
+ break;
+ case FLCOMP_C0DEN_2M:
+ ispi->chip0_size = SZ_2M;
+ break;
+ case FLCOMP_C0DEN_4M:
+ ispi->chip0_size = SZ_4M;
+ break;
+ case FLCOMP_C0DEN_8M:
+ ispi->chip0_size = SZ_8M;
+ break;
+ case FLCOMP_C0DEN_16M:
+ ispi->chip0_size = SZ_16M;
+ break;
+ case FLCOMP_C0DEN_32M:
+ ispi->chip0_size = SZ_32M;
+ break;
+ case FLCOMP_C0DEN_64M:
+ ispi->chip0_size = SZ_64M;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dev_dbg(ispi->dev, "chip0 size %zd KB\n", ispi->chip0_size / SZ_1K);
+
+ nc = (buf[1] & FLMAP0_NC_MASK) >> FLMAP0_NC_SHIFT;
+ if (!nc)
+ ispi->master->num_chipselect = 1;
+ else if (nc == 1)
+ ispi->master->num_chipselect = 2;
+ else
+ return -EINVAL;
+
+ dev_dbg(ispi->dev, "%u flash components found\n",
+ ispi->master->num_chipselect);
+ return 0;
+}
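+
+/*
+ * Illustrative example, not part of the driver: with a hypothetical
+ * FLMAP0 = 0x00000103, fcba = 0x03 << 4 = 0x30 and nc = 1, so FLCOMP is
+ * read from offset 0x30 and two chip selects are reported. An FLCOMP
+ * density of FLCOMP_C0DEN_16M then sets chip0_size to 16 MiB.
+ */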
+
+static int intel_spi_populate_chip(struct intel_spi *ispi)
+{
+ struct flash_platform_data *pdata;
+ struct spi_board_info chip;
+ int ret;
+
+ pdata = devm_kzalloc(ispi->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ pdata->nr_parts = 1;
+ pdata->parts = devm_kcalloc(ispi->dev, pdata->nr_parts,
+ sizeof(*pdata->parts), GFP_KERNEL);
+ if (!pdata->parts)
+ return -ENOMEM;
+
+ intel_spi_fill_partition(ispi, pdata->parts);
+
+ memset(&chip, 0, sizeof(chip));
+ snprintf(chip.modalias, 8, "spi-nor");
+ chip.platform_data = pdata;
+
+ if (!spi_new_device(ispi->master, &chip))
+ return -ENODEV;
+
+ ret = intel_spi_read_desc(ispi);
+ if (ret)
+ return ret;
+
+ /* Add the second chip if present */
+ if (ispi->master->num_chipselect < 2)
+ return 0;
+
+ chip.platform_data = NULL;
+ chip.chip_select = 1;
+
+ if (!spi_new_device(ispi->master, &chip))
+ return -ENODEV;
+ return 0;
+}
+
+/**
+ * intel_spi_probe() - Probe the Intel SPI flash controller
+ * @dev: Pointer to the parent device
+ * @mem: MMIO resource
+ * @info: Platform specific information
+ *
+ * Probes Intel SPI flash controller and creates the flash chip device.
+ * Returns %0 on success and negative errno in case of failure.
+ */
+int intel_spi_probe(struct device *dev, struct resource *mem,
+ const struct intel_spi_boardinfo *info)
+{
+ struct spi_controller *master;
+ struct intel_spi *ispi;
+ int ret;
+
+ master = devm_spi_alloc_master(dev, sizeof(*ispi));
+ if (!master)
+ return -ENOMEM;
+
+ master->mem_ops = &intel_spi_mem_ops;
+
+ ispi = spi_master_get_devdata(master);
+
+ ispi->base = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(ispi->base))
+ return PTR_ERR(ispi->base);
+
+ ispi->dev = dev;
+ ispi->master = master;
+ ispi->info = info;
+
+ ret = intel_spi_init(ispi);
+ if (ret)
+ return ret;
+
+ ret = devm_spi_register_master(dev, master);
+ if (ret)
+ return ret;
+
+ return intel_spi_populate_chip(ispi);
+}
+EXPORT_SYMBOL_GPL(intel_spi_probe);
+
+MODULE_DESCRIPTION("Intel PCH/PCU SPI flash core driver");
+MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-intel.h b/drivers/spi/spi-intel.h
new file mode 100644
index 000000000..a4f0327a4
--- /dev/null
+++ b/drivers/spi/spi-intel.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Intel PCH/PCU SPI flash driver.
+ *
+ * Copyright (C) 2016 - 2022, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ */
+
+#ifndef SPI_INTEL_H
+#define SPI_INTEL_H
+
+#include <linux/platform_data/x86/spi-intel.h>
+
+struct resource;
+
+int intel_spi_probe(struct device *dev, struct resource *mem,
+ const struct intel_spi_boardinfo *info);
+
+#endif /* SPI_INTEL_H */
diff --git a/drivers/spi/spi-iproc-qspi.c b/drivers/spi/spi-iproc-qspi.c
new file mode 100644
index 000000000..de297dacf
--- /dev/null
+++ b/drivers/spi/spi-iproc-qspi.c
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2016 Broadcom Limited
+ */
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "spi-bcm-qspi.h"
+
+#define INTR_BASE_BIT_SHIFT 0x02
+#define INTR_COUNT 0x07
+
+struct bcm_iproc_intc {
+ struct bcm_qspi_soc_intc soc_intc;
+ struct platform_device *pdev;
+ void __iomem *int_reg;
+ void __iomem *int_status_reg;
+ spinlock_t soclock;
+ bool big_endian;
+};
+
+static u32 bcm_iproc_qspi_get_l2_int_status(struct bcm_qspi_soc_intc *soc_intc)
+{
+ struct bcm_iproc_intc *priv =
+ container_of(soc_intc, struct bcm_iproc_intc, soc_intc);
+ void __iomem *mmio = priv->int_status_reg;
+ int i;
+ u32 val = 0, sts = 0;
+
+ for (i = 0; i < INTR_COUNT; i++) {
+ if (bcm_qspi_readl(priv->big_endian, mmio + (i * 4)))
+ val |= 1UL << i;
+ }
+
+ if (val & INTR_MSPI_DONE_MASK)
+ sts |= MSPI_DONE;
+
+ if (val & BSPI_LR_INTERRUPTS_ALL)
+ sts |= BSPI_DONE;
+
+ if (val & BSPI_LR_INTERRUPTS_ERROR)
+ sts |= BSPI_ERR;
+
+ return sts;
+}
+
+static void bcm_iproc_qspi_int_ack(struct bcm_qspi_soc_intc *soc_intc, int type)
+{
+ struct bcm_iproc_intc *priv =
+ container_of(soc_intc, struct bcm_iproc_intc, soc_intc);
+ void __iomem *mmio = priv->int_status_reg;
+ u32 mask = get_qspi_mask(type);
+ int i;
+
+ for (i = 0; i < INTR_COUNT; i++) {
+ if (mask & (1UL << i))
+ bcm_qspi_writel(priv->big_endian, 1, mmio + (i * 4));
+ }
+}
+
+static void bcm_iproc_qspi_int_set(struct bcm_qspi_soc_intc *soc_intc, int type,
+ bool en)
+{
+ struct bcm_iproc_intc *priv =
+ container_of(soc_intc, struct bcm_iproc_intc, soc_intc);
+ void __iomem *mmio = priv->int_reg;
+ u32 mask = get_qspi_mask(type);
+ u32 val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->soclock, flags);
+
+ val = bcm_qspi_readl(priv->big_endian, mmio);
+
+ if (en)
+ val = val | (mask << INTR_BASE_BIT_SHIFT);
+ else
+ val = val & ~(mask << INTR_BASE_BIT_SHIFT);
+
+ bcm_qspi_writel(priv->big_endian, val, mmio);
+
+ spin_unlock_irqrestore(&priv->soclock, flags);
+}
+
+static int bcm_iproc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct bcm_iproc_intc *priv;
+ struct bcm_qspi_soc_intc *soc_intc;
+ struct resource *res;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ soc_intc = &priv->soc_intc;
+ priv->pdev = pdev;
+
+ spin_lock_init(&priv->soclock);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "intr_regs");
+ priv->int_reg = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->int_reg))
+ return PTR_ERR(priv->int_reg);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "intr_status_reg");
+ priv->int_status_reg = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->int_status_reg))
+ return PTR_ERR(priv->int_status_reg);
+
+ priv->big_endian = of_device_is_big_endian(dev->of_node);
+
+ bcm_iproc_qspi_int_ack(soc_intc, MSPI_BSPI_DONE);
+ bcm_iproc_qspi_int_set(soc_intc, MSPI_BSPI_DONE, false);
+
+ soc_intc->bcm_qspi_int_ack = bcm_iproc_qspi_int_ack;
+ soc_intc->bcm_qspi_int_set = bcm_iproc_qspi_int_set;
+ soc_intc->bcm_qspi_get_int_status = bcm_iproc_qspi_get_l2_int_status;
+
+ return bcm_qspi_probe(pdev, soc_intc);
+}
+
+static int bcm_iproc_remove(struct platform_device *pdev)
+{
+ return bcm_qspi_remove(pdev);
+}
+
+static const struct of_device_id bcm_iproc_of_match[] = {
+ { .compatible = "brcm,spi-nsp-qspi" },
+ { .compatible = "brcm,spi-ns2-qspi" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, bcm_iproc_of_match);
+
+static struct platform_driver bcm_iproc_driver = {
+ .probe = bcm_iproc_probe,
+ .remove = bcm_iproc_remove,
+ .driver = {
+ .name = "bcm_iproc",
+ .pm = &bcm_qspi_pm_ops,
+ .of_match_table = bcm_iproc_of_match,
+ }
+};
+module_platform_driver(bcm_iproc_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Kamal Dasu");
+MODULE_DESCRIPTION("SPI flash driver for Broadcom iProc SoCs");
diff --git a/drivers/spi/spi-jcore.c b/drivers/spi/spi-jcore.c
new file mode 100644
index 000000000..74c8319c2
--- /dev/null
+++ b/drivers/spi/spi-jcore.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * J-Core SPI controller driver
+ *
+ * Copyright (C) 2012-2016 Smart Energy Instruments, Inc.
+ *
+ * Current version by Rich Felker
+ * Based loosely on initial version by Oleksandr G Zhadan
+ *
+ */
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/delay.h>
+
+#define DRV_NAME "jcore_spi"
+
+#define CTRL_REG 0x0
+#define DATA_REG 0x4
+
+#define JCORE_SPI_CTRL_XMIT 0x02
+#define JCORE_SPI_STAT_BUSY 0x02
+#define JCORE_SPI_CTRL_LOOP 0x08
+#define JCORE_SPI_CTRL_CS_BITS 0x15
+
+#define JCORE_SPI_WAIT_RDY_MAX_LOOP 2000000
+
+struct jcore_spi {
+ struct spi_master *master;
+ void __iomem *base;
+ unsigned int cs_reg;
+ unsigned int speed_reg;
+ unsigned int speed_hz;
+ unsigned int clock_freq;
+};
+
+static int jcore_spi_wait(void __iomem *ctrl_reg)
+{
+ unsigned timeout = JCORE_SPI_WAIT_RDY_MAX_LOOP;
+
+ do {
+ if (!(readl(ctrl_reg) & JCORE_SPI_STAT_BUSY))
+ return 0;
+ cpu_relax();
+ } while (--timeout);
+
+ return -EBUSY;
+}
+
+static void jcore_spi_program(struct jcore_spi *hw)
+{
+ void __iomem *ctrl_reg = hw->base + CTRL_REG;
+
+ if (jcore_spi_wait(ctrl_reg))
+ dev_err(hw->master->dev.parent,
+ "timeout waiting to program ctrl reg.\n");
+
+ writel(hw->cs_reg | hw->speed_reg, ctrl_reg);
+}
+
+static void jcore_spi_chipsel(struct spi_device *spi, bool value)
+{
+ struct jcore_spi *hw = spi_master_get_devdata(spi->master);
+ u32 csbit = 1U << (2 * spi->chip_select);
+
+ dev_dbg(hw->master->dev.parent, "chipselect %d\n", spi->chip_select);
+
+ if (value)
+ hw->cs_reg |= csbit;
+ else
+ hw->cs_reg &= ~csbit;
+
+ jcore_spi_program(hw);
+}
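+
+/*
+ * Illustrative note, not part of the driver: the chip-select lines map to
+ * the even control-register bits (csbit = 1 << (2 * chip_select), i.e.
+ * bits 0, 2 and 4), and the reset value JCORE_SPI_CTRL_CS_BITS = 0x15
+ * sets all three high, the inactive level for the usual active-low
+ * devices.
+ */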
+
+static void jcore_spi_baudrate(struct jcore_spi *hw, int speed)
+{
+ if (speed == hw->speed_hz)
+ return;
+ hw->speed_hz = speed;
+ if (speed >= hw->clock_freq / 2)
+ hw->speed_reg = 0;
+ else
+ hw->speed_reg = ((hw->clock_freq / 2 / speed) - 1) << 27;
+ jcore_spi_program(hw);
+ dev_dbg(hw->master->dev.parent, "speed=%d reg=0x%x\n",
+ speed, hw->speed_reg);
+}
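+
+/*
+ * Illustrative example, not part of the driver: with the default 50 MHz
+ * reference clock, a requested speed of 1 MHz gives
+ * speed_reg = ((25000000 / 1000000) - 1) << 27 = 24 << 27, while any
+ * request of 25 MHz or more selects the undivided rate (speed_reg = 0).
+ */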
+
+static int jcore_spi_txrx(struct spi_master *master, struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct jcore_spi *hw = spi_master_get_devdata(master);
+
+ void __iomem *ctrl_reg = hw->base + CTRL_REG;
+ void __iomem *data_reg = hw->base + DATA_REG;
+ u32 xmit;
+
+ /* data buffers */
+ const unsigned char *tx;
+ unsigned char *rx;
+ unsigned int len;
+ unsigned int count;
+
+ jcore_spi_baudrate(hw, t->speed_hz);
+
+ xmit = hw->cs_reg | hw->speed_reg | JCORE_SPI_CTRL_XMIT;
+ tx = t->tx_buf;
+ rx = t->rx_buf;
+ len = t->len;
+
+ for (count = 0; count < len; count++) {
+ if (jcore_spi_wait(ctrl_reg))
+ break;
+
+ writel(tx ? *tx++ : 0, data_reg);
+ writel(xmit, ctrl_reg);
+
+ if (jcore_spi_wait(ctrl_reg))
+ break;
+
+ if (rx)
+ *rx++ = readl(data_reg);
+ }
+
+ spi_finalize_current_transfer(master);
+
+ if (count < len)
+ return -EREMOTEIO;
+
+ return 0;
+}
+
+static int jcore_spi_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct jcore_spi *hw;
+ struct spi_master *master;
+ struct resource *res;
+ u32 clock_freq;
+ struct clk *clk;
+ int err = -ENODEV;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct jcore_spi));
+ if (!master)
+ return err;
+
+ /* Setup the master state. */
+ master->num_chipselect = 3;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ master->transfer_one = jcore_spi_txrx;
+ master->set_cs = jcore_spi_chipsel;
+ master->dev.of_node = node;
+ master->bus_num = pdev->id;
+
+ hw = spi_master_get_devdata(master);
+ hw->master = master;
+ platform_set_drvdata(pdev, hw);
+
+ /* Find and map our resources */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ goto exit_busy;
+ if (!devm_request_mem_region(&pdev->dev, res->start,
+ resource_size(res), pdev->name))
+ goto exit_busy;
+ hw->base = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!hw->base)
+ goto exit_busy;
+
+ /*
+ * The SPI clock rate controlled via a configurable clock divider
+	 * The SPI clock rate is controlled via a configurable clock divider
+ * most suitable for obtaining standard SPI clock rates, but some
+ * designs may have a different reference clock, and the DT must
+ * make the driver aware so that it can properly program the
+ * requested rate. If the clock is omitted, 50 MHz is assumed.
+ */
+ clock_freq = 50000000;
+ clk = devm_clk_get(&pdev->dev, "ref_clk");
+ if (!IS_ERR(clk)) {
+ if (clk_prepare_enable(clk) == 0) {
+ clock_freq = clk_get_rate(clk);
+ clk_disable_unprepare(clk);
+ } else
+ dev_warn(&pdev->dev, "could not enable ref_clk\n");
+ }
+ hw->clock_freq = clock_freq;
+
+ /* Initialize all CS bits to high. */
+ hw->cs_reg = JCORE_SPI_CTRL_CS_BITS;
+ jcore_spi_baudrate(hw, 400000);
+
+ /* Register our spi controller */
+ err = devm_spi_register_master(&pdev->dev, master);
+ if (err)
+ goto exit;
+
+ return 0;
+
+exit_busy:
+ err = -EBUSY;
+exit:
+ spi_master_put(master);
+ return err;
+}
+
+static const struct of_device_id jcore_spi_of_match[] = {
+ { .compatible = "jcore,spi2" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, jcore_spi_of_match);
+
+static struct platform_driver jcore_spi_driver = {
+ .probe = jcore_spi_probe,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = jcore_spi_of_match,
+ },
+};
+
+module_platform_driver(jcore_spi_driver);
+
+MODULE_DESCRIPTION("J-Core SPI driver");
+MODULE_AUTHOR("Rich Felker <dalias@libc.org>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/spi/spi-lantiq-ssc.c b/drivers/spi/spi-lantiq-ssc.c
new file mode 100644
index 000000000..aae26f62e
--- /dev/null
+++ b/drivers/spi/spi-lantiq-ssc.c
@@ -0,0 +1,1051 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2011-2015 Daniel Schwierzeck <daniel.schwierzeck@gmail.com>
+ * Copyright (C) 2016 Hauke Mehrtens <hauke@hauke-m.de>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/completion.h>
+#include <linux/spinlock.h>
+#include <linux/err.h>
+#include <linux/pm_runtime.h>
+#include <linux/spi/spi.h>
+
+#ifdef CONFIG_LANTIQ
+#include <lantiq_soc.h>
+#endif
+
+#define LTQ_SPI_RX_IRQ_NAME "spi_rx"
+#define LTQ_SPI_TX_IRQ_NAME "spi_tx"
+#define LTQ_SPI_ERR_IRQ_NAME "spi_err"
+#define LTQ_SPI_FRM_IRQ_NAME "spi_frm"
+
+#define LTQ_SPI_CLC 0x00
+#define LTQ_SPI_PISEL 0x04
+#define LTQ_SPI_ID 0x08
+#define LTQ_SPI_CON 0x10
+#define LTQ_SPI_STAT 0x14
+#define LTQ_SPI_WHBSTATE 0x18
+#define LTQ_SPI_TB 0x20
+#define LTQ_SPI_RB 0x24
+#define LTQ_SPI_RXFCON 0x30
+#define LTQ_SPI_TXFCON 0x34
+#define LTQ_SPI_FSTAT 0x38
+#define LTQ_SPI_BRT 0x40
+#define LTQ_SPI_BRSTAT 0x44
+#define LTQ_SPI_SFCON 0x60
+#define LTQ_SPI_SFSTAT 0x64
+#define LTQ_SPI_GPOCON 0x70
+#define LTQ_SPI_GPOSTAT 0x74
+#define LTQ_SPI_FPGO 0x78
+#define LTQ_SPI_RXREQ 0x80
+#define LTQ_SPI_RXCNT 0x84
+#define LTQ_SPI_DMACON 0xec
+#define LTQ_SPI_IRNEN 0xf4
+
+#define LTQ_SPI_CLC_SMC_S 16 /* Clock divider for sleep mode */
+#define LTQ_SPI_CLC_SMC_M (0xFF << LTQ_SPI_CLC_SMC_S)
+#define LTQ_SPI_CLC_RMC_S 8 /* Clock divider for normal run mode */
+#define LTQ_SPI_CLC_RMC_M (0xFF << LTQ_SPI_CLC_RMC_S)
+#define LTQ_SPI_CLC_DISS BIT(1) /* Disable status bit */
+#define LTQ_SPI_CLC_DISR BIT(0) /* Disable request bit */
+
+#define LTQ_SPI_ID_TXFS_S 24 /* Implemented TX FIFO size */
+#define LTQ_SPI_ID_RXFS_S 16 /* Implemented RX FIFO size */
+#define LTQ_SPI_ID_MOD_S 8 /* Module ID */
+#define LTQ_SPI_ID_MOD_M (0xff << LTQ_SPI_ID_MOD_S)
+#define LTQ_SPI_ID_CFG_S 5 /* DMA interface support */
+#define LTQ_SPI_ID_CFG_M (1 << LTQ_SPI_ID_CFG_S)
+#define LTQ_SPI_ID_REV_M 0x1F /* Hardware revision number */
+
+#define LTQ_SPI_CON_BM_S 16 /* Data width selection */
+#define LTQ_SPI_CON_BM_M (0x1F << LTQ_SPI_CON_BM_S)
+#define LTQ_SPI_CON_EM BIT(24) /* Echo mode */
+#define LTQ_SPI_CON_IDLE BIT(23) /* Idle bit value */
+#define LTQ_SPI_CON_ENBV BIT(22) /* Enable byte valid control */
+#define LTQ_SPI_CON_RUEN BIT(12) /* Receive underflow error enable */
+#define LTQ_SPI_CON_TUEN BIT(11) /* Transmit underflow error enable */
+#define LTQ_SPI_CON_AEN BIT(10) /* Abort error enable */
+#define LTQ_SPI_CON_REN BIT(9) /* Receive overflow error enable */
+#define LTQ_SPI_CON_TEN BIT(8) /* Transmit overflow error enable */
+#define LTQ_SPI_CON_LB BIT(7) /* Loopback control */
+#define LTQ_SPI_CON_PO BIT(6) /* Clock polarity control */
+#define LTQ_SPI_CON_PH BIT(5) /* Clock phase control */
+#define LTQ_SPI_CON_HB BIT(4) /* Heading control */
+#define LTQ_SPI_CON_RXOFF BIT(1) /* Switch receiver off */
+#define LTQ_SPI_CON_TXOFF BIT(0) /* Switch transmitter off */
+
+#define LTQ_SPI_STAT_RXBV_S 28
+#define LTQ_SPI_STAT_RXBV_M (0x7 << LTQ_SPI_STAT_RXBV_S)
+#define LTQ_SPI_STAT_BSY BIT(13) /* Busy flag */
+#define LTQ_SPI_STAT_RUE BIT(12) /* Receive underflow error flag */
+#define LTQ_SPI_STAT_TUE BIT(11) /* Transmit underflow error flag */
+#define LTQ_SPI_STAT_AE BIT(10) /* Abort error flag */
+#define LTQ_SPI_STAT_RE BIT(9) /* Receive error flag */
+#define LTQ_SPI_STAT_TE BIT(8) /* Transmit error flag */
+#define LTQ_SPI_STAT_ME BIT(7) /* Mode error flag */
+#define LTQ_SPI_STAT_MS BIT(1) /* Master/slave select bit */
+#define LTQ_SPI_STAT_EN BIT(0) /* Enable bit */
+#define LTQ_SPI_STAT_ERRORS (LTQ_SPI_STAT_ME | LTQ_SPI_STAT_TE | \
+ LTQ_SPI_STAT_RE | LTQ_SPI_STAT_AE | \
+ LTQ_SPI_STAT_TUE | LTQ_SPI_STAT_RUE)
+
+#define LTQ_SPI_WHBSTATE_SETTUE BIT(15) /* Set transmit underflow error flag */
+#define LTQ_SPI_WHBSTATE_SETAE BIT(14) /* Set abort error flag */
+#define LTQ_SPI_WHBSTATE_SETRE BIT(13) /* Set receive error flag */
+#define LTQ_SPI_WHBSTATE_SETTE BIT(12) /* Set transmit error flag */
+#define LTQ_SPI_WHBSTATE_CLRTUE BIT(11) /* Clear transmit underflow error flag */
+#define LTQ_SPI_WHBSTATE_CLRAE BIT(10) /* Clear abort error flag */
+#define LTQ_SPI_WHBSTATE_CLRRE BIT(9) /* Clear receive error flag */
+#define LTQ_SPI_WHBSTATE_CLRTE BIT(8) /* Clear transmit error flag */
+#define LTQ_SPI_WHBSTATE_SETME BIT(7) /* Set mode error flag */
+#define LTQ_SPI_WHBSTATE_CLRME BIT(6) /* Clear mode error flag */
+#define LTQ_SPI_WHBSTATE_SETRUE BIT(5) /* Set receive underflow error flag */
+#define LTQ_SPI_WHBSTATE_CLRRUE BIT(4) /* Clear receive underflow error flag */
+#define LTQ_SPI_WHBSTATE_SETMS BIT(3) /* Set master select bit */
+#define LTQ_SPI_WHBSTATE_CLRMS BIT(2) /* Clear master select bit */
+#define LTQ_SPI_WHBSTATE_SETEN BIT(1) /* Set enable bit (operational mode) */
+#define LTQ_SPI_WHBSTATE_CLREN		BIT(0)	/* Clear enable bit (config mode) */
+#define LTQ_SPI_WHBSTATE_CLR_ERRORS (LTQ_SPI_WHBSTATE_CLRRUE | \
+ LTQ_SPI_WHBSTATE_CLRME | \
+ LTQ_SPI_WHBSTATE_CLRTE | \
+ LTQ_SPI_WHBSTATE_CLRRE | \
+ LTQ_SPI_WHBSTATE_CLRAE | \
+ LTQ_SPI_WHBSTATE_CLRTUE)
+
+#define LTQ_SPI_RXFCON_RXFITL_S 8 /* FIFO interrupt trigger level */
+#define LTQ_SPI_RXFCON_RXFLU BIT(1) /* FIFO flush */
+#define LTQ_SPI_RXFCON_RXFEN BIT(0) /* FIFO enable */
+
+#define LTQ_SPI_TXFCON_TXFITL_S 8 /* FIFO interrupt trigger level */
+#define LTQ_SPI_TXFCON_TXFLU BIT(1) /* FIFO flush */
+#define LTQ_SPI_TXFCON_TXFEN BIT(0) /* FIFO enable */
+
+#define LTQ_SPI_FSTAT_RXFFL_S 0
+#define LTQ_SPI_FSTAT_TXFFL_S 8
+
+#define LTQ_SPI_GPOCON_ISCSBN_S 8
+#define LTQ_SPI_GPOCON_INVOUTN_S 0
+
+#define LTQ_SPI_FGPO_SETOUTN_S 8
+#define LTQ_SPI_FGPO_CLROUTN_S 0
+
+#define LTQ_SPI_RXREQ_RXCNT_M 0xFFFF /* Receive count value */
+#define LTQ_SPI_RXCNT_TODO_M	0xFFFF	/* Receive to-do value */
+
+#define LTQ_SPI_IRNEN_TFI BIT(4) /* TX finished interrupt */
+#define LTQ_SPI_IRNEN_F BIT(3) /* Frame end interrupt request */
+#define LTQ_SPI_IRNEN_E BIT(2) /* Error end interrupt request */
+#define LTQ_SPI_IRNEN_T_XWAY BIT(1) /* Transmit end interrupt request */
+#define LTQ_SPI_IRNEN_R_XWAY BIT(0) /* Receive end interrupt request */
+#define LTQ_SPI_IRNEN_R_XRX	BIT(1)	/* Receive end interrupt request */
+#define LTQ_SPI_IRNEN_T_XRX	BIT(0)	/* Transmit end interrupt request */
+#define LTQ_SPI_IRNEN_ALL 0x1F
+
+struct lantiq_ssc_spi;
+
+struct lantiq_ssc_hwcfg {
+ int (*cfg_irq)(struct platform_device *pdev, struct lantiq_ssc_spi *spi);
+ unsigned int irnen_r;
+ unsigned int irnen_t;
+ unsigned int irncr;
+ unsigned int irnicr;
+ bool irq_ack;
+ u32 fifo_size_mask;
+};
+
+struct lantiq_ssc_spi {
+ struct spi_master *master;
+ struct device *dev;
+ void __iomem *regbase;
+ struct clk *spi_clk;
+ struct clk *fpi_clk;
+ const struct lantiq_ssc_hwcfg *hwcfg;
+
+ spinlock_t lock;
+ struct workqueue_struct *wq;
+ struct work_struct work;
+
+ const u8 *tx;
+ u8 *rx;
+ unsigned int tx_todo;
+ unsigned int rx_todo;
+ unsigned int bits_per_word;
+ unsigned int speed_hz;
+ unsigned int tx_fifo_size;
+ unsigned int rx_fifo_size;
+ unsigned int base_cs;
+ unsigned int fdx_tx_level;
+};
+
+static u32 lantiq_ssc_readl(const struct lantiq_ssc_spi *spi, u32 reg)
+{
+ return __raw_readl(spi->regbase + reg);
+}
+
+static void lantiq_ssc_writel(const struct lantiq_ssc_spi *spi, u32 val,
+ u32 reg)
+{
+ __raw_writel(val, spi->regbase + reg);
+}
+
+static void lantiq_ssc_maskl(const struct lantiq_ssc_spi *spi, u32 clr,
+ u32 set, u32 reg)
+{
+ u32 val = __raw_readl(spi->regbase + reg);
+
+ val &= ~clr;
+ val |= set;
+ __raw_writel(val, spi->regbase + reg);
+}
+
+static unsigned int tx_fifo_level(const struct lantiq_ssc_spi *spi)
+{
+ const struct lantiq_ssc_hwcfg *hwcfg = spi->hwcfg;
+ u32 fstat = lantiq_ssc_readl(spi, LTQ_SPI_FSTAT);
+
+ return (fstat >> LTQ_SPI_FSTAT_TXFFL_S) & hwcfg->fifo_size_mask;
+}
+
+static unsigned int rx_fifo_level(const struct lantiq_ssc_spi *spi)
+{
+ const struct lantiq_ssc_hwcfg *hwcfg = spi->hwcfg;
+ u32 fstat = lantiq_ssc_readl(spi, LTQ_SPI_FSTAT);
+
+ return (fstat >> LTQ_SPI_FSTAT_RXFFL_S) & hwcfg->fifo_size_mask;
+}
+
+static unsigned int tx_fifo_free(const struct lantiq_ssc_spi *spi)
+{
+ return spi->tx_fifo_size - tx_fifo_level(spi);
+}
+
+static void rx_fifo_reset(const struct lantiq_ssc_spi *spi)
+{
+ u32 val = spi->rx_fifo_size << LTQ_SPI_RXFCON_RXFITL_S;
+
+ val |= LTQ_SPI_RXFCON_RXFEN | LTQ_SPI_RXFCON_RXFLU;
+ lantiq_ssc_writel(spi, val, LTQ_SPI_RXFCON);
+}
+
+static void tx_fifo_reset(const struct lantiq_ssc_spi *spi)
+{
+ u32 val = 1 << LTQ_SPI_TXFCON_TXFITL_S;
+
+ val |= LTQ_SPI_TXFCON_TXFEN | LTQ_SPI_TXFCON_TXFLU;
+ lantiq_ssc_writel(spi, val, LTQ_SPI_TXFCON);
+}
+
+static void rx_fifo_flush(const struct lantiq_ssc_spi *spi)
+{
+ lantiq_ssc_maskl(spi, 0, LTQ_SPI_RXFCON_RXFLU, LTQ_SPI_RXFCON);
+}
+
+static void tx_fifo_flush(const struct lantiq_ssc_spi *spi)
+{
+ lantiq_ssc_maskl(spi, 0, LTQ_SPI_TXFCON_TXFLU, LTQ_SPI_TXFCON);
+}
+
+static void hw_enter_config_mode(const struct lantiq_ssc_spi *spi)
+{
+ lantiq_ssc_writel(spi, LTQ_SPI_WHBSTATE_CLREN, LTQ_SPI_WHBSTATE);
+}
+
+static void hw_enter_active_mode(const struct lantiq_ssc_spi *spi)
+{
+ lantiq_ssc_writel(spi, LTQ_SPI_WHBSTATE_SETEN, LTQ_SPI_WHBSTATE);
+}
+
+static void hw_setup_speed_hz(const struct lantiq_ssc_spi *spi,
+ unsigned int max_speed_hz)
+{
+ u32 spi_clk, brt;
+
+ /*
+ * SPI module clock is derived from FPI bus clock dependent on
+ * divider value in CLC.RMC, which is always set to 1.
+ *
+ * f_SPI
+ * baudrate = --------------
+ * 2 * (BR + 1)
+ */
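+ /*
+ * Worked example with assumed clock values: an FPI bus clock of
+ * 100 MHz gives spi_clk = 50 MHz below; a requested max_speed_hz of
+ * 2 MHz then yields BR = 50000000 / 2000000 - 1 = 24, i.e. an
+ * effective baudrate of 100 MHz / (2 * 25) = 2 MHz.
+ */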
+ spi_clk = clk_get_rate(spi->fpi_clk) / 2;
+
+ if (max_speed_hz > spi_clk)
+ brt = 0;
+ else
+ brt = spi_clk / max_speed_hz - 1;
+
+ if (brt > 0xFFFF)
+ brt = 0xFFFF;
+
+ dev_dbg(spi->dev, "spi_clk %u, max_speed_hz %u, brt %u\n",
+ spi_clk, max_speed_hz, brt);
+
+ lantiq_ssc_writel(spi, brt, LTQ_SPI_BRT);
+}
+
+static void hw_setup_bits_per_word(const struct lantiq_ssc_spi *spi,
+ unsigned int bits_per_word)
+{
+ u32 bm;
+
+ /* CON.BM value = bits_per_word - 1 */
+ bm = (bits_per_word - 1) << LTQ_SPI_CON_BM_S;
+
+ lantiq_ssc_maskl(spi, LTQ_SPI_CON_BM_M, bm, LTQ_SPI_CON);
+}
+
+static void hw_setup_clock_mode(const struct lantiq_ssc_spi *spi,
+ unsigned int mode)
+{
+ u32 con_set = 0, con_clr = 0;
+
+ /*
+ * SPI mode mapping in CON register:
+ * Mode CPOL CPHA CON.PO CON.PH
+ * 0 0 0 0 1
+ * 1 0 1 0 0
+ * 2 1 0 1 1
+ * 3 1 1 1 0
+ */
+ if (mode & SPI_CPHA)
+ con_clr |= LTQ_SPI_CON_PH;
+ else
+ con_set |= LTQ_SPI_CON_PH;
+
+ if (mode & SPI_CPOL)
+ con_set |= LTQ_SPI_CON_PO | LTQ_SPI_CON_IDLE;
+ else
+ con_clr |= LTQ_SPI_CON_PO | LTQ_SPI_CON_IDLE;
+
+ /* Set heading control */
+ if (mode & SPI_LSB_FIRST)
+ con_clr |= LTQ_SPI_CON_HB;
+ else
+ con_set |= LTQ_SPI_CON_HB;
+
+ /* Set loopback mode */
+ if (mode & SPI_LOOP)
+ con_set |= LTQ_SPI_CON_LB;
+ else
+ con_clr |= LTQ_SPI_CON_LB;
+
+ lantiq_ssc_maskl(spi, con_clr, con_set, LTQ_SPI_CON);
+}
+
+static void lantiq_ssc_hw_init(const struct lantiq_ssc_spi *spi)
+{
+ const struct lantiq_ssc_hwcfg *hwcfg = spi->hwcfg;
+
+ /*
+ * Set clock divider for run mode to 1 to
+ * run at same frequency as FPI bus
+ */
+ lantiq_ssc_writel(spi, 1 << LTQ_SPI_CLC_RMC_S, LTQ_SPI_CLC);
+
+ /* Put controller into config mode */
+ hw_enter_config_mode(spi);
+
+ /* Clear error flags */
+ lantiq_ssc_maskl(spi, 0, LTQ_SPI_WHBSTATE_CLR_ERRORS, LTQ_SPI_WHBSTATE);
+
+ /* Enable error checking, disable TX/RX */
+ lantiq_ssc_writel(spi, LTQ_SPI_CON_RUEN | LTQ_SPI_CON_AEN |
+ LTQ_SPI_CON_TEN | LTQ_SPI_CON_REN | LTQ_SPI_CON_TXOFF |
+ LTQ_SPI_CON_RXOFF, LTQ_SPI_CON);
+
+ /* Setup default SPI mode */
+ hw_setup_bits_per_word(spi, spi->bits_per_word);
+ hw_setup_clock_mode(spi, SPI_MODE_0);
+
+ /* Enable master mode and clear error flags */
+ lantiq_ssc_writel(spi, LTQ_SPI_WHBSTATE_SETMS |
+ LTQ_SPI_WHBSTATE_CLR_ERRORS,
+ LTQ_SPI_WHBSTATE);
+
+ /* Reset GPIO/CS registers */
+ lantiq_ssc_writel(spi, 0, LTQ_SPI_GPOCON);
+ lantiq_ssc_writel(spi, 0xFF00, LTQ_SPI_FPGO);
+
+ /* Enable and flush FIFOs */
+ rx_fifo_reset(spi);
+ tx_fifo_reset(spi);
+
+ /* Enable interrupts */
+ lantiq_ssc_writel(spi, hwcfg->irnen_t | hwcfg->irnen_r |
+ LTQ_SPI_IRNEN_E, LTQ_SPI_IRNEN);
+}
+
+static int lantiq_ssc_setup(struct spi_device *spidev)
+{
+ struct spi_master *master = spidev->master;
+ struct lantiq_ssc_spi *spi = spi_master_get_devdata(master);
+ unsigned int cs = spidev->chip_select;
+ u32 gpocon;
+
+ /* GPIOs are used for CS */
+ if (spidev->cs_gpiod)
+ return 0;
+
+ dev_dbg(spi->dev, "using internal chipselect %u\n", cs);
+
+ if (cs < spi->base_cs) {
+ dev_err(spi->dev,
+ "chipselect %i too small (min %i)\n", cs, spi->base_cs);
+ return -EINVAL;
+ }
+
+ /* set GPO pin to CS mode */
+ gpocon = 1 << ((cs - spi->base_cs) + LTQ_SPI_GPOCON_ISCSBN_S);
+
+ /* invert GPO pin */
+ if (spidev->mode & SPI_CS_HIGH)
+ gpocon |= 1 << (cs - spi->base_cs);
+
+ lantiq_ssc_maskl(spi, 0, gpocon, LTQ_SPI_GPOCON);
+
+ return 0;
+}
+
+static int lantiq_ssc_prepare_message(struct spi_master *master,
+ struct spi_message *message)
+{
+ struct lantiq_ssc_spi *spi = spi_master_get_devdata(master);
+
+ hw_enter_config_mode(spi);
+ hw_setup_clock_mode(spi, message->spi->mode);
+ hw_enter_active_mode(spi);
+
+ return 0;
+}
+
+static void hw_setup_transfer(struct lantiq_ssc_spi *spi,
+ struct spi_device *spidev, struct spi_transfer *t)
+{
+ unsigned int speed_hz = t->speed_hz;
+ unsigned int bits_per_word = t->bits_per_word;
+ u32 con;
+
+ if (bits_per_word != spi->bits_per_word ||
+ speed_hz != spi->speed_hz) {
+ hw_enter_config_mode(spi);
+ hw_setup_speed_hz(spi, speed_hz);
+ hw_setup_bits_per_word(spi, bits_per_word);
+ hw_enter_active_mode(spi);
+
+ spi->speed_hz = speed_hz;
+ spi->bits_per_word = bits_per_word;
+ }
+
+ /* Configure transmitter and receiver */
+ con = lantiq_ssc_readl(spi, LTQ_SPI_CON);
+ if (t->tx_buf)
+ con &= ~LTQ_SPI_CON_TXOFF;
+ else
+ con |= LTQ_SPI_CON_TXOFF;
+
+ if (t->rx_buf)
+ con &= ~LTQ_SPI_CON_RXOFF;
+ else
+ con |= LTQ_SPI_CON_RXOFF;
+
+ lantiq_ssc_writel(spi, con, LTQ_SPI_CON);
+}
+
+static int lantiq_ssc_unprepare_message(struct spi_master *master,
+ struct spi_message *message)
+{
+ struct lantiq_ssc_spi *spi = spi_master_get_devdata(master);
+
+ flush_workqueue(spi->wq);
+
+ /* Disable transmitter and receiver while idle */
+ lantiq_ssc_maskl(spi, 0, LTQ_SPI_CON_TXOFF | LTQ_SPI_CON_RXOFF,
+ LTQ_SPI_CON);
+
+ return 0;
+}
+
+static void tx_fifo_write(struct lantiq_ssc_spi *spi)
+{
+ const u8 *tx8;
+ const u16 *tx16;
+ const u32 *tx32;
+ u32 data;
+ unsigned int tx_free = tx_fifo_free(spi);
+
+ spi->fdx_tx_level = 0;
+ while (spi->tx_todo && tx_free) {
+ switch (spi->bits_per_word) {
+ case 2 ... 8:
+ tx8 = spi->tx;
+ data = *tx8;
+ spi->tx_todo--;
+ spi->tx++;
+ break;
+ case 16:
+ tx16 = (u16 *) spi->tx;
+ data = *tx16;
+ spi->tx_todo -= 2;
+ spi->tx += 2;
+ break;
+ case 32:
+ tx32 = (u32 *) spi->tx;
+ data = *tx32;
+ spi->tx_todo -= 4;
+ spi->tx += 4;
+ break;
+ default:
+ WARN_ON(1);
+ data = 0;
+ break;
+ }
+
+ lantiq_ssc_writel(spi, data, LTQ_SPI_TB);
+ tx_free--;
+ spi->fdx_tx_level++;
+ }
+}
+
+static void rx_fifo_read_full_duplex(struct lantiq_ssc_spi *spi)
+{
+ u8 *rx8;
+ u16 *rx16;
+ u32 *rx32;
+ u32 data;
+ unsigned int rx_fill = rx_fifo_level(spi);
+
+ /*
+ * Wait until all of the expected data has been shifted in.
+ * Otherwise, an RX overrun may occur.
+ */
+ while (rx_fill != spi->fdx_tx_level)
+ rx_fill = rx_fifo_level(spi);
+
+ while (rx_fill) {
+ data = lantiq_ssc_readl(spi, LTQ_SPI_RB);
+
+ switch (spi->bits_per_word) {
+ case 2 ... 8:
+ rx8 = spi->rx;
+ *rx8 = data;
+ spi->rx_todo--;
+ spi->rx++;
+ break;
+ case 16:
+ rx16 = (u16 *) spi->rx;
+ *rx16 = data;
+ spi->rx_todo -= 2;
+ spi->rx += 2;
+ break;
+ case 32:
+ rx32 = (u32 *) spi->rx;
+ *rx32 = data;
+ spi->rx_todo -= 4;
+ spi->rx += 4;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ rx_fill--;
+ }
+}
+
+static void rx_fifo_read_half_duplex(struct lantiq_ssc_spi *spi)
+{
+ u32 data, *rx32;
+ u8 *rx8;
+ unsigned int rxbv, shift;
+ unsigned int rx_fill = rx_fifo_level(spi);
+
+ /*
+ * In RX-only mode the bits per word value is ignored by HW. A value
+ * of 32 is used instead. Thus all 4 bytes per FIFO entry must be read.
+ * If remaining RX bytes are less than 4, the FIFO must be read
+ * differently. The amount of received and valid bytes is indicated
+ * by STAT.RXBV register value.
+ */
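+ /*
+ * For example, with rx_todo = 3 and STAT.RXBV = 3, the three valid
+ * bytes occupy bits 23:0 of the RB word and are stored starting with
+ * the most significant valid byte (shift starts at (RXBV - 1) * 8).
+ */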
+ while (rx_fill) {
+ if (spi->rx_todo < 4) {
+ rxbv = (lantiq_ssc_readl(spi, LTQ_SPI_STAT) &
+ LTQ_SPI_STAT_RXBV_M) >> LTQ_SPI_STAT_RXBV_S;
+ data = lantiq_ssc_readl(spi, LTQ_SPI_RB);
+
+ shift = (rxbv - 1) * 8;
+ rx8 = spi->rx;
+
+ while (rxbv) {
+ *rx8++ = (data >> shift) & 0xFF;
+ rxbv--;
+ shift -= 8;
+ spi->rx_todo--;
+ spi->rx++;
+ }
+ } else {
+ data = lantiq_ssc_readl(spi, LTQ_SPI_RB);
+ rx32 = (u32 *) spi->rx;
+
+ *rx32++ = data;
+ spi->rx_todo -= 4;
+ spi->rx += 4;
+ }
+ rx_fill--;
+ }
+}
+
+static void rx_request(struct lantiq_ssc_spi *spi)
+{
+ unsigned int rxreq, rxreq_max;
+
+ /*
+ * To avoid receive overflows at high clocks it is better to request
+ * only the amount of bytes that fits into all FIFOs. This value
+ * depends on the FIFO size implemented in hardware.
+ */
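+ /*
+ * For illustration, assuming an 8-entry RX FIFO: each entry holds
+ * 4 bytes in RX-only mode, so at most 32 bytes are requested per
+ * RXREQ write below.
+ */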
+ rxreq = spi->rx_todo;
+ rxreq_max = spi->rx_fifo_size * 4;
+ if (rxreq > rxreq_max)
+ rxreq = rxreq_max;
+
+ lantiq_ssc_writel(spi, rxreq, LTQ_SPI_RXREQ);
+}
+
+static irqreturn_t lantiq_ssc_xmit_interrupt(int irq, void *data)
+{
+ struct lantiq_ssc_spi *spi = data;
+ const struct lantiq_ssc_hwcfg *hwcfg = spi->hwcfg;
+ u32 val = lantiq_ssc_readl(spi, hwcfg->irncr);
+
+ spin_lock(&spi->lock);
+ if (hwcfg->irq_ack)
+ lantiq_ssc_writel(spi, val, hwcfg->irncr);
+
+ if (spi->tx) {
+ if (spi->rx && spi->rx_todo)
+ rx_fifo_read_full_duplex(spi);
+
+ if (spi->tx_todo)
+ tx_fifo_write(spi);
+ else if (!tx_fifo_level(spi))
+ goto completed;
+ } else if (spi->rx) {
+ if (spi->rx_todo) {
+ rx_fifo_read_half_duplex(spi);
+
+ if (spi->rx_todo)
+ rx_request(spi);
+ else
+ goto completed;
+ } else {
+ goto completed;
+ }
+ }
+
+ spin_unlock(&spi->lock);
+ return IRQ_HANDLED;
+
+completed:
+ queue_work(spi->wq, &spi->work);
+ spin_unlock(&spi->lock);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t lantiq_ssc_err_interrupt(int irq, void *data)
+{
+ struct lantiq_ssc_spi *spi = data;
+ const struct lantiq_ssc_hwcfg *hwcfg = spi->hwcfg;
+ u32 stat = lantiq_ssc_readl(spi, LTQ_SPI_STAT);
+ u32 val = lantiq_ssc_readl(spi, hwcfg->irncr);
+
+ if (!(stat & LTQ_SPI_STAT_ERRORS))
+ return IRQ_NONE;
+
+ spin_lock(&spi->lock);
+ if (hwcfg->irq_ack)
+ lantiq_ssc_writel(spi, val, hwcfg->irncr);
+
+ if (stat & LTQ_SPI_STAT_RUE)
+ dev_err(spi->dev, "receive underflow error\n");
+ if (stat & LTQ_SPI_STAT_TUE)
+ dev_err(spi->dev, "transmit underflow error\n");
+ if (stat & LTQ_SPI_STAT_AE)
+ dev_err(spi->dev, "abort error\n");
+ if (stat & LTQ_SPI_STAT_RE)
+ dev_err(spi->dev, "receive overflow error\n");
+ if (stat & LTQ_SPI_STAT_TE)
+ dev_err(spi->dev, "transmit overflow error\n");
+ if (stat & LTQ_SPI_STAT_ME)
+ dev_err(spi->dev, "mode error\n");
+
+ /* Clear error flags */
+ lantiq_ssc_maskl(spi, 0, LTQ_SPI_WHBSTATE_CLR_ERRORS, LTQ_SPI_WHBSTATE);
+
+ /* set bad status so it can be retried */
+ if (spi->master->cur_msg)
+ spi->master->cur_msg->status = -EIO;
+ queue_work(spi->wq, &spi->work);
+ spin_unlock(&spi->lock);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t intel_lgm_ssc_isr(int irq, void *data)
+{
+ struct lantiq_ssc_spi *spi = data;
+ const struct lantiq_ssc_hwcfg *hwcfg = spi->hwcfg;
+ u32 val = lantiq_ssc_readl(spi, hwcfg->irncr);
+
+ if (!(val & LTQ_SPI_IRNEN_ALL))
+ return IRQ_NONE;
+
+ if (val & LTQ_SPI_IRNEN_E)
+ return lantiq_ssc_err_interrupt(irq, data);
+
+ if ((val & hwcfg->irnen_t) || (val & hwcfg->irnen_r))
+ return lantiq_ssc_xmit_interrupt(irq, data);
+
+ return IRQ_HANDLED;
+}
+
+static int transfer_start(struct lantiq_ssc_spi *spi, struct spi_device *spidev,
+ struct spi_transfer *t)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&spi->lock, flags);
+
+ spi->tx = t->tx_buf;
+ spi->rx = t->rx_buf;
+
+ if (t->tx_buf) {
+ spi->tx_todo = t->len;
+
+ /* initially fill TX FIFO */
+ tx_fifo_write(spi);
+ }
+
+ if (spi->rx) {
+ spi->rx_todo = t->len;
+
+ /* start shift clock in RX-only mode */
+ if (!spi->tx)
+ rx_request(spi);
+ }
+
+ spin_unlock_irqrestore(&spi->lock, flags);
+
+ return t->len;
+}
+
+/*
+ * The driver only gets an interrupt when the FIFO is empty, but there
+ * is an additional shift register from which the data is written to
+ * the wire. We get the last interrupt when the controller starts to
+ * write the last word to the wire, not when it is finished. Do busy
+ * waiting till it finishes.
+ */
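+/*
+ * The timeout used below is roughly the time needed to shift one 8-bit
+ * word at the current clock rate (8 * 1000 / speed_hz milliseconds),
+ * doubled and padded with 100 ms of slack.
+ */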
+static void lantiq_ssc_bussy_work(struct work_struct *work)
+{
+ struct lantiq_ssc_spi *spi;
+ unsigned long long timeout = 8LL * 1000LL;
+ unsigned long end;
+
+ spi = container_of(work, typeof(*spi), work);
+
+ do_div(timeout, spi->speed_hz);
+ timeout += timeout + 100; /* some tolerance */
+
+ end = jiffies + msecs_to_jiffies(timeout);
+ do {
+ u32 stat = lantiq_ssc_readl(spi, LTQ_SPI_STAT);
+
+ if (!(stat & LTQ_SPI_STAT_BSY)) {
+ spi_finalize_current_transfer(spi->master);
+ return;
+ }
+
+ cond_resched();
+ } while (!time_after_eq(jiffies, end));
+
+ if (spi->master->cur_msg)
+ spi->master->cur_msg->status = -EIO;
+ spi_finalize_current_transfer(spi->master);
+}
+
+static void lantiq_ssc_handle_err(struct spi_master *master,
+ struct spi_message *message)
+{
+ struct lantiq_ssc_spi *spi = spi_master_get_devdata(master);
+
+ /* flush FIFOs on timeout */
+ rx_fifo_flush(spi);
+ tx_fifo_flush(spi);
+}
+
+static void lantiq_ssc_set_cs(struct spi_device *spidev, bool enable)
+{
+ struct lantiq_ssc_spi *spi = spi_master_get_devdata(spidev->master);
+ unsigned int cs = spidev->chip_select;
+ u32 fgpo;
+
+ if (!!(spidev->mode & SPI_CS_HIGH) == enable)
+ fgpo = (1 << (cs - spi->base_cs));
+ else
+ fgpo = (1 << (cs - spi->base_cs + LTQ_SPI_FGPO_SETOUTN_S));
+
+ lantiq_ssc_writel(spi, fgpo, LTQ_SPI_FPGO);
+}
+
+static int lantiq_ssc_transfer_one(struct spi_master *master,
+ struct spi_device *spidev,
+ struct spi_transfer *t)
+{
+ struct lantiq_ssc_spi *spi = spi_master_get_devdata(master);
+
+ hw_setup_transfer(spi, spidev, t);
+
+ return transfer_start(spi, spidev, t);
+}
+
+static int intel_lgm_cfg_irq(struct platform_device *pdev, struct lantiq_ssc_spi *spi)
+{
+ int irq;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ return devm_request_irq(&pdev->dev, irq, intel_lgm_ssc_isr, 0, "spi", spi);
+}
+
+static int lantiq_cfg_irq(struct platform_device *pdev, struct lantiq_ssc_spi *spi)
+{
+ int irq, err;
+
+ irq = platform_get_irq_byname(pdev, LTQ_SPI_RX_IRQ_NAME);
+ if (irq < 0)
+ return irq;
+
+ err = devm_request_irq(&pdev->dev, irq, lantiq_ssc_xmit_interrupt,
+ 0, LTQ_SPI_RX_IRQ_NAME, spi);
+ if (err)
+ return err;
+
+ irq = platform_get_irq_byname(pdev, LTQ_SPI_TX_IRQ_NAME);
+ if (irq < 0)
+ return irq;
+
+ err = devm_request_irq(&pdev->dev, irq, lantiq_ssc_xmit_interrupt,
+ 0, LTQ_SPI_TX_IRQ_NAME, spi);
+
+ if (err)
+ return err;
+
+ irq = platform_get_irq_byname(pdev, LTQ_SPI_ERR_IRQ_NAME);
+ if (irq < 0)
+ return irq;
+
+ err = devm_request_irq(&pdev->dev, irq, lantiq_ssc_err_interrupt,
+ 0, LTQ_SPI_ERR_IRQ_NAME, spi);
+ return err;
+}
+
+static const struct lantiq_ssc_hwcfg lantiq_ssc_xway = {
+ .cfg_irq = lantiq_cfg_irq,
+ .irnen_r = LTQ_SPI_IRNEN_R_XWAY,
+ .irnen_t = LTQ_SPI_IRNEN_T_XWAY,
+ .irnicr = 0xF8,
+ .irncr = 0xFC,
+ .fifo_size_mask = GENMASK(5, 0),
+ .irq_ack = false,
+};
+
+static const struct lantiq_ssc_hwcfg lantiq_ssc_xrx = {
+ .cfg_irq = lantiq_cfg_irq,
+ .irnen_r = LTQ_SPI_IRNEN_R_XRX,
+ .irnen_t = LTQ_SPI_IRNEN_T_XRX,
+ .irnicr = 0xF8,
+ .irncr = 0xFC,
+ .fifo_size_mask = GENMASK(5, 0),
+ .irq_ack = false,
+};
+
+static const struct lantiq_ssc_hwcfg intel_ssc_lgm = {
+ .cfg_irq = intel_lgm_cfg_irq,
+ .irnen_r = LTQ_SPI_IRNEN_R_XRX,
+ .irnen_t = LTQ_SPI_IRNEN_T_XRX,
+ .irnicr = 0xFC,
+ .irncr = 0xF8,
+ .fifo_size_mask = GENMASK(7, 0),
+ .irq_ack = true,
+};
+
+static const struct of_device_id lantiq_ssc_match[] = {
+ { .compatible = "lantiq,ase-spi", .data = &lantiq_ssc_xway, },
+ { .compatible = "lantiq,falcon-spi", .data = &lantiq_ssc_xrx, },
+ { .compatible = "lantiq,xrx100-spi", .data = &lantiq_ssc_xrx, },
+ { .compatible = "intel,lgm-spi", .data = &intel_ssc_lgm, },
+ {},
+};
+MODULE_DEVICE_TABLE(of, lantiq_ssc_match);
+
+static int lantiq_ssc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct spi_master *master;
+ struct lantiq_ssc_spi *spi;
+ const struct lantiq_ssc_hwcfg *hwcfg;
+ u32 id, supports_dma, revision;
+ unsigned int num_cs;
+ int err;
+
+ hwcfg = of_device_get_match_data(dev);
+
+ master = spi_alloc_master(dev, sizeof(struct lantiq_ssc_spi));
+ if (!master)
+ return -ENOMEM;
+
+ spi = spi_master_get_devdata(master);
+ spi->master = master;
+ spi->dev = dev;
+ spi->hwcfg = hwcfg;
+ platform_set_drvdata(pdev, spi);
+ spi->regbase = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(spi->regbase)) {
+ err = PTR_ERR(spi->regbase);
+ goto err_master_put;
+ }
+
+ err = hwcfg->cfg_irq(pdev, spi);
+ if (err)
+ goto err_master_put;
+
+ spi->spi_clk = devm_clk_get(dev, "gate");
+ if (IS_ERR(spi->spi_clk)) {
+ err = PTR_ERR(spi->spi_clk);
+ goto err_master_put;
+ }
+ err = clk_prepare_enable(spi->spi_clk);
+ if (err)
+ goto err_master_put;
+
+ /*
+ * Use the old clk_get_fpi() function on Lantiq platform, till it
+ * supports common clk.
+ */
+#if defined(CONFIG_LANTIQ) && !defined(CONFIG_COMMON_CLK)
+ spi->fpi_clk = clk_get_fpi();
+#else
+ spi->fpi_clk = clk_get(dev, "freq");
+#endif
+ if (IS_ERR(spi->fpi_clk)) {
+ err = PTR_ERR(spi->fpi_clk);
+ goto err_clk_disable;
+ }
+
+ num_cs = 8;
+ of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs);
+
+ spi->base_cs = 1;
+ of_property_read_u32(pdev->dev.of_node, "base-cs", &spi->base_cs);
+
+ spin_lock_init(&spi->lock);
+ spi->bits_per_word = 8;
+ spi->speed_hz = 0;
+
+ master->dev.of_node = pdev->dev.of_node;
+ master->num_chipselect = num_cs;
+ master->use_gpio_descriptors = true;
+ master->setup = lantiq_ssc_setup;
+ master->set_cs = lantiq_ssc_set_cs;
+ master->handle_err = lantiq_ssc_handle_err;
+ master->prepare_message = lantiq_ssc_prepare_message;
+ master->unprepare_message = lantiq_ssc_unprepare_message;
+ master->transfer_one = lantiq_ssc_transfer_one;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_CS_HIGH |
+ SPI_LOOP;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 8) |
+ SPI_BPW_MASK(16) | SPI_BPW_MASK(32);
+
+ spi->wq = alloc_ordered_workqueue(dev_name(dev), WQ_MEM_RECLAIM);
+ if (!spi->wq) {
+ err = -ENOMEM;
+ goto err_clk_put;
+ }
+ INIT_WORK(&spi->work, lantiq_ssc_bussy_work);
+
+ id = lantiq_ssc_readl(spi, LTQ_SPI_ID);
+ spi->tx_fifo_size = (id >> LTQ_SPI_ID_TXFS_S) & hwcfg->fifo_size_mask;
+ spi->rx_fifo_size = (id >> LTQ_SPI_ID_RXFS_S) & hwcfg->fifo_size_mask;
+ supports_dma = (id & LTQ_SPI_ID_CFG_M) >> LTQ_SPI_ID_CFG_S;
+ revision = id & LTQ_SPI_ID_REV_M;
+
+ lantiq_ssc_hw_init(spi);
+
+ dev_info(dev,
+ "Lantiq SSC SPI controller (Rev %i, TXFS %u, RXFS %u, DMA %u)\n",
+ revision, spi->tx_fifo_size, spi->rx_fifo_size, supports_dma);
+
+ err = devm_spi_register_master(dev, master);
+ if (err) {
+ dev_err(dev, "failed to register spi_master\n");
+ goto err_wq_destroy;
+ }
+
+ return 0;
+
+err_wq_destroy:
+ destroy_workqueue(spi->wq);
+err_clk_put:
+ clk_put(spi->fpi_clk);
+err_clk_disable:
+ clk_disable_unprepare(spi->spi_clk);
+err_master_put:
+ spi_master_put(master);
+
+ return err;
+}
+
+static int lantiq_ssc_remove(struct platform_device *pdev)
+{
+ struct lantiq_ssc_spi *spi = platform_get_drvdata(pdev);
+
+ lantiq_ssc_writel(spi, 0, LTQ_SPI_IRNEN);
+ lantiq_ssc_writel(spi, 0, LTQ_SPI_CLC);
+ rx_fifo_flush(spi);
+ tx_fifo_flush(spi);
+ hw_enter_config_mode(spi);
+
+ destroy_workqueue(spi->wq);
+ clk_disable_unprepare(spi->spi_clk);
+ clk_put(spi->fpi_clk);
+
+ return 0;
+}
+
+static struct platform_driver lantiq_ssc_driver = {
+ .probe = lantiq_ssc_probe,
+ .remove = lantiq_ssc_remove,
+ .driver = {
+ .name = "spi-lantiq-ssc",
+ .of_match_table = lantiq_ssc_match,
+ },
+};
+module_platform_driver(lantiq_ssc_driver);
+
+MODULE_DESCRIPTION("Lantiq SSC SPI controller driver");
+MODULE_AUTHOR("Daniel Schwierzeck <daniel.schwierzeck@gmail.com>");
+MODULE_AUTHOR("Hauke Mehrtens <hauke@hauke-m.de>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:spi-lantiq-ssc");
diff --git a/drivers/spi/spi-lm70llp.c b/drivers/spi/spi-lm70llp.c
new file mode 100644
index 000000000..ead0507c6
--- /dev/null
+++ b/drivers/spi/spi-lm70llp.c
@@ -0,0 +1,328 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for LM70EVAL-LLP board for the LM70 sensor
+ *
+ * Copyright (C) 2006 Kaiwan N Billimoria <kaiwan@designergraphix.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/parport.h>
+#include <linux/sysfs.h>
+#include <linux/workqueue.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+
+/*
+ * The LM70 communicates with a host processor using a 3-wire variant of
+ * the SPI/Microwire bus interface. This driver specifically supports an
+ * NS LM70 LLP Evaluation Board, interfacing to a PC using its parallel
+ * port to bitbang an SPI-parport bridge. Accordingly, this is an SPI
+ * master controller driver. The hwmon/lm70 driver is a "SPI protocol
+ * driver", layered on top of this one and usable without the lm70llp.
+ *
+ * Datasheet and Schematic:
+ * The LM70 is a temperature sensor chip from National Semiconductor; its
+ * datasheet is available at http://www.national.com/pf/LM/LM70.html
+ * The schematic for this particular board (the LM70EVAL-LLP) is
+ * available (on page 4) here:
+ * http://www.national.com/appinfo/tempsensors/files/LM70LLPEVALmanual.pdf
+ *
+ * Also see Documentation/spi/spi-lm70llp.rst. The SPI<->parport code here is
+ * (heavily) based on spi-butterfly by David Brownell.
+ *
+ * The LM70 LLP connects to the PC parallel port in the following manner:
+ *
+ * Parallel LM70 LLP
+ * Port Direction JP2 Header
+ * ----------- --------- ------------
+ * D0 2 - -
+ * D1 3 --> V+ 5
+ * D2 4 --> V+ 5
+ * D3 5 --> V+ 5
+ * D4 6 --> V+ 5
+ * D5 7 --> nCS 8
+ * D6 8 --> SCLK 3
+ * D7 9 --> SI/O 5
+ * GND 25 - GND 7
+ * Select 13 <-- SI/O 1
+ *
+ * Note that parport pin 13 actually gets inverted by the transistor
+ * arrangement which lets either the parport or the LM70 drive the
+ * SI/SO signal (see the schematic for details).
+ */
+
+#define DRVNAME "spi-lm70llp"
+
+#define lm70_INIT 0xBE
+#define SIO 0x10
+#define nCS 0x20
+#define SCLK 0x40
+
+/*-------------------------------------------------------------------------*/
+
+struct spi_lm70llp {
+ struct spi_bitbang bitbang;
+ struct parport *port;
+ struct pardevice *pd;
+ struct spi_device *spidev_lm70;
+ struct spi_board_info info;
+ //struct device *dev;
+};
+
+/* REVISIT : ugly global ; provides "exclusive open" facility */
+static struct spi_lm70llp *lm70llp;
+
+/*-------------------------------------------------------------------*/
+
+static inline struct spi_lm70llp *spidev_to_pp(struct spi_device *spi)
+{
+ return spi->controller_data;
+}
+
+/*---------------------- LM70 LLP eval board-specific inlines follow */
+
+/* NOTE: we don't actually need to reread the output values, since they'll
+ * still be what we wrote before. Plus, going through parport builds in
+ * a ~1ms/operation delay; these SPI transfers could easily be faster.
+ */
+
+static inline void deassertCS(struct spi_lm70llp *pp)
+{
+ u8 data = parport_read_data(pp->port);
+
+ data &= ~0x80; /* pull D7/SI-out low while de-asserted */
+ parport_write_data(pp->port, data | nCS);
+}
+
+static inline void assertCS(struct spi_lm70llp *pp)
+{
+ u8 data = parport_read_data(pp->port);
+
+ data |= 0x80; /* pull D7/SI-out high so lm70 drives SO-in */
+ parport_write_data(pp->port, data & ~nCS);
+}
+
+static inline void clkHigh(struct spi_lm70llp *pp)
+{
+ u8 data = parport_read_data(pp->port);
+
+ parport_write_data(pp->port, data | SCLK);
+}
+
+static inline void clkLow(struct spi_lm70llp *pp)
+{
+ u8 data = parport_read_data(pp->port);
+
+ parport_write_data(pp->port, data & ~SCLK);
+}
+
+/*------------------------- SPI-LM70-specific inlines ----------------------*/
+
+static inline void spidelay(unsigned d)
+{
+ udelay(d);
+}
+
+static inline void setsck(struct spi_device *s, int is_on)
+{
+ struct spi_lm70llp *pp = spidev_to_pp(s);
+
+ if (is_on)
+ clkHigh(pp);
+ else
+ clkLow(pp);
+}
+
+static inline void setmosi(struct spi_device *s, int is_on)
+{
+ /* FIXME update D7 ... this way we can put the chip
+ * into shutdown mode and read the manufacturer ID,
+ * but we can't put it back into operational mode.
+ */
+}
+
+/*
+ * getmiso:
+ * Why do we return 0 when the SIO line is high and vice-versa?
+ * The fact is, the lm70 eval board from NS (which this driver drives),
+ * is wired in just such a way: when the lm70's SIO goes high, a transistor
+ * switches it to low, reflecting this on the parport (pin 13), and vice-versa.
+ */
+static inline int getmiso(struct spi_device *s)
+{
+ struct spi_lm70llp *pp = spidev_to_pp(s);
+
+ return ((SIO == (parport_read_status(pp->port) & SIO)) ? 0 : 1);
+}
+
+/*--------------------------------------------------------------------*/
+
+#include "spi-bitbang-txrx.h"
+
+static void lm70_chipselect(struct spi_device *spi, int value)
+{
+ struct spi_lm70llp *pp = spidev_to_pp(spi);
+
+ if (value)
+ assertCS(pp);
+ else
+ deassertCS(pp);
+}
+
+/*
+ * Our actual bitbanger routine.
+ */
+static u32 lm70_txrx(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits,
+ unsigned flags)
+{
+ return bitbang_txrx_be_cpha0(spi, nsecs, 0, flags, word, bits);
+}
+
+static void spi_lm70llp_attach(struct parport *p)
+{
+ struct pardevice *pd;
+ struct spi_lm70llp *pp;
+ struct spi_master *master;
+ int status;
+ struct pardev_cb lm70llp_cb;
+
+ if (lm70llp) {
+ pr_warn("spi_lm70llp instance already loaded. Aborting.\n");
+ return;
+ }
+
+ /* TODO: this just _assumes_ a lm70 is there ... no probe;
+ * the lm70 driver could verify it, reading the manf ID.
+ */
+
+ master = spi_alloc_master(p->physport->dev, sizeof(*pp));
+ if (!master) {
+ status = -ENOMEM;
+ goto out_fail;
+ }
+ pp = spi_master_get_devdata(master);
+
+ /*
+ * SPI and bitbang hookup.
+ */
+ pp->bitbang.master = master;
+ pp->bitbang.chipselect = lm70_chipselect;
+ pp->bitbang.txrx_word[SPI_MODE_0] = lm70_txrx;
+ pp->bitbang.flags = SPI_3WIRE;
+
+ /*
+ * Parport hookup
+ */
+ pp->port = p;
+ memset(&lm70llp_cb, 0, sizeof(lm70llp_cb));
+ lm70llp_cb.private = pp;
+ lm70llp_cb.flags = PARPORT_FLAG_EXCL;
+ pd = parport_register_dev_model(p, DRVNAME, &lm70llp_cb, 0);
+
+ if (!pd) {
+ status = -ENOMEM;
+ goto out_free_master;
+ }
+ pp->pd = pd;
+
+ status = parport_claim(pd);
+ if (status < 0)
+ goto out_parport_unreg;
+
+ /*
+ * Start SPI ...
+ */
+ status = spi_bitbang_start(&pp->bitbang);
+ if (status < 0) {
+ dev_warn(&pd->dev, "spi_bitbang_start failed with status %d\n",
+ status);
+ goto out_off_and_release;
+ }
+
+ /*
+ * The modalias name MUST match the device_driver name
+ * for the bus glue code to match and subsequently bind them.
+ * We are binding to the generic drivers/hwmon/lm70.c device
+ * driver.
+ */
+ strcpy(pp->info.modalias, "lm70");
+ pp->info.max_speed_hz = 6 * 1000 * 1000;
+ pp->info.chip_select = 0;
+ pp->info.mode = SPI_3WIRE | SPI_MODE_0;
+
+ /* power up the chip, and let the LM70 control SI/SO */
+ parport_write_data(pp->port, lm70_INIT);
+
+ /* Enable access to our primary data structure via
+ * the board info's (void *)controller_data.
+ */
+ pp->info.controller_data = pp;
+ pp->spidev_lm70 = spi_new_device(pp->bitbang.master, &pp->info);
+ if (pp->spidev_lm70)
+ dev_dbg(&pp->spidev_lm70->dev, "spidev_lm70 at %s\n",
+ dev_name(&pp->spidev_lm70->dev));
+ else {
+ dev_warn(&pd->dev, "spi_new_device failed\n");
+ status = -ENODEV;
+ goto out_bitbang_stop;
+ }
+ pp->spidev_lm70->bits_per_word = 8;
+
+ lm70llp = pp;
+ return;
+
+out_bitbang_stop:
+ spi_bitbang_stop(&pp->bitbang);
+out_off_and_release:
+ /* power down */
+ parport_write_data(pp->port, 0);
+ mdelay(10);
+ parport_release(pp->pd);
+out_parport_unreg:
+ parport_unregister_device(pd);
+out_free_master:
+ spi_master_put(master);
+out_fail:
+ pr_info("spi_lm70llp probe fail, status %d\n", status);
+}
+
+static void spi_lm70llp_detach(struct parport *p)
+{
+ struct spi_lm70llp *pp;
+
+ if (!lm70llp || lm70llp->port != p)
+ return;
+
+ pp = lm70llp;
+ spi_bitbang_stop(&pp->bitbang);
+
+ /* power down */
+ parport_write_data(pp->port, 0);
+
+ parport_release(pp->pd);
+ parport_unregister_device(pp->pd);
+
+ spi_master_put(pp->bitbang.master);
+
+ lm70llp = NULL;
+}
+
+static struct parport_driver spi_lm70llp_drv = {
+ .name = DRVNAME,
+ .match_port = spi_lm70llp_attach,
+ .detach = spi_lm70llp_detach,
+ .devmodel = true,
+};
+module_parport_driver(spi_lm70llp_drv);
+
+MODULE_AUTHOR("Kaiwan N Billimoria <kaiwan@designergraphix.com>");
+MODULE_DESCRIPTION(
+ "Parport adapter for the National Semiconductor LM70 LLP eval board");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-loopback-test.c b/drivers/spi/spi-loopback-test.c
new file mode 100644
index 000000000..dd7de8fa3
--- /dev/null
+++ b/drivers/spi/spi-loopback-test.c
@@ -0,0 +1,1111 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * linux/drivers/spi/spi-loopback-test.c
+ *
+ * (c) Martin Sperl <kernel@martin.sperl.org>
+ *
+ * Loopback test driver to test several typical spi_message conditions
+ * that a spi_master driver may encounter;
+ * it can also be used for regression testing
+ */
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/ktime.h>
+#include <linux/list.h>
+#include <linux/list_sort.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/printk.h>
+#include <linux/vmalloc.h>
+#include <linux/spi/spi.h>
+
+#include "spi-test.h"
+
+/* flag to only simulate transfers */
+static int simulate_only;
+module_param(simulate_only, int, 0);
+MODULE_PARM_DESC(simulate_only, "if not 0 do not execute the spi message");
+
+/* dump spi messages */
+static int dump_messages;
+module_param(dump_messages, int, 0);
+MODULE_PARM_DESC(dump_messages,
+ "=1 dump the basic spi_message structure, " \
+ "=2 dump the spi_message structure including data, " \
+ "=3 dump the spi_message structure before and after execution");
+/* the device is jumpered for loopback - enabling some rx_buf tests */
+static int loopback;
+module_param(loopback, int, 0);
+MODULE_PARM_DESC(loopback,
+ "if set enable loopback mode, where the rx_buf " \
+ "is checked to match tx_buf after the spi_message " \
+ "is executed");
+
+static int loop_req;
+module_param(loop_req, int, 0);
+MODULE_PARM_DESC(loop_req,
+ "if set the controller will be asked to enable test loop mode. " \
+ "If the controller supports it, MISO and MOSI will be connected");
+
+static int no_cs;
+module_param(no_cs, int, 0);
+MODULE_PARM_DESC(no_cs,
+ "if set Chip Select (CS) will not be used");
+
+/* run only a specific test */
+static int run_only_test = -1;
+module_param(run_only_test, int, 0);
+MODULE_PARM_DESC(run_only_test,
+ "only run the test with this number (0-based !)");
+
+/* use vmalloc'ed buffers */
+static int use_vmalloc;
+module_param(use_vmalloc, int, 0644);
+MODULE_PARM_DESC(use_vmalloc,
+ "use vmalloc'ed buffers instead of kmalloc'ed");
+
+/* check rx ranges */
+static int check_ranges = 1;
+module_param(check_ranges, int, 0644);
+MODULE_PARM_DESC(check_ranges,
+ "checks rx_buffer pattern are valid");
+
+/* the actual tests to execute */
+static struct spi_test spi_tests[] = {
+ {
+ .description = "tx/rx-transfer - start of page",
+ .fill_option = FILL_COUNT_8,
+ .iterate_len = { ITERATE_MAX_LEN },
+ .iterate_tx_align = ITERATE_ALIGN,
+ .iterate_rx_align = ITERATE_ALIGN,
+ .transfer_count = 1,
+ .transfers = {
+ {
+ .tx_buf = TX(0),
+ .rx_buf = RX(0),
+ },
+ },
+ },
+ {
+ .description = "tx/rx-transfer - crossing PAGE_SIZE",
+ .fill_option = FILL_COUNT_8,
+ .iterate_len = { ITERATE_LEN },
+ .iterate_tx_align = ITERATE_ALIGN,
+ .iterate_rx_align = ITERATE_ALIGN,
+ .transfer_count = 1,
+ .transfers = {
+ {
+ .tx_buf = TX(PAGE_SIZE - 4),
+ .rx_buf = RX(PAGE_SIZE - 4),
+ },
+ },
+ },
+ {
+ .description = "tx-transfer - only",
+ .fill_option = FILL_COUNT_8,
+ .iterate_len = { ITERATE_MAX_LEN },
+ .iterate_tx_align = ITERATE_ALIGN,
+ .transfer_count = 1,
+ .transfers = {
+ {
+ .tx_buf = TX(0),
+ },
+ },
+ },
+ {
+ .description = "rx-transfer - only",
+ .fill_option = FILL_COUNT_8,
+ .iterate_len = { ITERATE_MAX_LEN },
+ .iterate_rx_align = ITERATE_ALIGN,
+ .transfer_count = 1,
+ .transfers = {
+ {
+ .rx_buf = RX(0),
+ },
+ },
+ },
+ {
+ .description = "two tx-transfers - alter both",
+ .fill_option = FILL_COUNT_8,
+ .iterate_len = { ITERATE_LEN },
+ .iterate_tx_align = ITERATE_ALIGN,
+ .iterate_transfer_mask = BIT(0) | BIT(1),
+ .transfer_count = 2,
+ .transfers = {
+ {
+ .tx_buf = TX(0),
+ },
+ {
+ /* this is why we can't use ITERATE_MAX_LEN */
+ .tx_buf = TX(SPI_TEST_MAX_SIZE_HALF),
+ },
+ },
+ },
+ {
+ .description = "two tx-transfers - alter first",
+ .fill_option = FILL_COUNT_8,
+ .iterate_len = { ITERATE_MAX_LEN },
+ .iterate_tx_align = ITERATE_ALIGN,
+ .iterate_transfer_mask = BIT(0),
+ .transfer_count = 2,
+ .transfers = {
+ {
+ .tx_buf = TX(64),
+ },
+ {
+ .len = 1,
+ .tx_buf = TX(0),
+ },
+ },
+ },
+ {
+ .description = "two tx-transfers - alter second",
+ .fill_option = FILL_COUNT_8,
+ .iterate_len = { ITERATE_MAX_LEN },
+ .iterate_tx_align = ITERATE_ALIGN,
+ .iterate_transfer_mask = BIT(1),
+ .transfer_count = 2,
+ .transfers = {
+ {
+ .len = 16,
+ .tx_buf = TX(0),
+ },
+ {
+ .tx_buf = TX(64),
+ },
+ },
+ },
+ {
+ .description = "two transfers tx then rx - alter both",
+ .fill_option = FILL_COUNT_8,
+ .iterate_len = { ITERATE_MAX_LEN },
+ .iterate_tx_align = ITERATE_ALIGN,
+ .iterate_transfer_mask = BIT(0) | BIT(1),
+ .transfer_count = 2,
+ .transfers = {
+ {
+ .tx_buf = TX(0),
+ },
+ {
+ .rx_buf = RX(0),
+ },
+ },
+ },
+ {
+ .description = "two transfers tx then rx - alter tx",
+ .fill_option = FILL_COUNT_8,
+ .iterate_len = { ITERATE_MAX_LEN },
+ .iterate_tx_align = ITERATE_ALIGN,
+ .iterate_transfer_mask = BIT(0),
+ .transfer_count = 2,
+ .transfers = {
+ {
+ .tx_buf = TX(0),
+ },
+ {
+ .len = 1,
+ .rx_buf = RX(0),
+ },
+ },
+ },
+ {
+ .description = "two transfers tx then rx - alter rx",
+ .fill_option = FILL_COUNT_8,
+ .iterate_len = { ITERATE_MAX_LEN },
+ .iterate_tx_align = ITERATE_ALIGN,
+ .iterate_transfer_mask = BIT(1),
+ .transfer_count = 2,
+ .transfers = {
+ {
+ .len = 1,
+ .tx_buf = TX(0),
+ },
+ {
+ .rx_buf = RX(0),
+ },
+ },
+ },
+ {
+ .description = "two tx+rx transfers - alter both",
+ .fill_option = FILL_COUNT_8,
+ .iterate_len = { ITERATE_LEN },
+ .iterate_tx_align = ITERATE_ALIGN,
+ .iterate_transfer_mask = BIT(0) | BIT(1),
+ .transfer_count = 2,
+ .transfers = {
+ {
+ .tx_buf = TX(0),
+ .rx_buf = RX(0),
+ },
+ {
+ /* making sure we align without overwrite;
+ * this is why we cannot use ITERATE_MAX_LEN
+ */
+ .tx_buf = TX(SPI_TEST_MAX_SIZE_HALF),
+ .rx_buf = RX(SPI_TEST_MAX_SIZE_HALF),
+ },
+ },
+ },
+ {
+ .description = "two tx+rx transfers - alter first",
+ .fill_option = FILL_COUNT_8,
+ .iterate_len = { ITERATE_MAX_LEN },
+ .iterate_tx_align = ITERATE_ALIGN,
+ .iterate_transfer_mask = BIT(0),
+ .transfer_count = 2,
+ .transfers = {
+ {
+ /* making sure we align without overwrite */
+ .tx_buf = TX(1024),
+ .rx_buf = RX(1024),
+ },
+ {
+ .len = 1,
+ /* making sure we align without overwrite */
+ .tx_buf = TX(0),
+ .rx_buf = RX(0),
+ },
+ },
+ },
+ {
+ .description = "two tx+rx transfers - alter second",
+ .fill_option = FILL_COUNT_8,
+ .iterate_len = { ITERATE_MAX_LEN },
+ .iterate_tx_align = ITERATE_ALIGN,
+ .iterate_transfer_mask = BIT(1),
+ .transfer_count = 2,
+ .transfers = {
+ {
+ .len = 1,
+ .tx_buf = TX(0),
+ .rx_buf = RX(0),
+ },
+ {
+ /* making sure we align without overwrite */
+ .tx_buf = TX(1024),
+ .rx_buf = RX(1024),
+ },
+ },
+ },
+ {
+ .description = "two tx+rx transfers - delay after transfer",
+ .fill_option = FILL_COUNT_8,
+ .iterate_len = { ITERATE_MAX_LEN },
+ .iterate_transfer_mask = BIT(0) | BIT(1),
+ .transfer_count = 2,
+ .transfers = {
+ {
+ .tx_buf = TX(0),
+ .rx_buf = RX(0),
+ .delay = {
+ .value = 1000,
+ .unit = SPI_DELAY_UNIT_USECS,
+ },
+ },
+ {
+ .tx_buf = TX(0),
+ .rx_buf = RX(0),
+ .delay = {
+ .value = 1000,
+ .unit = SPI_DELAY_UNIT_USECS,
+ },
+ },
+ },
+ },
+ {
+ .description = "three tx+rx transfers with overlapping cache lines",
+ .fill_option = FILL_COUNT_8,
+ /*
+ * This should be large enough for the controller driver to
+ * choose to transfer it with DMA.
+ */
+ .iterate_len = { 512, -1 },
+ .iterate_transfer_mask = BIT(1),
+ .transfer_count = 3,
+ .transfers = {
+ {
+ .len = 1,
+ .tx_buf = TX(0),
+ .rx_buf = RX(0),
+ },
+ {
+ .tx_buf = TX(1),
+ .rx_buf = RX(1),
+ },
+ {
+ .len = 1,
+ .tx_buf = TX(513),
+ .rx_buf = RX(513),
+ },
+ },
+ },
+
+ { /* end of tests sequence */ }
+};
+
+static int spi_loopback_test_probe(struct spi_device *spi)
+{
+ int ret;
+
+ if (loop_req || no_cs) {
+ spi->mode |= loop_req ? SPI_LOOP : 0;
+ spi->mode |= no_cs ? SPI_NO_CS : 0;
+ ret = spi_setup(spi);
+ if (ret) {
+ dev_err(&spi->dev, "SPI setup with SPI_LOOP or SPI_NO_CS failed (%d)\n",
+ ret);
+ return ret;
+ }
+ }
+
+ dev_info(&spi->dev, "Executing spi-loopback-tests\n");
+
+ ret = spi_test_run_tests(spi, spi_tests);
+
+ dev_info(&spi->dev, "Finished spi-loopback-tests with return: %i\n",
+ ret);
+
+ return ret;
+}
+
+/* non-const match table to permit changes via a module parameter */
+static struct of_device_id spi_loopback_test_of_match[] = {
+ { .compatible = "linux,spi-loopback-test", },
+ { }
+};
+
+/* allow overriding the compatible string via a module parameter */
+module_param_string(compatible, spi_loopback_test_of_match[0].compatible,
+ sizeof(spi_loopback_test_of_match[0].compatible),
+ 0000);
+
+MODULE_DEVICE_TABLE(of, spi_loopback_test_of_match);
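+/*
+ * Illustrative devicetree snippet (node and property values assumed)
+ * that would bind this test driver to an otherwise unused chip select:
+ *
+ *	spi-test@0 {
+ *		compatible = "linux,spi-loopback-test";
+ *		reg = <0>;
+ *		spi-max-frequency = <1000000>;
+ *	};
+ */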
+
+static struct spi_driver spi_loopback_test_driver = {
+ .driver = {
+ .name = "spi-loopback-test",
+ .owner = THIS_MODULE,
+ .of_match_table = spi_loopback_test_of_match,
+ },
+ .probe = spi_loopback_test_probe,
+};
+
+module_spi_driver(spi_loopback_test_driver);
+
+MODULE_AUTHOR("Martin Sperl <kernel@martin.sperl.org>");
+MODULE_DESCRIPTION("test spi_driver to check core functionality");
+MODULE_LICENSE("GPL");
+
+/*-------------------------------------------------------------------------*/
+
+/* spi_test implementation */
+
+#define RANGE_CHECK(ptr, plen, start, slen) \
+ ((ptr >= start) && (ptr + plen <= start + slen))
+
+/* we allocate one page more, to allow for offsets */
+#define SPI_TEST_MAX_SIZE_PLUS (SPI_TEST_MAX_SIZE + PAGE_SIZE)
+
+static void spi_test_print_hex_dump(char *pre, const void *ptr, size_t len)
+{
+ /* limit the hex_dump */
+ if (len < 1024) {
+ print_hex_dump(KERN_INFO, pre,
+ DUMP_PREFIX_OFFSET, 16, 1,
+ ptr, len, 0);
+ return;
+ }
+ /* print head */
+ print_hex_dump(KERN_INFO, pre,
+ DUMP_PREFIX_OFFSET, 16, 1,
+ ptr, 512, 0);
+ /* print tail */
+ pr_info("%s truncated - continuing at offset %04zx\n",
+ pre, len - 512);
+ print_hex_dump(KERN_INFO, pre,
+ DUMP_PREFIX_OFFSET, 16, 1,
+ ptr + (len - 512), 512, 0);
+}
+
+static void spi_test_dump_message(struct spi_device *spi,
+ struct spi_message *msg,
+ bool dump_data)
+{
+ struct spi_transfer *xfer;
+ int i;
+ u8 b;
+
+ dev_info(&spi->dev, " spi_msg@%pK\n", msg);
+ if (msg->status)
+ dev_info(&spi->dev, " status: %i\n",
+ msg->status);
+ dev_info(&spi->dev, " frame_length: %i\n",
+ msg->frame_length);
+ dev_info(&spi->dev, " actual_length: %i\n",
+ msg->actual_length);
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ dev_info(&spi->dev, " spi_transfer@%pK\n", xfer);
+ dev_info(&spi->dev, " len: %i\n", xfer->len);
+ dev_info(&spi->dev, " tx_buf: %pK\n", xfer->tx_buf);
+ if (dump_data && xfer->tx_buf)
+ spi_test_print_hex_dump(" TX: ",
+ xfer->tx_buf,
+ xfer->len);
+
+ dev_info(&spi->dev, " rx_buf: %pK\n", xfer->rx_buf);
+ if (dump_data && xfer->rx_buf)
+ spi_test_print_hex_dump(" RX: ",
+ xfer->rx_buf,
+ xfer->len);
+ /* check for unwritten test pattern on rx_buf */
+ if (xfer->rx_buf) {
+ for (i = 0 ; i < xfer->len ; i++) {
+ b = ((u8 *)xfer->rx_buf)[xfer->len - 1 - i];
+ if (b != SPI_TEST_PATTERN_UNWRITTEN)
+ break;
+ }
+ if (i)
+ dev_info(&spi->dev,
+ " rx_buf filled with %02x starts at offset: %i\n",
+ SPI_TEST_PATTERN_UNWRITTEN,
+ xfer->len - i);
+ }
+ }
+}
+
+struct rx_ranges {
+ struct list_head list;
+ u8 *start;
+ u8 *end;
+};
+
+static int rx_ranges_cmp(void *priv, const struct list_head *a,
+ const struct list_head *b)
+{
+ struct rx_ranges *rx_a = list_entry(a, struct rx_ranges, list);
+ struct rx_ranges *rx_b = list_entry(b, struct rx_ranges, list);
+
+ if (rx_a->start > rx_b->start)
+ return 1;
+ if (rx_a->start < rx_b->start)
+ return -1;
+ return 0;
+}
+
+static int spi_check_rx_ranges(struct spi_device *spi,
+ struct spi_message *msg,
+ void *rx)
+{
+ struct spi_transfer *xfer;
+ struct rx_ranges ranges[SPI_TEST_MAX_TRANSFERS], *r;
+ int i = 0;
+ LIST_HEAD(ranges_list);
+ u8 *addr;
+ int ret = 0;
+
+ /* loop over all transfers to fill in the rx_ranges */
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ /* if there is no rx, then no check is needed */
+ if (!xfer->rx_buf)
+ continue;
+ /* fill in the rx_range */
+ if (RANGE_CHECK(xfer->rx_buf, xfer->len,
+ rx, SPI_TEST_MAX_SIZE_PLUS)) {
+ ranges[i].start = xfer->rx_buf;
+ ranges[i].end = xfer->rx_buf + xfer->len;
+ list_add(&ranges[i].list, &ranges_list);
+ i++;
+ }
+ }
+
+ /* if no ranges, then we can return and avoid the checks...*/
+ if (!i)
+ return 0;
+
+ /* sort the list */
+ list_sort(NULL, &ranges_list, rx_ranges_cmp);
+
+ /* and iterate over all the rx addresses */
+ for (addr = rx; addr < (u8 *)rx + SPI_TEST_MAX_SIZE_PLUS; addr++) {
+ /* if this byte still holds the DO_NOT_WRITE pattern,
+ * then continue with the loop...
+ */
+ if (*addr == SPI_TEST_PATTERN_DO_NOT_WRITE)
+ continue;
+
+ /* check if we are inside a range */
+ list_for_each_entry(r, &ranges_list, list) {
+ /* if so then set to end... */
+ if ((addr >= r->start) && (addr < r->end))
+ addr = r->end;
+ }
+ /* second test after a (hopeful) translation */
+ if (*addr == SPI_TEST_PATTERN_DO_NOT_WRITE)
+ continue;
+
+ /* if still not found then something has modified too much */
+ /* we could list the "closest" transfer here... */
+ dev_err(&spi->dev,
+ "loopback strangeness - rx changed outside of allowed range at: %pK\n",
+ addr);
+ /* do not return, only set ret,
+ * so that we list all addresses
+ */
+ ret = -ERANGE;
+ }
+
+ return ret;
+}
+
+static int spi_test_check_elapsed_time(struct spi_device *spi,
+ struct spi_test *test)
+{
+ int i;
+ unsigned long long estimated_time = 0;
+ unsigned long long delay_usecs = 0;
+
+ for (i = 0; i < test->transfer_count; i++) {
+ struct spi_transfer *xfer = test->transfers + i;
+ unsigned long long nbits = (unsigned long long)BITS_PER_BYTE *
+ xfer->len;
+
+ delay_usecs += xfer->delay.value;
+ if (!xfer->speed_hz)
+ continue;
+ estimated_time += div_u64(nbits * NSEC_PER_SEC, xfer->speed_hz);
+ }
+
+ estimated_time += delay_usecs * NSEC_PER_USEC;
+ if (test->elapsed_time < estimated_time) {
+ dev_err(&spi->dev,
+ "elapsed time %lld ns is shorter than minimum estimated time %lld ns\n",
+ test->elapsed_time, estimated_time);
+
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int spi_test_check_loopback_result(struct spi_device *spi,
+ struct spi_message *msg,
+ void *tx, void *rx)
+{
+ struct spi_transfer *xfer;
+ u8 rxb, txb;
+ size_t i;
+ int ret;
+
+ /* checks rx_buffer pattern are valid with loopback or without */
+ if (check_ranges) {
+ ret = spi_check_rx_ranges(spi, msg, rx);
+ if (ret)
+ return ret;
+ }
+
+ /* if we run without loopback, then return now */
+ if (!loopback)
+ return 0;
+
+ /* if applicable to transfer check that rx_buf is equal to tx_buf */
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ /* if there is no rx, then no check is needed */
+ if (!xfer->len || !xfer->rx_buf)
+ continue;
+ /* so depending on tx_buf we need to handle things */
+ if (xfer->tx_buf) {
+ for (i = 0; i < xfer->len; i++) {
+ txb = ((u8 *)xfer->tx_buf)[i];
+ rxb = ((u8 *)xfer->rx_buf)[i];
+ if (txb != rxb)
+ goto mismatch_error;
+ }
+ } else {
+ /* first byte received */
+ txb = ((u8 *)xfer->rx_buf)[0];
+ /* first byte may be 0 or xff */
+ if (!((txb == 0) || (txb == 0xff))) {
+ dev_err(&spi->dev,
+ "loopback strangeness - we expect 0x00 or 0xff, but not 0x%02x\n",
+ txb);
+ return -EINVAL;
+ }
+ /* check that all bytes are identical */
+ for (i = 1; i < xfer->len; i++) {
+ rxb = ((u8 *)xfer->rx_buf)[i];
+ if (rxb != txb)
+ goto mismatch_error;
+ }
+ }
+ }
+
+ return 0;
+
+mismatch_error:
+ dev_err(&spi->dev,
+ "loopback strangeness - transfer mismatch on byte %04zx - expected 0x%02x, but got 0x%02x\n",
+ i, txb, rxb);
+
+ return -EINVAL;
+}
+
+static int spi_test_translate(struct spi_device *spi,
+ void **ptr, size_t len,
+ void *tx, void *rx)
+{
+ size_t off;
+
+ /* return on null */
+ if (!*ptr)
+ return 0;
+
+ /* in the MAX_SIZE_HALF case modify the pointer */
+ if (((size_t)*ptr) & SPI_TEST_MAX_SIZE_HALF)
+ /* move the pointer to the correct range */
+ *ptr += (SPI_TEST_MAX_SIZE_PLUS / 2) -
+ SPI_TEST_MAX_SIZE_HALF;
+
+ /* RX range
+ * - we check against MAX_SIZE_PLUS to allow for automated alignment
+ */
+ if (RANGE_CHECK(*ptr, len, RX(0), SPI_TEST_MAX_SIZE_PLUS)) {
+ off = *ptr - RX(0);
+ *ptr = rx + off;
+
+ return 0;
+ }
+
+ /* TX range */
+ if (RANGE_CHECK(*ptr, len, TX(0), SPI_TEST_MAX_SIZE_PLUS)) {
+ off = *ptr - TX(0);
+ *ptr = tx + off;
+
+ return 0;
+ }
+
+ dev_err(&spi->dev,
+ "PointerRange [%pK:%pK[ not in range [%pK:%pK[ or [%pK:%pK[\n",
+ *ptr, *ptr + len,
+ RX(0), RX(SPI_TEST_MAX_SIZE),
+ TX(0), TX(SPI_TEST_MAX_SIZE));
+
+ return -EINVAL;
+}
+
+static int spi_test_fill_pattern(struct spi_device *spi,
+ struct spi_test *test)
+{
+ struct spi_transfer *xfers = test->transfers;
+ u8 *tx_buf;
+ size_t count = 0;
+ int i, j;
+
+#ifdef __BIG_ENDIAN
+#define GET_VALUE_BYTE(value, index, bytes) \
+ (value >> (8 * (bytes - 1 - count % bytes)))
+#else
+#define GET_VALUE_BYTE(value, index, bytes) \
+ (value >> (8 * (count % bytes)))
+#endif
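+
+/*
+ * Illustrative example (fill_pattern value assumed): FILL_MEMSET_16
+ * with fill_pattern 0x1234 emits 0x34 0x12 0x34 0x12 ... on
+ * little-endian hosts and 0x12 0x34 0x12 0x34 ... on big-endian hosts.
+ */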
+
+ /* fill all transfers with the pattern requested */
+ for (i = 0; i < test->transfer_count; i++) {
+ /* fill rx_buf with SPI_TEST_PATTERN_UNWRITTEN */
+ if (xfers[i].rx_buf)
+ memset(xfers[i].rx_buf, SPI_TEST_PATTERN_UNWRITTEN,
+ xfers[i].len);
+ /* if tx_buf is NULL then skip */
+ tx_buf = (u8 *)xfers[i].tx_buf;
+ if (!tx_buf)
+ continue;
+ /* modify all the transfers */
+ for (j = 0; j < xfers[i].len; j++, tx_buf++, count++) {
+ /* fill tx */
+ switch (test->fill_option) {
+ case FILL_MEMSET_8:
+ *tx_buf = test->fill_pattern;
+ break;
+ case FILL_MEMSET_16:
+ *tx_buf = GET_VALUE_BYTE(test->fill_pattern,
+ count, 2);
+ break;
+ case FILL_MEMSET_24:
+ *tx_buf = GET_VALUE_BYTE(test->fill_pattern,
+ count, 3);
+ break;
+ case FILL_MEMSET_32:
+ *tx_buf = GET_VALUE_BYTE(test->fill_pattern,
+ count, 4);
+ break;
+ case FILL_COUNT_8:
+ *tx_buf = count;
+ break;
+ case FILL_COUNT_16:
+ *tx_buf = GET_VALUE_BYTE(count, count, 2);
+ break;
+ case FILL_COUNT_24:
+ *tx_buf = GET_VALUE_BYTE(count, count, 3);
+ break;
+ case FILL_COUNT_32:
+ *tx_buf = GET_VALUE_BYTE(count, count, 4);
+ break;
+ case FILL_TRANSFER_BYTE_8:
+ *tx_buf = j;
+ break;
+ case FILL_TRANSFER_BYTE_16:
+ *tx_buf = GET_VALUE_BYTE(j, j, 2);
+ break;
+ case FILL_TRANSFER_BYTE_24:
+ *tx_buf = GET_VALUE_BYTE(j, j, 3);
+ break;
+ case FILL_TRANSFER_BYTE_32:
+ *tx_buf = GET_VALUE_BYTE(j, j, 4);
+ break;
+ case FILL_TRANSFER_NUM:
+ *tx_buf = i;
+ break;
+ default:
+ dev_err(&spi->dev,
+ "unsupported fill_option: %i\n",
+ test->fill_option);
+ return -EINVAL;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int _spi_test_run_iter(struct spi_device *spi,
+ struct spi_test *test,
+ void *tx, void *rx)
+{
+ struct spi_message *msg = &test->msg;
+ struct spi_transfer *x;
+ int i, ret;
+
+ /* initialize message - zero-filled via static initialization */
+ spi_message_init_no_memset(msg);
+
+ /* fill rx with the DO_NOT_WRITE pattern */
+ memset(rx, SPI_TEST_PATTERN_DO_NOT_WRITE, SPI_TEST_MAX_SIZE_PLUS);
+
+ /* add the individual transfers */
+ for (i = 0; i < test->transfer_count; i++) {
+ x = &test->transfers[i];
+
+ /* patch the values of tx_buf */
+ ret = spi_test_translate(spi, (void **)&x->tx_buf, x->len,
+ (void *)tx, rx);
+ if (ret)
+ return ret;
+
+ /* patch the values of rx_buf */
+ ret = spi_test_translate(spi, &x->rx_buf, x->len,
+ (void *)tx, rx);
+ if (ret)
+ return ret;
+
+ /* and add it to the list */
+ spi_message_add_tail(x, msg);
+ }
+
+ /* fill in the transfer buffers with pattern */
+ ret = spi_test_fill_pattern(spi, test);
+ if (ret)
+ return ret;
+
+ /* and execute */
+ if (test->execute_msg)
+ ret = test->execute_msg(spi, test, tx, rx);
+ else
+ ret = spi_test_execute_msg(spi, test, tx, rx);
+
+ /* handle result */
+ if (ret == test->expected_return)
+ return 0;
+
+ dev_err(&spi->dev,
+ "test failed - test returned %i, but we expect %i\n",
+ ret, test->expected_return);
+
+ if (ret)
+ return ret;
+
+ /* ret is 0 here, but we expected something else,
+ * so return something special
+ */
+ return -EFAULT;
+}
+
+static int spi_test_run_iter(struct spi_device *spi,
+ const struct spi_test *testtemplate,
+ void *tx, void *rx,
+ size_t len,
+ size_t tx_off,
+ size_t rx_off
+ )
+{
+ struct spi_test test;
+ int i, tx_count, rx_count;
+
+ /* copy the test template to test */
+ memcpy(&test, testtemplate, sizeof(test));
+
+ /* if iterate_transfer_mask is not set,
+ * then set it to first transfer only
+ */
+ if (!(test.iterate_transfer_mask & (BIT(test.transfer_count) - 1)))
+ test.iterate_transfer_mask = 1;
+
+ /* count number of transfers with tx/rx_buf != NULL */
+ rx_count = tx_count = 0;
+ for (i = 0; i < test.transfer_count; i++) {
+ if (test.transfers[i].tx_buf)
+ tx_count++;
+ if (test.transfers[i].rx_buf)
+ rx_count++;
+ }
+
+ /* in some iteration cases warn and exit early,
+ * as there is nothing to do that has not been tested already...
+ */
+ if (tx_off && (!tx_count)) {
+ dev_warn_once(&spi->dev,
+ "%s: iterate_tx_off configured with tx_buf==NULL - ignoring\n",
+ test.description);
+ return 0;
+ }
+ if (rx_off && (!rx_count)) {
+ dev_warn_once(&spi->dev,
+ "%s: iterate_rx_off configured with rx_buf==NULL - ignoring\n",
+ test.description);
+ return 0;
+ }
+
+ /* write out info */
+ if (!(len || tx_off || rx_off)) {
+ dev_info(&spi->dev, "Running test %s\n", test.description);
+ } else {
+ dev_info(&spi->dev,
+ " with iteration values: len = %zu, tx_off = %zu, rx_off = %zu\n",
+ len, tx_off, rx_off);
+ }
+
+ /* update in the values from iteration values */
+ for (i = 0; i < test.transfer_count; i++) {
+ /* only when bit in transfer mask is set */
+ if (!(test.iterate_transfer_mask & BIT(i)))
+ continue;
+ test.transfers[i].len = len;
+ if (test.transfers[i].tx_buf)
+ test.transfers[i].tx_buf += tx_off;
+ if (test.transfers[i].rx_buf)
+ test.transfers[i].rx_buf += rx_off;
+ }
+
+ /* and execute */
+ return _spi_test_run_iter(spi, &test, tx, rx);
+}
+
+/**
+ * spi_test_execute_msg - default implementation to run a test
+ *
+ * @spi: @spi_device on which to run the @spi_message
+ * @test: the test to execute, which already contains @msg
+ * @tx: the tx buffer allocated for the test sequence
+ * @rx: the rx buffer allocated for the test sequence
+ *
+ * Returns: the spi_sync error code, or an error from the basic checks done here
+ */
+int spi_test_execute_msg(struct spi_device *spi, struct spi_test *test,
+ void *tx, void *rx)
+{
+ struct spi_message *msg = &test->msg;
+ int ret = 0;
+ int i;
+
+ /* only if we do not simulate */
+ if (!simulate_only) {
+ ktime_t start;
+
+ /* dump the complete message before and after the transfer */
+ if (dump_messages == 3)
+ spi_test_dump_message(spi, msg, true);
+
+ start = ktime_get();
+ /* run spi message */
+ ret = spi_sync(spi, msg);
+ test->elapsed_time = ktime_to_ns(ktime_sub(ktime_get(), start));
+ if (ret == -ETIMEDOUT) {
+ dev_info(&spi->dev,
+ "spi-message timed out - rerunning...\n");
+ /* rerun after a few explicit schedules */
+ for (i = 0; i < 16; i++)
+ schedule();
+ ret = spi_sync(spi, msg);
+ }
+ if (ret) {
+ dev_err(&spi->dev,
+ "Failed to execute spi_message: %i\n",
+ ret);
+ goto exit;
+ }
+
+ /* do some extra error checks */
+ if (msg->frame_length != msg->actual_length) {
+ dev_err(&spi->dev,
+ "actual length differs from expected\n");
+ ret = -EIO;
+ goto exit;
+ }
+
+ /* run rx-buffer tests */
+ ret = spi_test_check_loopback_result(spi, msg, tx, rx);
+ if (ret)
+ goto exit;
+
+ ret = spi_test_check_elapsed_time(spi, test);
+ }
+
+ /* if requested or on error dump message (including data) */
+exit:
+ if (dump_messages || ret)
+ spi_test_dump_message(spi, msg,
+ (dump_messages >= 2) || (ret));
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(spi_test_execute_msg);
+
+/**
+ * spi_test_run_test - run an individual spi_test
+ * including all the relevant iterations on:
+ * length and buffer alignment
+ *
+ * @spi: the spi_device to send the messages to
+ * @test: the test which we need to execute
+ * @tx: the tx buffer allocated for the test sequence
+ * @rx: the rx buffer allocated for the test sequence
+ *
+ * Returns: status code of spi_sync or other failures
+ */
+
+int spi_test_run_test(struct spi_device *spi, const struct spi_test *test,
+ void *tx, void *rx)
+{
+ int idx_len;
+ size_t len;
+ size_t tx_align, rx_align;
+ int ret;
+
+ /* test for transfer limits */
+ if (test->transfer_count >= SPI_TEST_MAX_TRANSFERS) {
+ dev_err(&spi->dev,
+ "%s: Exceeded max number of transfers with %i\n",
+ test->description, test->transfer_count);
+ return -E2BIG;
+ }
+
+ /* set up some values in spi_message
+ * based on settings in spi_master;
+ * some of this can also be done in the run() method
+ */
+
+ /* iterate over all the iterable values using macros
+ * (to make it a bit more readable...)
+ */
+#define FOR_EACH_ALIGNMENT(var) \
+ for (var = 0; \
+ var < (test->iterate_##var ? \
+ (spi->master->dma_alignment ? \
+ spi->master->dma_alignment : \
+ test->iterate_##var) : \
+ 1); \
+ var++)
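+ /*
+ * The macro above walks the alignment offset from 0 to N - 1, where
+ * N is the controller's dma_alignment when one is declared, the
+ * test's iterate_<var> value otherwise, or 1 (a single pass at
+ * offset 0) when the test does not request this iteration at all.
+ */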
+
+ for (idx_len = 0; idx_len < SPI_TEST_MAX_ITERATE &&
+ (len = test->iterate_len[idx_len]) != -1; idx_len++) {
+ FOR_EACH_ALIGNMENT(tx_align) {
+ FOR_EACH_ALIGNMENT(rx_align) {
+ /* and run the iteration */
+ ret = spi_test_run_iter(spi, test,
+ tx, rx,
+ len,
+ tx_align,
+ rx_align);
+ if (ret)
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(spi_test_run_test);
+
+/**
+ * spi_test_run_tests - run an array of spi_messages tests
+ * @spi: the spi device on which to run the tests
+ * @tests: NULL-terminated array of @spi_test
+ *
+ * Returns: status errors as per @spi_test_run_test()
+ */
+
+int spi_test_run_tests(struct spi_device *spi,
+ struct spi_test *tests)
+{
+ char *rx = NULL, *tx = NULL;
+ int ret = 0, count = 0;
+ struct spi_test *test;
+
+ /* allocate rx/tx buffers of 128kB size without devm
+ * in the hope that they land on a page boundary
+ */
+ if (use_vmalloc)
+ rx = vmalloc(SPI_TEST_MAX_SIZE_PLUS);
+ else
+ rx = kzalloc(SPI_TEST_MAX_SIZE_PLUS, GFP_KERNEL);
+ if (!rx)
+ return -ENOMEM;
+
+
+ if (use_vmalloc)
+ tx = vmalloc(SPI_TEST_MAX_SIZE_PLUS);
+ else
+ tx = kzalloc(SPI_TEST_MAX_SIZE_PLUS, GFP_KERNEL);
+ if (!tx) {
+ ret = -ENOMEM;
+ goto err_tx;
+ }
+
+ /* now run the individual tests in the table */
+ for (test = tests, count = 0; test->description[0];
+ test++, count++) {
+ /* only run test if requested */
+ if ((run_only_test > -1) && (count != run_only_test))
+ continue;
+ /* run custom implementation */
+ if (test->run_test)
+ ret = test->run_test(spi, test, tx, rx);
+ else
+ ret = spi_test_run_test(spi, test, tx, rx);
+ if (ret)
+ goto out;
+ /* add some delays so that we can easily
+ * detect the individual tests when using a logic analyzer;
+ * we also add scheduling to avoid potential spi_timeouts
+ */
+ mdelay(100);
+ schedule();
+ }
+
+out:
+ kvfree(tx);
+err_tx:
+ kvfree(rx);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(spi_test_run_tests);
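+
+/*
+ * Illustrative sketch (hypothetical client module, not part of this file):
+ * callers provide a table of struct spi_test terminated by an entry with an
+ * empty description and hand it to spi_test_run_tests() from their probe().
+ * Only .description is shown; see the struct spi_test definition for the
+ * remaining members (transfers, lengths, fill options, ...).
+ */
+static struct spi_test example_tests[] = {
+	{
+		.description = "simple loopback test",
+		/* transfers, lengths and fill options omitted */
+	},
+	{ /* terminating entry: empty description ends the table */ },
+};
+
+static int example_probe(struct spi_device *spi)
+{
+	return spi_test_run_tests(spi, example_tests);
+}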
diff --git a/drivers/spi/spi-lp8841-rtc.c b/drivers/spi/spi-lp8841-rtc.c
new file mode 100644
index 000000000..2d436541d
--- /dev/null
+++ b/drivers/spi/spi-lp8841-rtc.c
@@ -0,0 +1,245 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * SPI master driver for ICP DAS LP-8841 RTC
+ *
+ * Copyright (C) 2016 Sergei Ianovich
+ *
+ * based on
+ *
+ * Dallas DS1302 RTC Support
+ * Copyright (C) 2002 David McCullough
+ * Copyright (C) 2003 - 2007 Paul Mundt
+ */
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/spi/spi.h>
+
+#define DRIVER_NAME "spi_lp8841_rtc"
+
+#define SPI_LP8841_RTC_CE 0x01
+#define SPI_LP8841_RTC_CLK 0x02
+#define SPI_LP8841_RTC_nWE 0x04
+#define SPI_LP8841_RTC_MOSI 0x08
+#define SPI_LP8841_RTC_MISO 0x01
+
+/*
+ * REVISIT If support for SPI_3WIRE and SPI_LSB_FIRST is added to the SPI
+ * GPIO driver, this SPI driver could be replaced by a simple GPIO driver
+ * providing 3 GPIO pins.
+ */
+
+struct spi_lp8841_rtc {
+ void *iomem;
+ unsigned long state;
+};
+
+static inline void
+setsck(struct spi_lp8841_rtc *data, int is_on)
+{
+ if (is_on)
+ data->state |= SPI_LP8841_RTC_CLK;
+ else
+ data->state &= ~SPI_LP8841_RTC_CLK;
+ writeb(data->state, data->iomem);
+}
+
+static inline void
+setmosi(struct spi_lp8841_rtc *data, int is_on)
+{
+ if (is_on)
+ data->state |= SPI_LP8841_RTC_MOSI;
+ else
+ data->state &= ~SPI_LP8841_RTC_MOSI;
+ writeb(data->state, data->iomem);
+}
+
+static inline int
+getmiso(struct spi_lp8841_rtc *data)
+{
+ return ioread8(data->iomem) & SPI_LP8841_RTC_MISO;
+}
+
+static inline u32
+bitbang_txrx_be_cpha0_lsb(struct spi_lp8841_rtc *data,
+ unsigned usecs, unsigned cpol, unsigned flags,
+ u32 word, u8 bits)
+{
+ /* if (cpol == 0) this is SPI_MODE_0; else this is SPI_MODE_2 */
+
+ u32 shift = 32 - bits;
+ /* clock starts at inactive polarity */
+ for (; likely(bits); bits--) {
+
+ /* setup LSB (to slave) on leading edge */
+ if ((flags & SPI_MASTER_NO_TX) == 0)
+ setmosi(data, (word & 1));
+
+ usleep_range(usecs, usecs + 1); /* T(setup) */
+
+ /* sample LSB (from slave) on trailing edge */
+ word >>= 1;
+ if ((flags & SPI_MASTER_NO_RX) == 0)
+ word |= (getmiso(data) << 31);
+
+ setsck(data, !cpol);
+ usleep_range(usecs, usecs + 1);
+
+ setsck(data, cpol);
+ }
+
+ word >>= shift;
+ return word;
+}
+
+static int
+spi_lp8841_rtc_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct spi_lp8841_rtc *data = spi_master_get_devdata(master);
+ unsigned count = t->len;
+ const u8 *tx = t->tx_buf;
+ u8 *rx = t->rx_buf;
+ u8 word = 0;
+ int ret = 0;
+
+ if (tx) {
+ data->state &= ~SPI_LP8841_RTC_nWE;
+ writeb(data->state, data->iomem);
+ while (likely(count > 0)) {
+ word = *tx++;
+ bitbang_txrx_be_cpha0_lsb(data, 1, 0,
+ SPI_MASTER_NO_RX, word, 8);
+ count--;
+ }
+ } else if (rx) {
+ data->state |= SPI_LP8841_RTC_nWE;
+ writeb(data->state, data->iomem);
+ while (likely(count > 0)) {
+ word = bitbang_txrx_be_cpha0_lsb(data, 1, 0,
+ SPI_MASTER_NO_TX, word, 8);
+ *rx++ = word;
+ count--;
+ }
+ } else {
+ ret = -EINVAL;
+ }
+
+ spi_finalize_current_transfer(master);
+
+ return ret;
+}
+
+static void
+spi_lp8841_rtc_set_cs(struct spi_device *spi, bool enable)
+{
+ struct spi_lp8841_rtc *data = spi_master_get_devdata(spi->master);
+
+ data->state = 0;
+ writeb(data->state, data->iomem);
+ if (enable) {
+ usleep_range(4, 5);
+ data->state |= SPI_LP8841_RTC_CE;
+ writeb(data->state, data->iomem);
+ usleep_range(4, 5);
+ }
+}
+
+static int
+spi_lp8841_rtc_setup(struct spi_device *spi)
+{
+ if ((spi->mode & SPI_CS_HIGH) == 0) {
+ dev_err(&spi->dev, "unsupported active low chip select\n");
+ return -EINVAL;
+ }
+
+ if ((spi->mode & SPI_LSB_FIRST) == 0) {
+ dev_err(&spi->dev, "unsupported MSB first mode\n");
+ return -EINVAL;
+ }
+
+ if ((spi->mode & SPI_3WIRE) == 0) {
+ dev_err(&spi->dev, "unsupported wiring. 3 wires required\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id spi_lp8841_rtc_dt_ids[] = {
+ { .compatible = "icpdas,lp8841-spi-rtc" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, spi_lp8841_rtc_dt_ids);
+#endif
+
+static int
+spi_lp8841_rtc_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct spi_master *master;
+ struct spi_lp8841_rtc *data;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*data));
+ if (!master)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, master);
+
+ master->flags = SPI_MASTER_HALF_DUPLEX;
+ master->mode_bits = SPI_CS_HIGH | SPI_3WIRE | SPI_LSB_FIRST;
+
+ master->bus_num = pdev->id;
+ master->num_chipselect = 1;
+ master->setup = spi_lp8841_rtc_setup;
+ master->set_cs = spi_lp8841_rtc_set_cs;
+ master->transfer_one = spi_lp8841_rtc_transfer_one;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+#ifdef CONFIG_OF
+ master->dev.of_node = pdev->dev.of_node;
+#endif
+
+ data = spi_master_get_devdata(master);
+
+ data->iomem = devm_platform_ioremap_resource(pdev, 0);
+ ret = PTR_ERR_OR_ZERO(data->iomem);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get IO address\n");
+ goto err_put_master;
+ }
+
+ /* register with the SPI framework */
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret) {
+ dev_err(&pdev->dev, "cannot register spi master\n");
+ goto err_put_master;
+ }
+
+ return ret;
+
+err_put_master:
+ spi_master_put(master);
+
+ return ret;
+}
+
+MODULE_ALIAS("platform:" DRIVER_NAME);
+
+static struct platform_driver spi_lp8841_rtc_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = of_match_ptr(spi_lp8841_rtc_dt_ids),
+ },
+ .probe = spi_lp8841_rtc_probe,
+};
+module_platform_driver(spi_lp8841_rtc_driver);
+
+MODULE_DESCRIPTION("SPI master driver for ICP DAS LP-8841 RTC");
+MODULE_AUTHOR("Sergei Ianovich");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c
new file mode 100644
index 000000000..0c79193d9
--- /dev/null
+++ b/drivers/spi/spi-mem.c
@@ -0,0 +1,919 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2018 Exceet Electronics GmbH
+ * Copyright (C) 2018 Bootlin
+ *
+ * Author: Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+#include <linux/dmaengine.h>
+#include <linux/iopoll.h>
+#include <linux/pm_runtime.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+#include <linux/sched/task_stack.h>
+
+#include "internals.h"
+
+#define SPI_MEM_MAX_BUSWIDTH 8
+
+/**
+ * spi_controller_dma_map_mem_op_data() - DMA-map the buffer attached to a
+ * memory operation
+ * @ctlr: the SPI controller requesting this dma_map()
+ * @op: the memory operation containing the buffer to map
+ * @sgt: a pointer to a non-initialized sg_table that will be filled by this
+ * function
+ *
+ * Some controllers might want to do DMA on the data buffer embedded in @op.
+ * This helper prepares everything for you and provides a ready-to-use
+ * sg_table. This function is not intended to be called from SPI drivers.
+ * Only SPI controller drivers should use it.
+ * Note that the caller must ensure the memory region pointed by
+ * op->data.buf.{in,out} is DMA-able before calling this function.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
+ const struct spi_mem_op *op,
+ struct sg_table *sgt)
+{
+ struct device *dmadev;
+
+ if (!op->data.nbytes)
+ return -EINVAL;
+
+ if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
+ dmadev = ctlr->dma_tx->device->dev;
+ else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
+ dmadev = ctlr->dma_rx->device->dev;
+ else
+ dmadev = ctlr->dev.parent;
+
+ if (!dmadev)
+ return -EINVAL;
+
+ return spi_map_buf(ctlr, dmadev, sgt, op->data.buf.in, op->data.nbytes,
+ op->data.dir == SPI_MEM_DATA_IN ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE);
+}
+EXPORT_SYMBOL_GPL(spi_controller_dma_map_mem_op_data);
+
+/**
+ * spi_controller_dma_unmap_mem_op_data() - DMA-unmap the buffer attached to a
+ * memory operation
+ * @ctlr: the SPI controller requesting this dma_unmap()
+ * @op: the memory operation containing the buffer to unmap
+ * @sgt: a pointer to an sg_table previously initialized by
+ * spi_controller_dma_map_mem_op_data()
+ *
+ * Some controllers might want to do DMA on the data buffer embedded in @op.
+ * This helper prepares things so that the CPU can access the
+ * op->data.buf.{in,out} buffer again.
+ *
+ * This function is not intended to be called from SPI drivers. Only SPI
+ * controller drivers should use it.
+ *
+ * This function should be called after the DMA operation has finished and is
+ * only valid if the previous spi_controller_dma_map_mem_op_data() call
+ * returned 0.
+ */
+void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
+ const struct spi_mem_op *op,
+ struct sg_table *sgt)
+{
+ struct device *dmadev;
+
+ if (!op->data.nbytes)
+ return;
+
+ if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
+ dmadev = ctlr->dma_tx->device->dev;
+ else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
+ dmadev = ctlr->dma_rx->device->dev;
+ else
+ dmadev = ctlr->dev.parent;
+
+ spi_unmap_buf(ctlr, dmadev, sgt,
+ op->data.dir == SPI_MEM_DATA_IN ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE);
+}
+EXPORT_SYMBOL_GPL(spi_controller_dma_unmap_mem_op_data);
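+
+/*
+ * Illustrative sketch (hypothetical controller driver code, not part of this
+ * file): an exec_op() implementation that DMAs the data cycle would bracket
+ * the hardware access with the two helpers above.
+ */
+static int example_exec_op_with_dma(struct spi_mem *mem,
+				    const struct spi_mem_op *op)
+{
+	struct spi_controller *ctlr = mem->spi->controller;
+	struct sg_table sgt;
+	int ret;
+
+	/* map op->data.buf.{in,out} for the controller's DMA engine */
+	ret = spi_controller_dma_map_mem_op_data(ctlr, op, &sgt);
+	if (ret)
+		return ret;
+
+	/*
+	 * The hardware-specific DMA submission would go here, consuming the
+	 * scatterlist in sgt.sgl / sgt.nents.
+	 */
+	ret = 0;
+
+	spi_controller_dma_unmap_mem_op_data(ctlr, op, &sgt);
+
+	return ret;
+}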
+
+static int spi_check_buswidth_req(struct spi_mem *mem, u8 buswidth, bool tx)
+{
+ u32 mode = mem->spi->mode;
+
+ switch (buswidth) {
+ case 1:
+ return 0;
+
+ case 2:
+ if ((tx &&
+ (mode & (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL))) ||
+ (!tx &&
+ (mode & (SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL))))
+ return 0;
+
+ break;
+
+ case 4:
+ if ((tx && (mode & (SPI_TX_QUAD | SPI_TX_OCTAL))) ||
+ (!tx && (mode & (SPI_RX_QUAD | SPI_RX_OCTAL))))
+ return 0;
+
+ break;
+
+ case 8:
+ if ((tx && (mode & SPI_TX_OCTAL)) ||
+ (!tx && (mode & SPI_RX_OCTAL)))
+ return 0;
+
+ break;
+
+ default:
+ break;
+ }
+
+ return -ENOTSUPP;
+}
+
+static bool spi_mem_check_buswidth(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ if (spi_check_buswidth_req(mem, op->cmd.buswidth, true))
+ return false;
+
+ if (op->addr.nbytes &&
+ spi_check_buswidth_req(mem, op->addr.buswidth, true))
+ return false;
+
+ if (op->dummy.nbytes &&
+ spi_check_buswidth_req(mem, op->dummy.buswidth, true))
+ return false;
+
+ if (op->data.dir != SPI_MEM_NO_DATA &&
+ spi_check_buswidth_req(mem, op->data.buswidth,
+ op->data.dir == SPI_MEM_DATA_OUT))
+ return false;
+
+ return true;
+}
+
+bool spi_mem_default_supports_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ struct spi_controller *ctlr = mem->spi->controller;
+ bool op_is_dtr =
+ op->cmd.dtr || op->addr.dtr || op->dummy.dtr || op->data.dtr;
+
+ if (op_is_dtr) {
+ if (!spi_mem_controller_is_capable(ctlr, dtr))
+ return false;
+
+ if (op->cmd.nbytes != 2)
+ return false;
+ } else {
+ if (op->cmd.nbytes != 1)
+ return false;
+ }
+
+ if (op->data.ecc) {
+ if (!spi_mem_controller_is_capable(ctlr, ecc))
+ return false;
+ }
+
+ return spi_mem_check_buswidth(mem, op);
+}
+EXPORT_SYMBOL_GPL(spi_mem_default_supports_op);
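+
+/*
+ * Illustrative sketch (hypothetical controller driver, not part of this
+ * file): controllers without special restrictions can simply point their
+ * mem_ops at the default helper above.
+ */
+static const struct spi_controller_mem_ops example_mem_ops = {
+	.supports_op = spi_mem_default_supports_op,
+	/* .adjust_op_size and .exec_op would be filled in by the driver */
+};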
+
+static bool spi_mem_buswidth_is_valid(u8 buswidth)
+{
+ if (hweight8(buswidth) > 1 || buswidth > SPI_MEM_MAX_BUSWIDTH)
+ return false;
+
+ return true;
+}
+
+static int spi_mem_check_op(const struct spi_mem_op *op)
+{
+ if (!op->cmd.buswidth || !op->cmd.nbytes)
+ return -EINVAL;
+
+ if ((op->addr.nbytes && !op->addr.buswidth) ||
+ (op->dummy.nbytes && !op->dummy.buswidth) ||
+ (op->data.nbytes && !op->data.buswidth))
+ return -EINVAL;
+
+ if (!spi_mem_buswidth_is_valid(op->cmd.buswidth) ||
+ !spi_mem_buswidth_is_valid(op->addr.buswidth) ||
+ !spi_mem_buswidth_is_valid(op->dummy.buswidth) ||
+ !spi_mem_buswidth_is_valid(op->data.buswidth))
+ return -EINVAL;
+
+ /* Buffers must be DMA-able. */
+ if (WARN_ON_ONCE(op->data.dir == SPI_MEM_DATA_IN &&
+ object_is_on_stack(op->data.buf.in)))
+ return -EINVAL;
+
+ if (WARN_ON_ONCE(op->data.dir == SPI_MEM_DATA_OUT &&
+ object_is_on_stack(op->data.buf.out)))
+ return -EINVAL;
+
+ return 0;
+}
+
+static bool spi_mem_internal_supports_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ struct spi_controller *ctlr = mem->spi->controller;
+
+ if (ctlr->mem_ops && ctlr->mem_ops->supports_op)
+ return ctlr->mem_ops->supports_op(mem, op);
+
+ return spi_mem_default_supports_op(mem, op);
+}
+
+/**
+ * spi_mem_supports_op() - Check if a memory device and the controller it is
+ * connected to support a specific memory operation
+ * @mem: the SPI memory
+ * @op: the memory operation to check
+ *
+ * Some controllers only support Single or Dual IOs, others might only
+ * support specific opcodes, or it can even be that the controller and device
+ * both support Quad IOs but the hardware prevents you from using them because
+ * only 2 IO lines are connected.
+ *
+ * This function checks whether a specific operation is supported.
+ *
+ * Return: true if @op is supported, false otherwise.
+ */
+bool spi_mem_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ if (spi_mem_check_op(op))
+ return false;
+
+ return spi_mem_internal_supports_op(mem, op);
+}
+EXPORT_SYMBOL_GPL(spi_mem_supports_op);
+
+static int spi_mem_access_start(struct spi_mem *mem)
+{
+ struct spi_controller *ctlr = mem->spi->controller;
+
+ /*
+ * Flush the message queue before executing our SPI memory
+ * operation to prevent preemption of regular SPI transfers.
+ */
+ spi_flush_queue(ctlr);
+
+ if (ctlr->auto_runtime_pm) {
+ int ret;
+
+ ret = pm_runtime_resume_and_get(ctlr->dev.parent);
+ if (ret < 0) {
+ dev_err(&ctlr->dev, "Failed to power device: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ mutex_lock(&ctlr->bus_lock_mutex);
+ mutex_lock(&ctlr->io_mutex);
+
+ return 0;
+}
+
+static void spi_mem_access_end(struct spi_mem *mem)
+{
+ struct spi_controller *ctlr = mem->spi->controller;
+
+ mutex_unlock(&ctlr->io_mutex);
+ mutex_unlock(&ctlr->bus_lock_mutex);
+
+ if (ctlr->auto_runtime_pm)
+ pm_runtime_put(ctlr->dev.parent);
+}
+
+/**
+ * spi_mem_exec_op() - Execute a memory operation
+ * @mem: the SPI memory
+ * @op: the memory operation to execute
+ *
+ * Executes a memory operation.
+ *
+ * This function first checks that @op is supported and then tries to execute
+ * it.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ unsigned int tmpbufsize, xferpos = 0, totalxferlen = 0;
+ struct spi_controller *ctlr = mem->spi->controller;
+ struct spi_transfer xfers[4] = { };
+ struct spi_message msg;
+ u8 *tmpbuf;
+ int ret;
+
+ ret = spi_mem_check_op(op);
+ if (ret)
+ return ret;
+
+ if (!spi_mem_internal_supports_op(mem, op))
+ return -ENOTSUPP;
+
+ if (ctlr->mem_ops && !mem->spi->cs_gpiod) {
+ ret = spi_mem_access_start(mem);
+ if (ret)
+ return ret;
+
+ ret = ctlr->mem_ops->exec_op(mem, op);
+
+ spi_mem_access_end(mem);
+
+ /*
+ * Some controllers only optimize specific paths (typically the
+ * read path) and expect the core to use the regular SPI
+ * interface in other cases.
+ */
+ if (!ret || ret != -ENOTSUPP)
+ return ret;
+ }
+
+ tmpbufsize = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;
+
+ /*
+ * Allocate a buffer to transmit the CMD and ADDR cycles with kmalloc() so
+ * we're guaranteed that this buffer is DMA-able, as required by the
+ * SPI layer.
+ */
+ tmpbuf = kzalloc(tmpbufsize, GFP_KERNEL | GFP_DMA);
+ if (!tmpbuf)
+ return -ENOMEM;
+
+ spi_message_init(&msg);
+
+ tmpbuf[0] = op->cmd.opcode;
+ xfers[xferpos].tx_buf = tmpbuf;
+ xfers[xferpos].len = op->cmd.nbytes;
+ xfers[xferpos].tx_nbits = op->cmd.buswidth;
+ spi_message_add_tail(&xfers[xferpos], &msg);
+ xferpos++;
+ totalxferlen++;
+
+ if (op->addr.nbytes) {
+ int i;
+
+ for (i = 0; i < op->addr.nbytes; i++)
+ tmpbuf[i + 1] = op->addr.val >>
+ (8 * (op->addr.nbytes - i - 1));
+
+ xfers[xferpos].tx_buf = tmpbuf + 1;
+ xfers[xferpos].len = op->addr.nbytes;
+ xfers[xferpos].tx_nbits = op->addr.buswidth;
+ spi_message_add_tail(&xfers[xferpos], &msg);
+ xferpos++;
+ totalxferlen += op->addr.nbytes;
+ }
+
+ if (op->dummy.nbytes) {
+ memset(tmpbuf + op->addr.nbytes + 1, 0xff, op->dummy.nbytes);
+ xfers[xferpos].tx_buf = tmpbuf + op->addr.nbytes + 1;
+ xfers[xferpos].len = op->dummy.nbytes;
+ xfers[xferpos].tx_nbits = op->dummy.buswidth;
+ xfers[xferpos].dummy_data = 1;
+ spi_message_add_tail(&xfers[xferpos], &msg);
+ xferpos++;
+ totalxferlen += op->dummy.nbytes;
+ }
+
+ if (op->data.nbytes) {
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ xfers[xferpos].rx_buf = op->data.buf.in;
+ xfers[xferpos].rx_nbits = op->data.buswidth;
+ } else {
+ xfers[xferpos].tx_buf = op->data.buf.out;
+ xfers[xferpos].tx_nbits = op->data.buswidth;
+ }
+
+ xfers[xferpos].len = op->data.nbytes;
+ spi_message_add_tail(&xfers[xferpos], &msg);
+ xferpos++;
+ totalxferlen += op->data.nbytes;
+ }
+
+ ret = spi_sync(mem->spi, &msg);
+
+ kfree(tmpbuf);
+
+ if (ret)
+ return ret;
+
+ if (msg.actual_length != totalxferlen)
+ return -EIO;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(spi_mem_exec_op);
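+
+/*
+ * Illustrative sketch (hypothetical caller, not part of this file): reading a
+ * 3-byte JEDEC ID with spi_mem_exec_op() and the SPI_MEM_OP() helpers from
+ * <linux/spi/spi-mem.h>. 0x9f is the usual flash READ ID opcode; @id must be
+ * DMA-able (e.g. kmalloc'ed), not on the stack, as checked by
+ * spi_mem_check_op().
+ */
+static int example_read_jedec_id(struct spi_mem *mem, u8 *id)
+{
+	struct spi_mem_op op =
+		SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1),
+			   SPI_MEM_OP_NO_ADDR,
+			   SPI_MEM_OP_NO_DUMMY,
+			   SPI_MEM_OP_DATA_IN(3, id, 1));
+
+	return spi_mem_exec_op(mem, &op);
+}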
+
+/**
+ * spi_mem_get_name() - Return the SPI mem device name to be used by the
+ * upper layer if necessary
+ * @mem: the SPI memory
+ *
+ * This function allows SPI mem users to retrieve the SPI mem device name.
+ * It is useful if the upper layer needs to expose a custom name for
+ * compatibility reasons.
+ *
+ * Return: a string containing the name of the memory device to be used
+ * by the SPI mem user
+ */
+const char *spi_mem_get_name(struct spi_mem *mem)
+{
+ return mem->name;
+}
+EXPORT_SYMBOL_GPL(spi_mem_get_name);
+
+/**
+ * spi_mem_adjust_op_size() - Adjust the data size of a SPI mem operation to
+ * match controller limitations
+ * @mem: the SPI memory
+ * @op: the operation to adjust
+ *
+ * Some controllers have FIFO limitations and must split a data transfer
+ * operation into multiple ones, others require a specific alignment for
+ * optimized accesses. This function allows SPI mem drivers to split a single
+ * operation into multiple sub-operations when required.
+ *
+ * Return: a negative error code if the controller can't properly adjust @op,
+ * 0 otherwise. Note that @op->data.nbytes will be updated if @op
+ * can't be handled in a single step.
+ */
+int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
+{
+ struct spi_controller *ctlr = mem->spi->controller;
+ size_t len;
+
+ if (ctlr->mem_ops && ctlr->mem_ops->adjust_op_size)
+ return ctlr->mem_ops->adjust_op_size(mem, op);
+
+ if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) {
+ len = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;
+
+ if (len > spi_max_transfer_size(mem->spi))
+ return -EINVAL;
+
+ op->data.nbytes = min3((size_t)op->data.nbytes,
+ spi_max_transfer_size(mem->spi),
+ spi_max_message_size(mem->spi) -
+ len);
+ if (!op->data.nbytes)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(spi_mem_adjust_op_size);
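+
+/*
+ * Illustrative sketch (hypothetical caller, not part of this file): splitting
+ * a large single-SPI read (opcode 0x03, 3 address bytes) into chunks the
+ * controller can handle by calling spi_mem_adjust_op_size() before every
+ * spi_mem_exec_op(), which is the usual pattern in SPI memory drivers.
+ */
+static int example_chunked_read(struct spi_mem *mem, u64 from, void *buf,
+				size_t len)
+{
+	struct spi_mem_op op =
+		SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1),
+			   SPI_MEM_OP_ADDR(3, from, 1),
+			   SPI_MEM_OP_NO_DUMMY,
+			   SPI_MEM_OP_DATA_IN(len, buf, 1));
+	size_t remaining = len;
+	int ret;
+
+	while (remaining) {
+		op.data.nbytes = remaining;
+
+		ret = spi_mem_adjust_op_size(mem, &op);
+		if (ret)
+			return ret;
+
+		ret = spi_mem_exec_op(mem, &op);
+		if (ret)
+			return ret;
+
+		/* advance address and buffer by what was actually issued */
+		op.addr.val += op.data.nbytes;
+		op.data.buf.in += op.data.nbytes;
+		remaining -= op.data.nbytes;
+	}
+
+	return 0;
+}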
+
+static ssize_t spi_mem_no_dirmap_read(struct spi_mem_dirmap_desc *desc,
+ u64 offs, size_t len, void *buf)
+{
+ struct spi_mem_op op = desc->info.op_tmpl;
+ int ret;
+
+ op.addr.val = desc->info.offset + offs;
+ op.data.buf.in = buf;
+ op.data.nbytes = len;
+ ret = spi_mem_adjust_op_size(desc->mem, &op);
+ if (ret)
+ return ret;
+
+ ret = spi_mem_exec_op(desc->mem, &op);
+ if (ret)
+ return ret;
+
+ return op.data.nbytes;
+}
+
+static ssize_t spi_mem_no_dirmap_write(struct spi_mem_dirmap_desc *desc,
+ u64 offs, size_t len, const void *buf)
+{
+ struct spi_mem_op op = desc->info.op_tmpl;
+ int ret;
+
+ op.addr.val = desc->info.offset + offs;
+ op.data.buf.out = buf;
+ op.data.nbytes = len;
+ ret = spi_mem_adjust_op_size(desc->mem, &op);
+ if (ret)
+ return ret;
+
+ ret = spi_mem_exec_op(desc->mem, &op);
+ if (ret)
+ return ret;
+
+ return op.data.nbytes;
+}
+
+/**
+ * spi_mem_dirmap_create() - Create a direct mapping descriptor
+ * @mem: SPI mem device this direct mapping should be created for
+ * @info: direct mapping information
+ *
+ * This function creates a direct mapping descriptor which can then be used
+ * to access the memory using spi_mem_dirmap_read() or spi_mem_dirmap_write().
+ * If the SPI controller driver does not support direct mapping, this function
+ * falls back to an implementation using spi_mem_exec_op(), so that the caller
+ * doesn't have to bother implementing a fallback on their own.
+ *
+ * Return: a valid pointer in case of success, and ERR_PTR() otherwise.
+ */
+struct spi_mem_dirmap_desc *
+spi_mem_dirmap_create(struct spi_mem *mem,
+ const struct spi_mem_dirmap_info *info)
+{
+ struct spi_controller *ctlr = mem->spi->controller;
+ struct spi_mem_dirmap_desc *desc;
+ int ret = -ENOTSUPP;
+
+ /* Make sure the number of address bytes is between 1 and 8. */
+ if (!info->op_tmpl.addr.nbytes || info->op_tmpl.addr.nbytes > 8)
+ return ERR_PTR(-EINVAL);
+
+ /* data.dir should either be SPI_MEM_DATA_IN or SPI_MEM_DATA_OUT. */
+ if (info->op_tmpl.data.dir == SPI_MEM_NO_DATA)
+ return ERR_PTR(-EINVAL);
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return ERR_PTR(-ENOMEM);
+
+ desc->mem = mem;
+ desc->info = *info;
+ if (ctlr->mem_ops && ctlr->mem_ops->dirmap_create)
+ ret = ctlr->mem_ops->dirmap_create(desc);
+
+ if (ret) {
+ desc->nodirmap = true;
+ if (!spi_mem_supports_op(desc->mem, &desc->info.op_tmpl))
+ ret = -ENOTSUPP;
+ else
+ ret = 0;
+ }
+
+ if (ret) {
+ kfree(desc);
+ return ERR_PTR(ret);
+ }
+
+ return desc;
+}
+EXPORT_SYMBOL_GPL(spi_mem_dirmap_create);
+
+/**
+ * spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor
+ * @desc: the direct mapping descriptor to destroy
+ *
+ * This function destroys a direct mapping descriptor previously created by
+ * spi_mem_dirmap_create().
+ */
+void spi_mem_dirmap_destroy(struct spi_mem_dirmap_desc *desc)
+{
+ struct spi_controller *ctlr = desc->mem->spi->controller;
+
+ if (!desc->nodirmap && ctlr->mem_ops && ctlr->mem_ops->dirmap_destroy)
+ ctlr->mem_ops->dirmap_destroy(desc);
+
+ kfree(desc);
+}
+EXPORT_SYMBOL_GPL(spi_mem_dirmap_destroy);
+
+static void devm_spi_mem_dirmap_release(struct device *dev, void *res)
+{
+ struct spi_mem_dirmap_desc *desc = *(struct spi_mem_dirmap_desc **)res;
+
+ spi_mem_dirmap_destroy(desc);
+}
+
+/**
+ * devm_spi_mem_dirmap_create() - Create a direct mapping descriptor and attach
+ * it to a device
+ * @dev: device the dirmap desc will be attached to
+ * @mem: SPI mem device this direct mapping should be created for
+ * @info: direct mapping information
+ *
+ * devm_ variant of the spi_mem_dirmap_create() function. See
+ * spi_mem_dirmap_create() for more details.
+ *
+ * Return: a valid pointer in case of success, and ERR_PTR() otherwise.
+ */
+struct spi_mem_dirmap_desc *
+devm_spi_mem_dirmap_create(struct device *dev, struct spi_mem *mem,
+ const struct spi_mem_dirmap_info *info)
+{
+ struct spi_mem_dirmap_desc **ptr, *desc;
+
+ ptr = devres_alloc(devm_spi_mem_dirmap_release, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ desc = spi_mem_dirmap_create(mem, info);
+ if (IS_ERR(desc)) {
+ devres_free(ptr);
+ } else {
+ *ptr = desc;
+ devres_add(dev, ptr);
+ }
+
+ return desc;
+}
+EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_create);
+
+static int devm_spi_mem_dirmap_match(struct device *dev, void *res, void *data)
+{
+ struct spi_mem_dirmap_desc **ptr = res;
+
+ if (WARN_ON(!ptr || !*ptr))
+ return 0;
+
+ return *ptr == data;
+}
+
+/**
+ * devm_spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor attached
+ * to a device
+ * @dev: device the dirmap desc is attached to
+ * @desc: the direct mapping descriptor to destroy
+ *
+ * devm_ variant of the spi_mem_dirmap_destroy() function. See
+ * spi_mem_dirmap_destroy() for more details.
+ */
+void devm_spi_mem_dirmap_destroy(struct device *dev,
+ struct spi_mem_dirmap_desc *desc)
+{
+ devres_release(dev, devm_spi_mem_dirmap_release,
+ devm_spi_mem_dirmap_match, desc);
+}
+EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_destroy);
+
+/**
+ * spi_mem_dirmap_read() - Read data through a direct mapping
+ * @desc: direct mapping descriptor
+ * @offs: offset to start reading from. Note that this is not an absolute
+ * offset, but the offset within the direct mapping which already has
+ * its own offset
+ * @len: length in bytes
+ * @buf: destination buffer. This buffer must be DMA-able
+ *
+ * This function reads data from a memory device using a direct mapping
+ * previously instantiated with spi_mem_dirmap_create().
+ *
+ * Return: the amount of data read from the memory device or a negative error
+ * code. Note that the returned size might be smaller than @len, and the caller
+ * is responsible for calling spi_mem_dirmap_read() again when that happens.
+ */
+ssize_t spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc,
+ u64 offs, size_t len, void *buf)
+{
+ struct spi_controller *ctlr = desc->mem->spi->controller;
+ ssize_t ret;
+
+ if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_IN)
+ return -EINVAL;
+
+ if (!len)
+ return 0;
+
+ if (desc->nodirmap) {
+ ret = spi_mem_no_dirmap_read(desc, offs, len, buf);
+ } else if (ctlr->mem_ops && ctlr->mem_ops->dirmap_read) {
+ ret = spi_mem_access_start(desc->mem);
+ if (ret)
+ return ret;
+
+ ret = ctlr->mem_ops->dirmap_read(desc, offs, len, buf);
+
+ spi_mem_access_end(desc->mem);
+ } else {
+ ret = -ENOTSUPP;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(spi_mem_dirmap_read);
+
+/**
+ * spi_mem_dirmap_write() - Write data through a direct mapping
+ * @desc: direct mapping descriptor
+ * @offs: offset to start writing from. Note that this is not an absolute
+ * offset, but the offset within the direct mapping which already has
+ * its own offset
+ * @len: length in bytes
+ * @buf: source buffer. This buffer must be DMA-able
+ *
+ * This function writes data to a memory device using a direct mapping
+ * previously instantiated with spi_mem_dirmap_create().
+ *
+ * Return: the amount of data written to the memory device or a negative error
+ * code. Note that the returned size might be smaller than @len, and the caller
+ * is responsible for calling spi_mem_dirmap_write() again when that happens.
+ */
+ssize_t spi_mem_dirmap_write(struct spi_mem_dirmap_desc *desc,
+ u64 offs, size_t len, const void *buf)
+{
+ struct spi_controller *ctlr = desc->mem->spi->controller;
+ ssize_t ret;
+
+ if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_OUT)
+ return -EINVAL;
+
+ if (!len)
+ return 0;
+
+ if (desc->nodirmap) {
+ ret = spi_mem_no_dirmap_write(desc, offs, len, buf);
+ } else if (ctlr->mem_ops && ctlr->mem_ops->dirmap_write) {
+ ret = spi_mem_access_start(desc->mem);
+ if (ret)
+ return ret;
+
+ ret = ctlr->mem_ops->dirmap_write(desc, offs, len, buf);
+
+ spi_mem_access_end(desc->mem);
+ } else {
+ ret = -ENOTSUPP;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(spi_mem_dirmap_write);
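+
+/*
+ * Illustrative sketch (hypothetical caller, not part of this file): creating
+ * a read direct mapping with devm_spi_mem_dirmap_create() and draining it
+ * with spi_mem_dirmap_read(), which is allowed to return short reads.
+ */
+static ssize_t example_dirmap_read_all(struct device *dev,
+				       struct spi_mem *mem,
+				       void *buf, size_t len)
+{
+	struct spi_mem_dirmap_info info = {
+		.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1),
+				      SPI_MEM_OP_ADDR(3, 0, 1),
+				      SPI_MEM_OP_NO_DUMMY,
+				      SPI_MEM_OP_DATA_IN(0, NULL, 1)),
+		.offset = 0,
+		.length = len,
+	};
+	struct spi_mem_dirmap_desc *desc;
+	size_t done = 0;
+	ssize_t ret;
+
+	desc = devm_spi_mem_dirmap_create(dev, mem, &info);
+	if (IS_ERR(desc))
+		return PTR_ERR(desc);
+
+	while (done < len) {
+		ret = spi_mem_dirmap_read(desc, done, len - done, buf + done);
+		if (ret < 0)
+			return ret;
+		if (!ret)
+			break;
+		done += ret;
+	}
+
+	return done;
+}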
+
+static inline struct spi_mem_driver *to_spi_mem_drv(struct device_driver *drv)
+{
+ return container_of(drv, struct spi_mem_driver, spidrv.driver);
+}
+
+static int spi_mem_read_status(struct spi_mem *mem,
+ const struct spi_mem_op *op,
+ u16 *status)
+{
+ const u8 *bytes = (u8 *)op->data.buf.in;
+ int ret;
+
+ ret = spi_mem_exec_op(mem, op);
+ if (ret)
+ return ret;
+
+ if (op->data.nbytes > 1)
+ *status = ((u16)bytes[0] << 8) | bytes[1];
+ else
+ *status = bytes[0];
+
+ return 0;
+}
+
+/**
+ * spi_mem_poll_status() - Poll memory device status
+ * @mem: SPI memory device
+ * @op: the memory operation to execute
+ * @mask: status bitmask to check
+ * @match: (status & mask) expected value
+ * @initial_delay_us: delay in us before starting to poll
+ * @polling_delay_us: time to sleep between reads in us
+ * @timeout_ms: timeout in milliseconds
+ *
+ * This function polls a status register and returns when
+ * (status & mask) == match or when the timeout has expired.
+ *
+ * Return: 0 in case of success, -ETIMEDOUT in case of error,
+ * -EOPNOTSUPP if not supported.
+ */
+int spi_mem_poll_status(struct spi_mem *mem,
+ const struct spi_mem_op *op,
+ u16 mask, u16 match,
+ unsigned long initial_delay_us,
+ unsigned long polling_delay_us,
+ u16 timeout_ms)
+{
+ struct spi_controller *ctlr = mem->spi->controller;
+ int ret = -EOPNOTSUPP;
+ int read_status_ret;
+ u16 status;
+
+ if (op->data.nbytes < 1 || op->data.nbytes > 2 ||
+ op->data.dir != SPI_MEM_DATA_IN)
+ return -EINVAL;
+
+ if (ctlr->mem_ops && ctlr->mem_ops->poll_status && !mem->spi->cs_gpiod) {
+ ret = spi_mem_access_start(mem);
+ if (ret)
+ return ret;
+
+ ret = ctlr->mem_ops->poll_status(mem, op, mask, match,
+ initial_delay_us, polling_delay_us,
+ timeout_ms);
+
+ spi_mem_access_end(mem);
+ }
+
+ if (ret == -EOPNOTSUPP) {
+ if (!spi_mem_supports_op(mem, op))
+ return ret;
+
+ if (initial_delay_us < 10)
+ udelay(initial_delay_us);
+ else
+ usleep_range((initial_delay_us >> 2) + 1,
+ initial_delay_us);
+
+ ret = read_poll_timeout(spi_mem_read_status, read_status_ret,
+ (read_status_ret || ((status) & mask) == match),
+ polling_delay_us, timeout_ms * 1000, false, mem,
+ op, &status);
+ if (read_status_ret)
+ return read_status_ret;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(spi_mem_poll_status);
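+
+/*
+ * Illustrative sketch (hypothetical caller, not part of this file): waiting
+ * for a flash "write in progress" bit (bit 0 of the common 0x05 READ STATUS
+ * register) to clear with spi_mem_poll_status(). @status must point to a
+ * DMA-able byte.
+ */
+static int example_wait_ready(struct spi_mem *mem, u8 *status)
+{
+	struct spi_mem_op op =
+		SPI_MEM_OP(SPI_MEM_OP_CMD(0x05, 1),
+			   SPI_MEM_OP_NO_ADDR,
+			   SPI_MEM_OP_NO_DUMMY,
+			   SPI_MEM_OP_DATA_IN(1, status, 1));
+
+	/* wait until (status & BIT(0)) == 0, poll every 100us, 500ms timeout */
+	return spi_mem_poll_status(mem, &op, BIT(0), 0, 10, 100, 500);
+}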
+
+static int spi_mem_probe(struct spi_device *spi)
+{
+ struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
+ struct spi_controller *ctlr = spi->controller;
+ struct spi_mem *mem;
+
+ mem = devm_kzalloc(&spi->dev, sizeof(*mem), GFP_KERNEL);
+ if (!mem)
+ return -ENOMEM;
+
+ mem->spi = spi;
+
+ if (ctlr->mem_ops && ctlr->mem_ops->get_name)
+ mem->name = ctlr->mem_ops->get_name(mem);
+ else
+ mem->name = dev_name(&spi->dev);
+
+ if (IS_ERR_OR_NULL(mem->name))
+ return PTR_ERR_OR_ZERO(mem->name);
+
+ spi_set_drvdata(spi, mem);
+
+ return memdrv->probe(mem);
+}
+
+static void spi_mem_remove(struct spi_device *spi)
+{
+ struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
+ struct spi_mem *mem = spi_get_drvdata(spi);
+
+ if (memdrv->remove)
+ memdrv->remove(mem);
+}
+
+static void spi_mem_shutdown(struct spi_device *spi)
+{
+ struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
+ struct spi_mem *mem = spi_get_drvdata(spi);
+
+ if (memdrv->shutdown)
+ memdrv->shutdown(mem);
+}
+
+/**
+ * spi_mem_driver_register_with_owner() - Register a SPI memory driver
+ * @memdrv: the SPI memory driver to register
+ * @owner: the owner of this driver
+ *
+ * Registers a SPI memory driver.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+
+int spi_mem_driver_register_with_owner(struct spi_mem_driver *memdrv,
+ struct module *owner)
+{
+ memdrv->spidrv.probe = spi_mem_probe;
+ memdrv->spidrv.remove = spi_mem_remove;
+ memdrv->spidrv.shutdown = spi_mem_shutdown;
+
+ return __spi_register_driver(owner, &memdrv->spidrv);
+}
+EXPORT_SYMBOL_GPL(spi_mem_driver_register_with_owner);
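+
+/*
+ * Illustrative sketch (hypothetical driver, not part of this file): a minimal
+ * spi_mem_driver whose registration ends up in
+ * spi_mem_driver_register_with_owner() above, typically through the
+ * module_spi_mem_driver() helper from <linux/spi/spi-mem.h>.
+ */
+static int example_mem_probe(struct spi_mem *mem)
+{
+	dev_info(&mem->spi->dev, "probed %s\n", spi_mem_get_name(mem));
+	return 0;
+}
+
+static struct spi_mem_driver example_mem_driver = {
+	.spidrv = {
+		.driver = {
+			.name = "example-spi-mem",
+		},
+	},
+	.probe = example_mem_probe,
+};
+/* module_spi_mem_driver(example_mem_driver); */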
+
+/**
+ * spi_mem_driver_unregister() - Unregister a SPI memory driver
+ * @memdrv: the SPI memory driver to unregister
+ *
+ * Unregisters a SPI memory driver.
+ */
+void spi_mem_driver_unregister(struct spi_mem_driver *memdrv)
+{
+ spi_unregister_driver(&memdrv->spidrv);
+}
+EXPORT_SYMBOL_GPL(spi_mem_driver_unregister);
diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c
new file mode 100644
index 000000000..1b4195c54
--- /dev/null
+++ b/drivers/spi/spi-meson-spicc.c
@@ -0,0 +1,944 @@
+/*
+ * Driver for Amlogic Meson SPI communication controller (SPICC)
+ *
+ * Copyright (C) BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/reset.h>
+
+/*
+ * The Meson SPICC controller could support DMA-based transfers, but DMA is
+ * not implemented by the vendor code, and despite the available register
+ * documentation it has never worked on the GXL hardware.
+ * PIO is the only mode implemented, and due to badly designed HW:
+ * - all transfers are cut into 16-word bursts because the FIFO hangs on
+ *   TX underflow, and there is no TX "Half-Empty" interrupt, so we go by
+ *   FIFO max-size chunks only
+ * - CS management is dumb, and goes UP between every burst, so it is really
+ *   more a "Data Valid" signal than a Chip Select; a GPIO line should be used
+ *   instead to keep CS down over the full transfer
+ */
+
+#define SPICC_MAX_BURST 128
+
+/* Register Map */
+#define SPICC_RXDATA 0x00
+
+#define SPICC_TXDATA 0x04
+
+#define SPICC_CONREG 0x08
+#define SPICC_ENABLE BIT(0)
+#define SPICC_MODE_MASTER BIT(1)
+#define SPICC_XCH BIT(2)
+#define SPICC_SMC BIT(3)
+#define SPICC_POL BIT(4)
+#define SPICC_PHA BIT(5)
+#define SPICC_SSCTL BIT(6)
+#define SPICC_SSPOL BIT(7)
+#define SPICC_DRCTL_MASK GENMASK(9, 8)
+#define SPICC_DRCTL_IGNORE 0
+#define SPICC_DRCTL_FALLING 1
+#define SPICC_DRCTL_LOWLEVEL 2
+#define SPICC_CS_MASK GENMASK(13, 12)
+#define SPICC_DATARATE_MASK GENMASK(18, 16)
+#define SPICC_DATARATE_DIV4 0
+#define SPICC_DATARATE_DIV8 1
+#define SPICC_DATARATE_DIV16 2
+#define SPICC_DATARATE_DIV32 3
+#define SPICC_BITLENGTH_MASK GENMASK(24, 19)
+#define SPICC_BURSTLENGTH_MASK GENMASK(31, 25)
+
+#define SPICC_INTREG 0x0c
+#define SPICC_TE_EN BIT(0) /* TX FIFO Empty Interrupt */
+#define SPICC_TH_EN BIT(1) /* TX FIFO Half-Full Interrupt */
+#define SPICC_TF_EN BIT(2) /* TX FIFO Full Interrupt */
+#define SPICC_RR_EN BIT(3) /* RX FIFO Ready Interrupt */
+#define SPICC_RH_EN BIT(4) /* RX FIFO Half-Full Interrupt */
+#define SPICC_RF_EN BIT(5) /* RX FIFO Full Interrupt */
+#define SPICC_RO_EN BIT(6) /* RX FIFO Overflow Interrupt */
+#define SPICC_TC_EN BIT(7) /* Transfer Complete Interrupt */
+
+#define SPICC_DMAREG 0x10
+#define SPICC_DMA_ENABLE BIT(0)
+#define SPICC_TXFIFO_THRESHOLD_MASK GENMASK(5, 1)
+#define SPICC_RXFIFO_THRESHOLD_MASK GENMASK(10, 6)
+#define SPICC_READ_BURST_MASK GENMASK(14, 11)
+#define SPICC_WRITE_BURST_MASK GENMASK(18, 15)
+#define SPICC_DMA_URGENT BIT(19)
+#define SPICC_DMA_THREADID_MASK GENMASK(25, 20)
+#define SPICC_DMA_BURSTNUM_MASK GENMASK(31, 26)
+
+#define SPICC_STATREG 0x14
+#define SPICC_TE BIT(0) /* TX FIFO Empty Interrupt */
+#define SPICC_TH BIT(1) /* TX FIFO Half-Full Interrupt */
+#define SPICC_TF BIT(2) /* TX FIFO Full Interrupt */
+#define SPICC_RR BIT(3) /* RX FIFO Ready Interrupt */
+#define SPICC_RH BIT(4) /* RX FIFO Half-Full Interrupt */
+#define SPICC_RF BIT(5) /* RX FIFO Full Interrupt */
+#define SPICC_RO BIT(6) /* RX FIFO Overflow Interrupt */
+#define SPICC_TC BIT(7) /* Transfer Complete Interrupt */
+
+#define SPICC_PERIODREG 0x18
+#define SPICC_PERIOD GENMASK(14, 0) /* Wait cycles */
+
+#define SPICC_TESTREG 0x1c
+#define SPICC_TXCNT_MASK GENMASK(4, 0) /* TX FIFO Counter */
+#define SPICC_RXCNT_MASK GENMASK(9, 5) /* RX FIFO Counter */
+#define SPICC_SMSTATUS_MASK GENMASK(12, 10) /* State Machine Status */
+#define SPICC_LBC_RO BIT(13) /* Loop Back Control Read-Only */
+#define SPICC_LBC_W1 BIT(14) /* Loop Back Control Write-Only */
+#define SPICC_SWAP_RO BIT(14) /* RX FIFO Data Swap Read-Only */
+#define SPICC_SWAP_W1 BIT(15) /* RX FIFO Data Swap Write-Only */
+#define SPICC_DLYCTL_RO_MASK GENMASK(20, 15) /* Delay Control Read-Only */
+#define SPICC_MO_DELAY_MASK GENMASK(17, 16) /* Master Output Delay */
+#define SPICC_MO_NO_DELAY 0
+#define SPICC_MO_DELAY_1_CYCLE 1
+#define SPICC_MO_DELAY_2_CYCLE 2
+#define SPICC_MO_DELAY_3_CYCLE 3
+#define SPICC_MI_DELAY_MASK GENMASK(19, 18) /* Master Input Delay */
+#define SPICC_MI_NO_DELAY 0
+#define SPICC_MI_DELAY_1_CYCLE 1
+#define SPICC_MI_DELAY_2_CYCLE 2
+#define SPICC_MI_DELAY_3_CYCLE 3
+#define SPICC_MI_CAP_DELAY_MASK GENMASK(21, 20) /* Master Capture Delay */
+#define SPICC_CAP_AHEAD_2_CYCLE 0
+#define SPICC_CAP_AHEAD_1_CYCLE 1
+#define SPICC_CAP_NO_DELAY 2
+#define SPICC_CAP_DELAY_1_CYCLE 3
+#define SPICC_FIFORST_RO_MASK GENMASK(22, 21) /* FIFO Softreset Read-Only */
+#define SPICC_FIFORST_W1_MASK GENMASK(23, 22) /* FIFO Softreset Write-Only */
+
+#define SPICC_DRADDR 0x20 /* Read Address of DMA */
+
+#define SPICC_DWADDR 0x24 /* Write Address of DMA */
+
+#define SPICC_ENH_CTL0 0x38 /* Enhanced Feature */
+#define SPICC_ENH_CLK_CS_DELAY_MASK GENMASK(15, 0)
+#define SPICC_ENH_DATARATE_MASK GENMASK(23, 16)
+#define SPICC_ENH_DATARATE_EN BIT(24)
+#define SPICC_ENH_MOSI_OEN BIT(25)
+#define SPICC_ENH_CLK_OEN BIT(26)
+#define SPICC_ENH_CS_OEN BIT(27)
+#define SPICC_ENH_CLK_CS_DELAY_EN BIT(28)
+#define SPICC_ENH_MAIN_CLK_AO BIT(29)
+
+#define writel_bits_relaxed(mask, val, addr) \
+ writel_relaxed((readl_relaxed(addr) & ~(mask)) | (val), addr)
+
+struct meson_spicc_data {
+ unsigned int max_speed_hz;
+ unsigned int min_speed_hz;
+ unsigned int fifo_size;
+ bool has_oen;
+ bool has_enhance_clk_div;
+ bool has_pclk;
+};
+
+struct meson_spicc_device {
+ struct spi_master *master;
+ struct platform_device *pdev;
+ void __iomem *base;
+ struct clk *core;
+ struct clk *pclk;
+ struct clk_divider pow2_div;
+ struct clk *clk;
+ struct spi_message *message;
+ struct spi_transfer *xfer;
+ struct completion done;
+ const struct meson_spicc_data *data;
+ u8 *tx_buf;
+ u8 *rx_buf;
+ unsigned int bytes_per_word;
+ unsigned long tx_remain;
+ unsigned long rx_remain;
+ unsigned long xfer_remain;
+};
+
+#define pow2_clk_to_spicc(_div) container_of(_div, struct meson_spicc_device, pow2_div)
+
+static void meson_spicc_oen_enable(struct meson_spicc_device *spicc)
+{
+ u32 conf;
+
+ if (!spicc->data->has_oen)
+ return;
+
+ conf = readl_relaxed(spicc->base + SPICC_ENH_CTL0) |
+ SPICC_ENH_MOSI_OEN | SPICC_ENH_CLK_OEN | SPICC_ENH_CS_OEN;
+
+ writel_relaxed(conf, spicc->base + SPICC_ENH_CTL0);
+}
+
+static inline bool meson_spicc_txfull(struct meson_spicc_device *spicc)
+{
+ return !!FIELD_GET(SPICC_TF,
+ readl_relaxed(spicc->base + SPICC_STATREG));
+}
+
+static inline bool meson_spicc_rxready(struct meson_spicc_device *spicc)
+{
+ return FIELD_GET(SPICC_RH | SPICC_RR | SPICC_RF,
+ readl_relaxed(spicc->base + SPICC_STATREG));
+}
+
+static inline u32 meson_spicc_pull_data(struct meson_spicc_device *spicc)
+{
+ unsigned int bytes = spicc->bytes_per_word;
+ unsigned int byte_shift = 0;
+ u32 data = 0;
+ u8 byte;
+
+ while (bytes--) {
+ byte = *spicc->tx_buf++;
+ data |= (byte & 0xff) << byte_shift;
+ byte_shift += 8;
+ }
+
+ spicc->tx_remain--;
+ return data;
+}
+
+static inline void meson_spicc_push_data(struct meson_spicc_device *spicc,
+ u32 data)
+{
+ unsigned int bytes = spicc->bytes_per_word;
+ unsigned int byte_shift = 0;
+ u8 byte;
+
+ while (bytes--) {
+ byte = (data >> byte_shift) & 0xff;
+ *spicc->rx_buf++ = byte;
+ byte_shift += 8;
+ }
+
+ spicc->rx_remain--;
+}
+
+static inline void meson_spicc_rx(struct meson_spicc_device *spicc)
+{
+ /* Empty RX FIFO */
+ while (spicc->rx_remain &&
+ meson_spicc_rxready(spicc))
+ meson_spicc_push_data(spicc,
+ readl_relaxed(spicc->base + SPICC_RXDATA));
+}
+
+static inline void meson_spicc_tx(struct meson_spicc_device *spicc)
+{
+ /* Fill Up TX FIFO */
+ while (spicc->tx_remain &&
+ !meson_spicc_txfull(spicc))
+ writel_relaxed(meson_spicc_pull_data(spicc),
+ spicc->base + SPICC_TXDATA);
+}
+
+static inline void meson_spicc_setup_burst(struct meson_spicc_device *spicc)
+{
+
+ unsigned int burst_len = min_t(unsigned int,
+ spicc->xfer_remain /
+ spicc->bytes_per_word,
+ spicc->data->fifo_size);
+ /* Setup Xfer variables */
+ spicc->tx_remain = burst_len;
+ spicc->rx_remain = burst_len;
+ spicc->xfer_remain -= burst_len * spicc->bytes_per_word;
+
+ /* Setup burst length */
+ writel_bits_relaxed(SPICC_BURSTLENGTH_MASK,
+ FIELD_PREP(SPICC_BURSTLENGTH_MASK,
+ burst_len - 1),
+ spicc->base + SPICC_CONREG);
+
+ /* Fill TX FIFO */
+ meson_spicc_tx(spicc);
+}
+
+static irqreturn_t meson_spicc_irq(int irq, void *data)
+{
+ struct meson_spicc_device *spicc = (void *) data;
+
+ writel_bits_relaxed(SPICC_TC, SPICC_TC, spicc->base + SPICC_STATREG);
+
+ /* Empty RX FIFO */
+ meson_spicc_rx(spicc);
+
+ if (!spicc->xfer_remain) {
+ /* Disable all IRQs */
+ writel(0, spicc->base + SPICC_INTREG);
+
+ complete(&spicc->done);
+
+ return IRQ_HANDLED;
+ }
+
+ /* Setup burst */
+ meson_spicc_setup_burst(spicc);
+
+ /* Start burst */
+ writel_bits_relaxed(SPICC_XCH, SPICC_XCH, spicc->base + SPICC_CONREG);
+
+ return IRQ_HANDLED;
+}
+
+static void meson_spicc_auto_io_delay(struct meson_spicc_device *spicc)
+{
+ u32 div, hz;
+ u32 mi_delay, cap_delay;
+ u32 conf;
+
+ if (spicc->data->has_enhance_clk_div) {
+ div = FIELD_GET(SPICC_ENH_DATARATE_MASK,
+ readl_relaxed(spicc->base + SPICC_ENH_CTL0));
+ div++;
+ div <<= 1;
+ } else {
+ div = FIELD_GET(SPICC_DATARATE_MASK,
+ readl_relaxed(spicc->base + SPICC_CONREG));
+ div += 2;
+ div = 1 << div;
+ }
+
+ mi_delay = SPICC_MI_NO_DELAY;
+ cap_delay = SPICC_CAP_AHEAD_2_CYCLE;
+ hz = clk_get_rate(spicc->clk);
+
+ if (hz >= 100000000)
+ cap_delay = SPICC_CAP_DELAY_1_CYCLE;
+ else if (hz >= 80000000)
+ cap_delay = SPICC_CAP_NO_DELAY;
+ else if (hz >= 40000000)
+ cap_delay = SPICC_CAP_AHEAD_1_CYCLE;
+ else if (div >= 16)
+ mi_delay = SPICC_MI_DELAY_3_CYCLE;
+ else if (div >= 8)
+ mi_delay = SPICC_MI_DELAY_2_CYCLE;
+ else if (div >= 6)
+ mi_delay = SPICC_MI_DELAY_1_CYCLE;
+
+ conf = readl_relaxed(spicc->base + SPICC_TESTREG);
+ conf &= ~(SPICC_MO_DELAY_MASK | SPICC_MI_DELAY_MASK
+ | SPICC_MI_CAP_DELAY_MASK);
+ conf |= FIELD_PREP(SPICC_MI_DELAY_MASK, mi_delay);
+ conf |= FIELD_PREP(SPICC_MI_CAP_DELAY_MASK, cap_delay);
+ writel_relaxed(conf, spicc->base + SPICC_TESTREG);
+}
+
+static void meson_spicc_setup_xfer(struct meson_spicc_device *spicc,
+ struct spi_transfer *xfer)
+{
+ u32 conf, conf_orig;
+
+ /* Read original configuration */
+ conf = conf_orig = readl_relaxed(spicc->base + SPICC_CONREG);
+
+ /* Setup word width */
+ conf &= ~SPICC_BITLENGTH_MASK;
+ conf |= FIELD_PREP(SPICC_BITLENGTH_MASK,
+ (spicc->bytes_per_word << 3) - 1);
+
+ /* Ignore if unchanged */
+ if (conf != conf_orig)
+ writel_relaxed(conf, spicc->base + SPICC_CONREG);
+
+ clk_set_rate(spicc->clk, xfer->speed_hz);
+
+ meson_spicc_auto_io_delay(spicc);
+
+ writel_relaxed(0, spicc->base + SPICC_DMAREG);
+}
+
+static void meson_spicc_reset_fifo(struct meson_spicc_device *spicc)
+{
+ if (spicc->data->has_oen)
+ writel_bits_relaxed(SPICC_ENH_MAIN_CLK_AO,
+ SPICC_ENH_MAIN_CLK_AO,
+ spicc->base + SPICC_ENH_CTL0);
+
+ writel_bits_relaxed(SPICC_FIFORST_W1_MASK, SPICC_FIFORST_W1_MASK,
+ spicc->base + SPICC_TESTREG);
+
+ while (meson_spicc_rxready(spicc))
+ readl_relaxed(spicc->base + SPICC_RXDATA);
+
+ if (spicc->data->has_oen)
+ writel_bits_relaxed(SPICC_ENH_MAIN_CLK_AO, 0,
+ spicc->base + SPICC_ENH_CTL0);
+}
+
+static int meson_spicc_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct meson_spicc_device *spicc = spi_master_get_devdata(master);
+ uint64_t timeout;
+
+ /* Store current transfer */
+ spicc->xfer = xfer;
+
+ /* Setup transfer parameters */
+ spicc->tx_buf = (u8 *)xfer->tx_buf;
+ spicc->rx_buf = (u8 *)xfer->rx_buf;
+ spicc->xfer_remain = xfer->len;
+
+ /* Pre-calculate word size */
+ spicc->bytes_per_word =
+ DIV_ROUND_UP(spicc->xfer->bits_per_word, 8);
+
+ if (xfer->len % spicc->bytes_per_word)
+ return -EINVAL;
+
+ /* Setup transfer parameters */
+ meson_spicc_setup_xfer(spicc, xfer);
+
+ meson_spicc_reset_fifo(spicc);
+
+ /* Setup burst */
+ meson_spicc_setup_burst(spicc);
+
+ /* Setup wait for completion */
+ reinit_completion(&spicc->done);
+
+ /* For each byte we wait for 8 cycles of the SPI clock */
+ timeout = 8LL * MSEC_PER_SEC * xfer->len;
+ do_div(timeout, xfer->speed_hz);
+
+ /* Add a 10us delay between each FIFO burst */
+ timeout += ((xfer->len >> 4) * 10) / MSEC_PER_SEC;
+
+ /* Double the timeout and add a 200 ms tolerance */
+ timeout += timeout + 200;
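+
+	/*
+	 * Worked example (illustrative figures only): a 4096-byte transfer at
+	 * 1 MHz needs 8 * 1000 * 4096 / 1000000 = 32 ms of clocking plus
+	 * (4096 / 16) * 10 us = 2 ms of inter-burst delay; doubled and padded
+	 * by 200 ms this gives a 268 ms completion timeout.
+	 */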
+
+ /* Start burst */
+ writel_bits_relaxed(SPICC_XCH, SPICC_XCH, spicc->base + SPICC_CONREG);
+
+ /* Enable interrupts */
+ writel_relaxed(SPICC_TC_EN, spicc->base + SPICC_INTREG);
+
+ if (!wait_for_completion_timeout(&spicc->done, msecs_to_jiffies(timeout)))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int meson_spicc_prepare_message(struct spi_master *master,
+ struct spi_message *message)
+{
+ struct meson_spicc_device *spicc = spi_master_get_devdata(master);
+ struct spi_device *spi = message->spi;
+ u32 conf = readl_relaxed(spicc->base + SPICC_CONREG) & SPICC_DATARATE_MASK;
+
+ /* Store current message */
+ spicc->message = message;
+
+ /* Enable Master */
+ conf |= SPICC_ENABLE;
+ conf |= SPICC_MODE_MASTER;
+
+ /* SMC = 0 */
+
+ /* Setup transfer mode */
+ if (spi->mode & SPI_CPOL)
+ conf |= SPICC_POL;
+ else
+ conf &= ~SPICC_POL;
+
+ if (spi->mode & SPI_CPHA)
+ conf |= SPICC_PHA;
+ else
+ conf &= ~SPICC_PHA;
+
+ /* SSCTL = 0 */
+
+ if (spi->mode & SPI_CS_HIGH)
+ conf |= SPICC_SSPOL;
+ else
+ conf &= ~SPICC_SSPOL;
+
+ if (spi->mode & SPI_READY)
+ conf |= FIELD_PREP(SPICC_DRCTL_MASK, SPICC_DRCTL_LOWLEVEL);
+ else
+ conf |= FIELD_PREP(SPICC_DRCTL_MASK, SPICC_DRCTL_IGNORE);
+
+ /* Select CS */
+ conf |= FIELD_PREP(SPICC_CS_MASK, spi->chip_select);
+
+ /* Default 8bit word */
+ conf |= FIELD_PREP(SPICC_BITLENGTH_MASK, 8 - 1);
+
+ writel_relaxed(conf, spicc->base + SPICC_CONREG);
+
+ /* Setup no wait cycles by default */
+ writel_relaxed(0, spicc->base + SPICC_PERIODREG);
+
+ writel_bits_relaxed(SPICC_LBC_W1, 0, spicc->base + SPICC_TESTREG);
+
+ return 0;
+}
+
+static int meson_spicc_unprepare_transfer(struct spi_master *master)
+{
+ struct meson_spicc_device *spicc = spi_master_get_devdata(master);
+ u32 conf = readl_relaxed(spicc->base + SPICC_CONREG) & SPICC_DATARATE_MASK;
+
+ /* Disable all IRQs */
+ writel(0, spicc->base + SPICC_INTREG);
+
+ device_reset_optional(&spicc->pdev->dev);
+
+ /* Set default configuration, keeping datarate field */
+ writel_relaxed(conf, spicc->base + SPICC_CONREG);
+
+ return 0;
+}
+
+static int meson_spicc_setup(struct spi_device *spi)
+{
+ if (!spi->controller_state)
+ spi->controller_state = spi_master_get_devdata(spi->master);
+
+ return 0;
+}
+
+static void meson_spicc_cleanup(struct spi_device *spi)
+{
+ spi->controller_state = NULL;
+}
+
+/*
+ * The Clock Mux
+ * x-----------------x x------------x x------\
+ * |---| pow2 fixed div |---| pow2 div |----| |
+ * | x-----------------x x------------x | |
+ * src ---| | mux |-- out
+ * | x-----------------x x------------x | |
+ * |---| enh fixed div |---| enh div |0---| |
+ * x-----------------x x------------x x------/
+ *
+ * Clk path for GX series:
+ * src -> pow2 fixed div -> pow2 div -> out
+ *
+ * Clk path for AXG series:
+ * src -> pow2 fixed div -> pow2 div -> mux -> out
+ * src -> enh fixed div -> enh div -> mux -> out
+ *
+ * Clk path for G12A series:
+ * pclk -> pow2 fixed div -> pow2 div -> mux -> out
+ * pclk -> enh fixed div -> enh div -> mux -> out
+ *
+ * The pow2 divider is tied to the controller HW state, and the
+ * divider is only valid when the controller is initialized.
+ *
+ * A set of clock ops is added to make sure we don't read/set this
+ * clock rate while the controller is in an unknown state.
+ */
+
+static unsigned long meson_spicc_pow2_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ struct meson_spicc_device *spicc = pow2_clk_to_spicc(divider);
+
+ if (!spicc->master->cur_msg)
+ return 0;
+
+ return clk_divider_ops.recalc_rate(hw, parent_rate);
+}
+
+static int meson_spicc_pow2_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ struct meson_spicc_device *spicc = pow2_clk_to_spicc(divider);
+
+ if (!spicc->master->cur_msg)
+ return -EINVAL;
+
+ return clk_divider_ops.determine_rate(hw, req);
+}
+
+static int meson_spicc_pow2_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ struct meson_spicc_device *spicc = pow2_clk_to_spicc(divider);
+
+ if (!spicc->master->cur_msg)
+ return -EINVAL;
+
+ return clk_divider_ops.set_rate(hw, rate, parent_rate);
+}
+
+static const struct clk_ops meson_spicc_pow2_clk_ops = {
+ .recalc_rate = meson_spicc_pow2_recalc_rate,
+ .determine_rate = meson_spicc_pow2_determine_rate,
+ .set_rate = meson_spicc_pow2_set_rate,
+};
+
+static int meson_spicc_pow2_clk_init(struct meson_spicc_device *spicc)
+{
+ struct device *dev = &spicc->pdev->dev;
+ struct clk_fixed_factor *pow2_fixed_div;
+ struct clk_init_data init;
+ struct clk *clk;
+ struct clk_parent_data parent_data[2];
+ char name[64];
+
+ memset(&init, 0, sizeof(init));
+ memset(&parent_data, 0, sizeof(parent_data));
+
+ init.parent_data = parent_data;
+
+ /* algorithm for pow2 div: rate = freq / 4 / (2 ^ N) */
+
+ pow2_fixed_div = devm_kzalloc(dev, sizeof(*pow2_fixed_div), GFP_KERNEL);
+ if (!pow2_fixed_div)
+ return -ENOMEM;
+
+ snprintf(name, sizeof(name), "%s#pow2_fixed_div", dev_name(dev));
+ init.name = name;
+ init.ops = &clk_fixed_factor_ops;
+ init.flags = 0;
+ if (spicc->data->has_pclk)
+ parent_data[0].hw = __clk_get_hw(spicc->pclk);
+ else
+ parent_data[0].hw = __clk_get_hw(spicc->core);
+ init.num_parents = 1;
+
+ pow2_fixed_div->mult = 1,
+ pow2_fixed_div->div = 4,
+ pow2_fixed_div->hw.init = &init;
+
+ clk = devm_clk_register(dev, &pow2_fixed_div->hw);
+ if (WARN_ON(IS_ERR(clk)))
+ return PTR_ERR(clk);
+
+ snprintf(name, sizeof(name), "%s#pow2_div", dev_name(dev));
+ init.name = name;
+ init.ops = &meson_spicc_pow2_clk_ops;
+ /*
+ * Set NOCACHE here to make sure we read the actual HW value
+ * since we reset the HW after each transfer.
+ */
+ init.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE;
+ parent_data[0].hw = &pow2_fixed_div->hw;
+ init.num_parents = 1;
+
+ spicc->pow2_div.shift = 16,
+ spicc->pow2_div.width = 3,
+ spicc->pow2_div.flags = CLK_DIVIDER_POWER_OF_TWO,
+ spicc->pow2_div.reg = spicc->base + SPICC_CONREG;
+ spicc->pow2_div.hw.init = &init;
+
+ spicc->clk = devm_clk_register(dev, &spicc->pow2_div.hw);
+ if (WARN_ON(IS_ERR(spicc->clk)))
+ return PTR_ERR(spicc->clk);
+
+ return 0;
+}
+
+static int meson_spicc_enh_clk_init(struct meson_spicc_device *spicc)
+{
+ struct device *dev = &spicc->pdev->dev;
+ struct clk_fixed_factor *enh_fixed_div;
+ struct clk_divider *enh_div;
+ struct clk_mux *mux;
+ struct clk_init_data init;
+ struct clk *clk;
+ struct clk_parent_data parent_data[2];
+ char name[64];
+
+ memset(&init, 0, sizeof(init));
+ memset(&parent_data, 0, sizeof(parent_data));
+
+ init.parent_data = parent_data;
+
+ /* algorithm for enh div: rate = freq / 2 / (N + 1) */
+
+ enh_fixed_div = devm_kzalloc(dev, sizeof(*enh_fixed_div), GFP_KERNEL);
+ if (!enh_fixed_div)
+ return -ENOMEM;
+
+ snprintf(name, sizeof(name), "%s#enh_fixed_div", dev_name(dev));
+ init.name = name;
+ init.ops = &clk_fixed_factor_ops;
+ init.flags = 0;
+ if (spicc->data->has_pclk)
+ parent_data[0].hw = __clk_get_hw(spicc->pclk);
+ else
+ parent_data[0].hw = __clk_get_hw(spicc->core);
+ init.num_parents = 1;
+
+ enh_fixed_div->mult = 1,
+ enh_fixed_div->div = 2,
+ enh_fixed_div->hw.init = &init;
+
+ clk = devm_clk_register(dev, &enh_fixed_div->hw);
+ if (WARN_ON(IS_ERR(clk)))
+ return PTR_ERR(clk);
+
+ enh_div = devm_kzalloc(dev, sizeof(*enh_div), GFP_KERNEL);
+ if (!enh_div)
+ return -ENOMEM;
+
+ snprintf(name, sizeof(name), "%s#enh_div", dev_name(dev));
+ init.name = name;
+ init.ops = &clk_divider_ops;
+ init.flags = CLK_SET_RATE_PARENT;
+ parent_data[0].hw = &enh_fixed_div->hw;
+ init.num_parents = 1;
+
+ enh_div->shift = 16,
+ enh_div->width = 8,
+ enh_div->reg = spicc->base + SPICC_ENH_CTL0;
+ enh_div->hw.init = &init;
+
+ clk = devm_clk_register(dev, &enh_div->hw);
+ if (WARN_ON(IS_ERR(clk)))
+ return PTR_ERR(clk);
+
+ mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return -ENOMEM;
+
+ snprintf(name, sizeof(name), "%s#sel", dev_name(dev));
+ init.name = name;
+ init.ops = &clk_mux_ops;
+ parent_data[0].hw = &spicc->pow2_div.hw;
+ parent_data[1].hw = &enh_div->hw;
+ init.num_parents = 2;
+ init.flags = CLK_SET_RATE_PARENT;
+
+ mux->mask = 0x1,
+ mux->shift = 24,
+ mux->reg = spicc->base + SPICC_ENH_CTL0;
+ mux->hw.init = &init;
+
+ spicc->clk = devm_clk_register(dev, &mux->hw);
+ if (WARN_ON(IS_ERR(spicc->clk)))
+ return PTR_ERR(spicc->clk);
+
+ return 0;
+}
+
+static int meson_spicc_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct meson_spicc_device *spicc;
+ int ret, irq;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*spicc));
+ if (!master) {
+ dev_err(&pdev->dev, "master allocation failed\n");
+ return -ENOMEM;
+ }
+ spicc = spi_master_get_devdata(master);
+ spicc->master = master;
+
+ spicc->data = of_device_get_match_data(&pdev->dev);
+ if (!spicc->data) {
+ dev_err(&pdev->dev, "failed to get match data\n");
+ ret = -EINVAL;
+ goto out_master;
+ }
+
+ spicc->pdev = pdev;
+ platform_set_drvdata(pdev, spicc);
+
+ init_completion(&spicc->done);
+
+ spicc->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(spicc->base)) {
+ dev_err(&pdev->dev, "io resource mapping failed\n");
+ ret = PTR_ERR(spicc->base);
+ goto out_master;
+ }
+
+ /* Set master mode and enable controller */
+ writel_relaxed(SPICC_ENABLE | SPICC_MODE_MASTER,
+ spicc->base + SPICC_CONREG);
+
+ /* Disable all IRQs */
+ writel_relaxed(0, spicc->base + SPICC_INTREG);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto out_master;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, meson_spicc_irq,
+ 0, NULL, spicc);
+ if (ret) {
+ dev_err(&pdev->dev, "irq request failed\n");
+ goto out_master;
+ }
+
+ spicc->core = devm_clk_get(&pdev->dev, "core");
+ if (IS_ERR(spicc->core)) {
+ dev_err(&pdev->dev, "core clock request failed\n");
+ ret = PTR_ERR(spicc->core);
+ goto out_master;
+ }
+
+ if (spicc->data->has_pclk) {
+ spicc->pclk = devm_clk_get(&pdev->dev, "pclk");
+ if (IS_ERR(spicc->pclk)) {
+ dev_err(&pdev->dev, "pclk clock request failed\n");
+ ret = PTR_ERR(spicc->pclk);
+ goto out_master;
+ }
+ }
+
+ ret = clk_prepare_enable(spicc->core);
+ if (ret) {
+ dev_err(&pdev->dev, "core clock enable failed\n");
+ goto out_master;
+ }
+
+ ret = clk_prepare_enable(spicc->pclk);
+ if (ret) {
+ dev_err(&pdev->dev, "pclk clock enable failed\n");
+ goto out_core_clk;
+ }
+
+ device_reset_optional(&pdev->dev);
+
+ master->num_chipselect = 4;
+ master->dev.of_node = pdev->dev.of_node;
+ master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_CS_HIGH;
+ master->bits_per_word_mask = SPI_BPW_MASK(32) |
+ SPI_BPW_MASK(24) |
+ SPI_BPW_MASK(16) |
+ SPI_BPW_MASK(8);
+ master->flags = (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX);
+ master->min_speed_hz = spicc->data->min_speed_hz;
+ master->max_speed_hz = spicc->data->max_speed_hz;
+ master->setup = meson_spicc_setup;
+ master->cleanup = meson_spicc_cleanup;
+ master->prepare_message = meson_spicc_prepare_message;
+ master->unprepare_transfer_hardware = meson_spicc_unprepare_transfer;
+ master->transfer_one = meson_spicc_transfer_one;
+ master->use_gpio_descriptors = true;
+
+ meson_spicc_oen_enable(spicc);
+
+ ret = meson_spicc_pow2_clk_init(spicc);
+ if (ret) {
+ dev_err(&pdev->dev, "pow2 clock registration failed\n");
+ goto out_clk;
+ }
+
+ if (spicc->data->has_enhance_clk_div) {
+ ret = meson_spicc_enh_clk_init(spicc);
+ if (ret) {
+ dev_err(&pdev->dev, "clock registration failed\n");
+ goto out_clk;
+ }
+ }
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret) {
+ dev_err(&pdev->dev, "spi master registration failed\n");
+ goto out_clk;
+ }
+
+ return 0;
+
+out_clk:
+ clk_disable_unprepare(spicc->pclk);
+
+out_core_clk:
+ clk_disable_unprepare(spicc->core);
+
+out_master:
+ spi_master_put(master);
+
+ return ret;
+}
+
+static int meson_spicc_remove(struct platform_device *pdev)
+{
+ struct meson_spicc_device *spicc = platform_get_drvdata(pdev);
+
+ /* Disable SPI */
+ writel(0, spicc->base + SPICC_CONREG);
+
+ clk_disable_unprepare(spicc->core);
+ clk_disable_unprepare(spicc->pclk);
+
+ spi_master_put(spicc->master);
+
+ return 0;
+}
+
+static const struct meson_spicc_data meson_spicc_gx_data = {
+ .max_speed_hz = 30000000,
+ .min_speed_hz = 325000,
+ .fifo_size = 16,
+};
+
+static const struct meson_spicc_data meson_spicc_axg_data = {
+ .max_speed_hz = 80000000,
+ .min_speed_hz = 325000,
+ .fifo_size = 16,
+ .has_oen = true,
+ .has_enhance_clk_div = true,
+};
+
+static const struct meson_spicc_data meson_spicc_g12a_data = {
+ .max_speed_hz = 166666666,
+ .min_speed_hz = 50000,
+ .fifo_size = 15,
+ .has_oen = true,
+ .has_enhance_clk_div = true,
+ .has_pclk = true,
+};
+
+static const struct of_device_id meson_spicc_of_match[] = {
+ {
+ .compatible = "amlogic,meson-gx-spicc",
+ .data = &meson_spicc_gx_data,
+ },
+ {
+ .compatible = "amlogic,meson-axg-spicc",
+ .data = &meson_spicc_axg_data,
+ },
+ {
+ .compatible = "amlogic,meson-g12a-spicc",
+ .data = &meson_spicc_g12a_data,
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, meson_spicc_of_match);
+
+static struct platform_driver meson_spicc_driver = {
+ .probe = meson_spicc_probe,
+ .remove = meson_spicc_remove,
+ .driver = {
+ .name = "meson-spicc",
+ .of_match_table = of_match_ptr(meson_spicc_of_match),
+ },
+};
+
+module_platform_driver(meson_spicc_driver);
+
+MODULE_DESCRIPTION("Meson SPI Communication Controller driver");
+MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-meson-spifc.c b/drivers/spi/spi-meson-spifc.c
new file mode 100644
index 000000000..c8ed7815c
--- /dev/null
+++ b/drivers/spi/spi-meson-spifc.c
@@ -0,0 +1,457 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Driver for Amlogic Meson SPI flash controller (SPIFC)
+//
+// Copyright (C) 2014 Beniamino Galvani <b.galvani@gmail.com>
+//
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+#include <linux/types.h>
+
+/* register map */
+#define REG_CMD 0x00
+#define REG_ADDR 0x04
+#define REG_CTRL 0x08
+#define REG_CTRL1 0x0c
+#define REG_STATUS 0x10
+#define REG_CTRL2 0x14
+#define REG_CLOCK 0x18
+#define REG_USER 0x1c
+#define REG_USER1 0x20
+#define REG_USER2 0x24
+#define REG_USER3 0x28
+#define REG_USER4 0x2c
+#define REG_SLAVE 0x30
+#define REG_SLAVE1 0x34
+#define REG_SLAVE2 0x38
+#define REG_SLAVE3 0x3c
+#define REG_C0 0x40
+#define REG_B8 0x60
+#define REG_MAX 0x7c
+
+/* register fields */
+#define CMD_USER BIT(18)
+#define CTRL_ENABLE_AHB BIT(17)
+#define CLOCK_SOURCE BIT(31)
+#define CLOCK_DIV_SHIFT 12
+#define CLOCK_DIV_MASK (0x3f << CLOCK_DIV_SHIFT)
+#define CLOCK_CNT_HIGH_SHIFT 6
+#define CLOCK_CNT_HIGH_MASK (0x3f << CLOCK_CNT_HIGH_SHIFT)
+#define CLOCK_CNT_LOW_SHIFT 0
+#define CLOCK_CNT_LOW_MASK (0x3f << CLOCK_CNT_LOW_SHIFT)
+#define USER_DIN_EN_MS BIT(0)
+#define USER_CMP_MODE BIT(2)
+#define USER_UC_DOUT_SEL BIT(27)
+#define USER_UC_DIN_SEL BIT(28)
+#define USER_UC_MASK ((BIT(5) - 1) << 27)
+#define USER1_BN_UC_DOUT_SHIFT 17
+#define USER1_BN_UC_DOUT_MASK (0xff << 16)
+#define USER1_BN_UC_DIN_SHIFT 8
+#define USER1_BN_UC_DIN_MASK (0xff << 8)
+#define USER4_CS_ACT BIT(30)
+#define SLAVE_TRST_DONE BIT(4)
+#define SLAVE_OP_MODE BIT(30)
+#define SLAVE_SW_RST BIT(31)
+
+#define SPIFC_BUFFER_SIZE 64
+
+/**
+ * struct meson_spifc - Meson SPIFC driver data
+ * @master: the SPI master
+ * @regmap: regmap for device registers
+ * @clk: input clock of the built-in baud rate generator
+ * @dev: the device structure
+ */
+struct meson_spifc {
+ struct spi_master *master;
+ struct regmap *regmap;
+ struct clk *clk;
+ struct device *dev;
+};
+
+static const struct regmap_config spifc_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = REG_MAX,
+};
+
+/**
+ * meson_spifc_wait_ready() - wait for the current operation to terminate
+ * @spifc: the Meson SPI device
+ * Return: 0 on success, a negative value on error
+ */
+static int meson_spifc_wait_ready(struct meson_spifc *spifc)
+{
+ unsigned long deadline = jiffies + msecs_to_jiffies(5);
+ u32 data;
+
+ do {
+ regmap_read(spifc->regmap, REG_SLAVE, &data);
+ if (data & SLAVE_TRST_DONE)
+ return 0;
+ cond_resched();
+ } while (!time_after(jiffies, deadline));
+
+ return -ETIMEDOUT;
+}
+
+/**
+ * meson_spifc_drain_buffer() - copy data from device buffer to memory
+ * @spifc: the Meson SPI device
+ * @buf: the destination buffer
+ * @len: number of bytes to copy
+ */
+static void meson_spifc_drain_buffer(struct meson_spifc *spifc, u8 *buf,
+ int len)
+{
+ u32 data;
+ int i = 0;
+
+ while (i < len) {
+ regmap_read(spifc->regmap, REG_C0 + i, &data);
+
+ if (len - i >= 4) {
+ *((u32 *)buf) = data;
+ buf += 4;
+ } else {
+ memcpy(buf, &data, len - i);
+ break;
+ }
+ i += 4;
+ }
+}
+
+/**
+ * meson_spifc_fill_buffer() - copy data from memory to device buffer
+ * @spifc: the Meson SPI device
+ * @buf: the source buffer
+ * @len: number of bytes to copy
+ */
+static void meson_spifc_fill_buffer(struct meson_spifc *spifc, const u8 *buf,
+ int len)
+{
+ u32 data;
+ int i = 0;
+
+ while (i < len) {
+ if (len - i >= 4)
+ data = *(u32 *)buf;
+ else
+ memcpy(&data, buf, len - i);
+
+ regmap_write(spifc->regmap, REG_C0 + i, data);
+
+ buf += 4;
+ i += 4;
+ }
+}
+
+/**
+ * meson_spifc_setup_speed() - program the clock divider
+ * @spifc: the Meson SPI device
+ * @speed: desired speed in Hz
+ */
+static void meson_spifc_setup_speed(struct meson_spifc *spifc, u32 speed)
+{
+ unsigned long parent, value;
+ int n;
+
+ parent = clk_get_rate(spifc->clk);
+ n = max_t(int, parent / speed - 1, 1);
+
+ dev_dbg(spifc->dev, "parent %lu, speed %u, n %d\n", parent,
+ speed, n);
+
+ value = (n << CLOCK_DIV_SHIFT) & CLOCK_DIV_MASK;
+ value |= (n << CLOCK_CNT_LOW_SHIFT) & CLOCK_CNT_LOW_MASK;
+ value |= (((n + 1) / 2 - 1) << CLOCK_CNT_HIGH_SHIFT) &
+ CLOCK_CNT_HIGH_MASK;
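+
+	/*
+	 * Worked example with illustrative numbers (not from a datasheet):
+	 * a 125 MHz parent clock and a 25 MHz request give n = 4, so the
+	 * divider and low count fields are programmed with 4 and the high
+	 * count field with (4 + 1) / 2 - 1 = 1.
+	 */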
+
+ regmap_write(spifc->regmap, REG_CLOCK, value);
+}
+
+/**
+ * meson_spifc_txrx() - transfer a chunk of data
+ * @spifc: the Meson SPI device
+ * @xfer: the current SPI transfer
+ * @offset: offset of the data to transfer
+ * @len: length of the data to transfer
+ * @last_xfer: whether this is the last transfer of the message
+ * @last_chunk: whether this is the last chunk of the transfer
+ * Return: 0 on success, a negative value on error
+ */
+static int meson_spifc_txrx(struct meson_spifc *spifc,
+ struct spi_transfer *xfer,
+ int offset, int len, bool last_xfer,
+ bool last_chunk)
+{
+ bool keep_cs = true;
+ int ret;
+
+ if (xfer->tx_buf)
+ meson_spifc_fill_buffer(spifc, xfer->tx_buf + offset, len);
+
+ /* enable DOUT stage */
+ regmap_update_bits(spifc->regmap, REG_USER, USER_UC_MASK,
+ USER_UC_DOUT_SEL);
+ regmap_write(spifc->regmap, REG_USER1,
+ (8 * len - 1) << USER1_BN_UC_DOUT_SHIFT);
+
+ /* enable data input during DOUT */
+ regmap_update_bits(spifc->regmap, REG_USER, USER_DIN_EN_MS,
+ USER_DIN_EN_MS);
+
+ if (last_chunk) {
+ if (last_xfer)
+ keep_cs = xfer->cs_change;
+ else
+ keep_cs = !xfer->cs_change;
+ }
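+
+	/*
+	 * In other words: CS stays asserted between chunks of one transfer;
+	 * on the last chunk it follows the usual spi_transfer rules, i.e. it
+	 * is deasserted after the final transfer of the message unless
+	 * cs_change asks to keep it, and kept between transfers unless
+	 * cs_change asks to toggle it.
+	 */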
+
+ regmap_update_bits(spifc->regmap, REG_USER4, USER4_CS_ACT,
+ keep_cs ? USER4_CS_ACT : 0);
+
+ /* clear transition done bit */
+ regmap_update_bits(spifc->regmap, REG_SLAVE, SLAVE_TRST_DONE, 0);
+ /* start transfer */
+ regmap_update_bits(spifc->regmap, REG_CMD, CMD_USER, CMD_USER);
+
+ ret = meson_spifc_wait_ready(spifc);
+
+ if (!ret && xfer->rx_buf)
+ meson_spifc_drain_buffer(spifc, xfer->rx_buf + offset, len);
+
+ return ret;
+}
+
+/**
+ * meson_spifc_transfer_one() - perform a single transfer
+ * @master: the SPI master
+ * @spi: the SPI device
+ * @xfer: the current SPI transfer
+ * Return: 0 on success, a negative value on error
+ */
+static int meson_spifc_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct meson_spifc *spifc = spi_master_get_devdata(master);
+ int len, done = 0, ret = 0;
+
+ meson_spifc_setup_speed(spifc, xfer->speed_hz);
+
+ regmap_update_bits(spifc->regmap, REG_CTRL, CTRL_ENABLE_AHB, 0);
+
+ while (done < xfer->len && !ret) {
+ len = min_t(int, xfer->len - done, SPIFC_BUFFER_SIZE);
+ ret = meson_spifc_txrx(spifc, xfer, done, len,
+ spi_transfer_is_last(master, xfer),
+ done + len >= xfer->len);
+ done += len;
+ }
+
+ regmap_update_bits(spifc->regmap, REG_CTRL, CTRL_ENABLE_AHB,
+ CTRL_ENABLE_AHB);
+
+ return ret;
+}
+
+/**
+ * meson_spifc_hw_init() - reset and initialize the SPI controller
+ * @spifc: the Meson SPI device
+ */
+static void meson_spifc_hw_init(struct meson_spifc *spifc)
+{
+ /* reset device */
+ regmap_update_bits(spifc->regmap, REG_SLAVE, SLAVE_SW_RST,
+ SLAVE_SW_RST);
+ /* disable compatible mode */
+ regmap_update_bits(spifc->regmap, REG_USER, USER_CMP_MODE, 0);
+ /* set master mode */
+ regmap_update_bits(spifc->regmap, REG_SLAVE, SLAVE_OP_MODE, 0);
+}
+
+static int meson_spifc_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct meson_spifc *spifc;
+ void __iomem *base;
+ unsigned int rate;
+ int ret = 0;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct meson_spifc));
+ if (!master)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, master);
+
+ spifc = spi_master_get_devdata(master);
+ spifc->dev = &pdev->dev;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base)) {
+ ret = PTR_ERR(base);
+ goto out_err;
+ }
+
+ spifc->regmap = devm_regmap_init_mmio(spifc->dev, base,
+ &spifc_regmap_config);
+ if (IS_ERR(spifc->regmap)) {
+ ret = PTR_ERR(spifc->regmap);
+ goto out_err;
+ }
+
+ spifc->clk = devm_clk_get(spifc->dev, NULL);
+ if (IS_ERR(spifc->clk)) {
+ dev_err(spifc->dev, "missing clock\n");
+ ret = PTR_ERR(spifc->clk);
+ goto out_err;
+ }
+
+ ret = clk_prepare_enable(spifc->clk);
+ if (ret) {
+ dev_err(spifc->dev, "can't prepare clock\n");
+ goto out_err;
+ }
+
+ rate = clk_get_rate(spifc->clk);
+
+ master->num_chipselect = 1;
+ master->dev.of_node = pdev->dev.of_node;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->auto_runtime_pm = true;
+ master->transfer_one = meson_spifc_transfer_one;
+ master->min_speed_hz = rate >> 6;
+ master->max_speed_hz = rate >> 1;
+
+ meson_spifc_hw_init(spifc);
+
+ pm_runtime_set_active(spifc->dev);
+ pm_runtime_enable(spifc->dev);
+
+ ret = devm_spi_register_master(spifc->dev, master);
+ if (ret) {
+ dev_err(spifc->dev, "failed to register spi master\n");
+ goto out_clk;
+ }
+
+ return 0;
+out_clk:
+ clk_disable_unprepare(spifc->clk);
+ pm_runtime_disable(spifc->dev);
+out_err:
+ spi_master_put(master);
+ return ret;
+}
+
+static int meson_spifc_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct meson_spifc *spifc = spi_master_get_devdata(master);
+
+ pm_runtime_get_sync(&pdev->dev);
+ clk_disable_unprepare(spifc->clk);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int meson_spifc_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct meson_spifc *spifc = spi_master_get_devdata(master);
+ int ret;
+
+ ret = spi_master_suspend(master);
+ if (ret)
+ return ret;
+
+ if (!pm_runtime_suspended(dev))
+ clk_disable_unprepare(spifc->clk);
+
+ return 0;
+}
+
+static int meson_spifc_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct meson_spifc *spifc = spi_master_get_devdata(master);
+ int ret;
+
+ if (!pm_runtime_suspended(dev)) {
+ ret = clk_prepare_enable(spifc->clk);
+ if (ret)
+ return ret;
+ }
+
+ meson_spifc_hw_init(spifc);
+
+ ret = spi_master_resume(master);
+ if (ret)
+ clk_disable_unprepare(spifc->clk);
+
+ return ret;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_PM
+static int meson_spifc_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct meson_spifc *spifc = spi_master_get_devdata(master);
+
+ clk_disable_unprepare(spifc->clk);
+
+ return 0;
+}
+
+static int meson_spifc_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct meson_spifc *spifc = spi_master_get_devdata(master);
+
+ return clk_prepare_enable(spifc->clk);
+}
+#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops meson_spifc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(meson_spifc_suspend, meson_spifc_resume)
+ SET_RUNTIME_PM_OPS(meson_spifc_runtime_suspend,
+ meson_spifc_runtime_resume,
+ NULL)
+};
+
+static const struct of_device_id meson_spifc_dt_match[] = {
+ { .compatible = "amlogic,meson6-spifc", },
+ { .compatible = "amlogic,meson-gxbb-spifc", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, meson_spifc_dt_match);
+
+static struct platform_driver meson_spifc_driver = {
+ .probe = meson_spifc_probe,
+ .remove = meson_spifc_remove,
+ .driver = {
+ .name = "meson-spifc",
+ .of_match_table = of_match_ptr(meson_spifc_dt_match),
+ .pm = &meson_spifc_pm_ops,
+ },
+};
+
+module_platform_driver(meson_spifc_driver);
+
+MODULE_AUTHOR("Beniamino Galvani <b.galvani@gmail.com>");
+MODULE_DESCRIPTION("Amlogic Meson SPIFC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-microchip-core-qspi.c b/drivers/spi/spi-microchip-core-qspi.c
new file mode 100644
index 000000000..19a6a4682
--- /dev/null
+++ b/drivers/spi/spi-microchip-core-qspi.c
@@ -0,0 +1,600 @@
+// SPDX-License-Identifier: (GPL-2.0)
+/*
+ * Microchip coreQSPI QSPI controller driver
+ *
+ * Copyright (C) 2018-2022 Microchip Technology Inc. and its subsidiaries
+ *
+ * Author: Naga Sureshkumar Relli <nagasuresh.relli@microchip.com>
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+
+/*
+ * QSPI Control register mask defines
+ */
+#define CONTROL_ENABLE BIT(0)
+#define CONTROL_MASTER BIT(1)
+#define CONTROL_XIP BIT(2)
+#define CONTROL_XIPADDR BIT(3)
+#define CONTROL_CLKIDLE BIT(10)
+#define CONTROL_SAMPLE_MASK GENMASK(12, 11)
+#define CONTROL_MODE0 BIT(13)
+#define CONTROL_MODE12_MASK GENMASK(15, 14)
+#define CONTROL_MODE12_EX_RO BIT(14)
+#define CONTROL_MODE12_EX_RW BIT(15)
+#define CONTROL_MODE12_FULL GENMASK(15, 14)
+#define CONTROL_FLAGSX4 BIT(16)
+#define CONTROL_CLKRATE_MASK GENMASK(27, 24)
+#define CONTROL_CLKRATE_SHIFT 24
+
+/*
+ * QSPI Frames register mask defines
+ */
+#define FRAMES_TOTALBYTES_MASK GENMASK(15, 0)
+#define FRAMES_CMDBYTES_MASK GENMASK(24, 16)
+#define FRAMES_CMDBYTES_SHIFT 16
+#define FRAMES_SHIFT 25
+#define FRAMES_IDLE_MASK GENMASK(29, 26)
+#define FRAMES_IDLE_SHIFT 26
+#define FRAMES_FLAGBYTE BIT(30)
+#define FRAMES_FLAGWORD BIT(31)
+
+/*
+ * QSPI Interrupt Enable register mask defines
+ */
+#define IEN_TXDONE BIT(0)
+#define IEN_RXDONE BIT(1)
+#define IEN_RXAVAILABLE BIT(2)
+#define IEN_TXAVAILABLE BIT(3)
+#define IEN_RXFIFOEMPTY BIT(4)
+#define IEN_TXFIFOFULL BIT(5)
+
+/*
+ * QSPI Status register mask defines
+ */
+#define STATUS_TXDONE BIT(0)
+#define STATUS_RXDONE BIT(1)
+#define STATUS_RXAVAILABLE BIT(2)
+#define STATUS_TXAVAILABLE BIT(3)
+#define STATUS_RXFIFOEMPTY BIT(4)
+#define STATUS_TXFIFOFULL BIT(5)
+#define STATUS_READY BIT(7)
+#define STATUS_FLAGSX4 BIT(8)
+#define STATUS_MASK GENMASK(8, 0)
+
+#define BYTESUPPER_MASK GENMASK(31, 16)
+#define BYTESLOWER_MASK GENMASK(15, 0)
+
+#define MAX_DIVIDER 16
+#define MIN_DIVIDER 0
+#define MAX_DATA_CMD_LEN 256
+
+/* QSPI ready timeout value */
+#define TIMEOUT_MS 500
+
+/*
+ * QSPI Register offsets.
+ */
+#define REG_CONTROL (0x00)
+#define REG_FRAMES (0x04)
+#define REG_IEN (0x0c)
+#define REG_STATUS (0x10)
+#define REG_DIRECT_ACCESS (0x14)
+#define REG_UPPER_ACCESS (0x18)
+#define REG_RX_DATA (0x40)
+#define REG_TX_DATA (0x44)
+#define REG_X4_RX_DATA (0x48)
+#define REG_X4_TX_DATA (0x4c)
+#define REG_FRAMESUP (0x50)
+
+/**
+ * struct mchp_coreqspi - Defines qspi driver instance
+ * @regs: Virtual address of the QSPI controller registers
+ * @clk: QSPI Operating clock
+ * @data_completion: completion signalled by the IRQ handler when an operation finishes
+ * @op_lock: lock access to the device
+ * @txbuf: TX buffer
+ * @rxbuf: RX buffer
+ * @irq: IRQ number
+ * @tx_len: Number of bytes left to transfer
+ * @rx_len: Number of bytes left to receive
+ */
+struct mchp_coreqspi {
+ void __iomem *regs;
+ struct clk *clk;
+ struct completion data_completion;
+ struct mutex op_lock; /* lock access to the device */
+ u8 *txbuf;
+ u8 *rxbuf;
+ int irq;
+ int tx_len;
+ int rx_len;
+};
+
+static int mchp_coreqspi_set_mode(struct mchp_coreqspi *qspi, const struct spi_mem_op *op)
+{
+ u32 control = readl_relaxed(qspi->regs + REG_CONTROL);
+
+ /*
+	 * The operating mode can be configured based on the command that needs to be sent.
+ * bits[15:14]: Sets whether multiple bit SPI operates in normal, extended or full modes.
+ * 00: Normal (single DQ0 TX and single DQ1 RX lines)
+ * 01: Extended RO (command and address bytes on DQ0 only)
+ * 10: Extended RW (command byte on DQ0 only)
+ * 11: Full. (command and address are on all DQ lines)
+ * bit[13]: Sets whether multiple bit SPI uses 2 or 4 bits of data
+ * 0: 2-bits (BSPI)
+ * 1: 4-bits (QSPI)
+ */
+ if (op->data.buswidth == 4 || op->data.buswidth == 2) {
+ control &= ~CONTROL_MODE12_MASK;
+ if (op->cmd.buswidth == 1 && (op->addr.buswidth == 1 || op->addr.buswidth == 0))
+ control |= CONTROL_MODE12_EX_RO;
+ else if (op->cmd.buswidth == 1)
+ control |= CONTROL_MODE12_EX_RW;
+ else
+ control |= CONTROL_MODE12_FULL;
+
+ control |= CONTROL_MODE0;
+ } else {
+ control &= ~(CONTROL_MODE12_MASK |
+ CONTROL_MODE0);
+ }
+
+ writel_relaxed(control, qspi->regs + REG_CONTROL);
+
+ return 0;
+}
+
+static inline void mchp_coreqspi_read_op(struct mchp_coreqspi *qspi)
+{
+ u32 control, data;
+
+ if (!qspi->rx_len)
+ return;
+
+ control = readl_relaxed(qspi->regs + REG_CONTROL);
+
+ /*
+	 * Read 4 bytes from the SPI FIFO in a single transaction and then read
+	 * the remaining data byte-wise.
+ */
+ control |= CONTROL_FLAGSX4;
+ writel_relaxed(control, qspi->regs + REG_CONTROL);
+
+ while (qspi->rx_len >= 4) {
+ while (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_RXFIFOEMPTY)
+ ;
+ data = readl_relaxed(qspi->regs + REG_X4_RX_DATA);
+ *(u32 *)qspi->rxbuf = data;
+ qspi->rxbuf += 4;
+ qspi->rx_len -= 4;
+ }
+
+ control &= ~CONTROL_FLAGSX4;
+ writel_relaxed(control, qspi->regs + REG_CONTROL);
+
+ while (qspi->rx_len--) {
+ while (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_RXFIFOEMPTY)
+ ;
+ data = readl_relaxed(qspi->regs + REG_RX_DATA);
+ *qspi->rxbuf++ = (data & 0xFF);
+ }
+}
+
+static inline void mchp_coreqspi_write_op(struct mchp_coreqspi *qspi, bool word)
+{
+ u32 control, data;
+
+ control = readl_relaxed(qspi->regs + REG_CONTROL);
+ control |= CONTROL_FLAGSX4;
+ writel_relaxed(control, qspi->regs + REG_CONTROL);
+
+ while (qspi->tx_len >= 4) {
+ while (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_TXFIFOFULL)
+ ;
+ data = *(u32 *)qspi->txbuf;
+ qspi->txbuf += 4;
+ qspi->tx_len -= 4;
+ writel_relaxed(data, qspi->regs + REG_X4_TX_DATA);
+ }
+
+ control &= ~CONTROL_FLAGSX4;
+ writel_relaxed(control, qspi->regs + REG_CONTROL);
+
+ while (qspi->tx_len--) {
+ while (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_TXFIFOFULL)
+ ;
+ data = *qspi->txbuf++;
+ writel_relaxed(data, qspi->regs + REG_TX_DATA);
+ }
+}
+
+static void mchp_coreqspi_enable_ints(struct mchp_coreqspi *qspi)
+{
+ u32 mask = IEN_TXDONE |
+ IEN_RXDONE |
+ IEN_RXAVAILABLE;
+
+ writel_relaxed(mask, qspi->regs + REG_IEN);
+}
+
+static void mchp_coreqspi_disable_ints(struct mchp_coreqspi *qspi)
+{
+ writel_relaxed(0, qspi->regs + REG_IEN);
+}
+
+static irqreturn_t mchp_coreqspi_isr(int irq, void *dev_id)
+{
+ struct mchp_coreqspi *qspi = (struct mchp_coreqspi *)dev_id;
+ irqreturn_t ret = IRQ_NONE;
+ int intfield = readl_relaxed(qspi->regs + REG_STATUS) & STATUS_MASK;
+
+ if (intfield == 0)
+ return ret;
+
+ if (intfield & IEN_TXDONE) {
+ writel_relaxed(IEN_TXDONE, qspi->regs + REG_STATUS);
+ ret = IRQ_HANDLED;
+ }
+
+ if (intfield & IEN_RXAVAILABLE) {
+ writel_relaxed(IEN_RXAVAILABLE, qspi->regs + REG_STATUS);
+ mchp_coreqspi_read_op(qspi);
+ ret = IRQ_HANDLED;
+ }
+
+ if (intfield & IEN_RXDONE) {
+ writel_relaxed(IEN_RXDONE, qspi->regs + REG_STATUS);
+ complete(&qspi->data_completion);
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+static int mchp_coreqspi_setup_clock(struct mchp_coreqspi *qspi, struct spi_device *spi)
+{
+ unsigned long clk_hz;
+ u32 control, baud_rate_val = 0;
+
+ clk_hz = clk_get_rate(qspi->clk);
+ if (!clk_hz)
+ return -EINVAL;
+
+ baud_rate_val = DIV_ROUND_UP(clk_hz, 2 * spi->max_speed_hz);
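+	/*
+	 * E.g. (illustrative numbers) a 200 MHz system clock and a 25 MHz
+	 * device limit give baud_rate_val = DIV_ROUND_UP(200, 50) = 4, which
+	 * is within the MAX_DIVIDER limit of 16 checked below.
+	 */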
+ if (baud_rate_val > MAX_DIVIDER || baud_rate_val < MIN_DIVIDER) {
+ dev_err(&spi->dev,
+ "could not configure the clock for spi clock %d Hz & system clock %ld Hz\n",
+ spi->max_speed_hz, clk_hz);
+ return -EINVAL;
+ }
+
+ control = readl_relaxed(qspi->regs + REG_CONTROL);
+ control |= baud_rate_val << CONTROL_CLKRATE_SHIFT;
+ writel_relaxed(control, qspi->regs + REG_CONTROL);
+ control = readl_relaxed(qspi->regs + REG_CONTROL);
+
+ if ((spi->mode & SPI_CPOL) && (spi->mode & SPI_CPHA))
+ control |= CONTROL_CLKIDLE;
+ else
+ control &= ~CONTROL_CLKIDLE;
+
+ writel_relaxed(control, qspi->regs + REG_CONTROL);
+
+ return 0;
+}
+
+static int mchp_coreqspi_setup_op(struct spi_device *spi_dev)
+{
+ struct spi_controller *ctlr = spi_dev->master;
+ struct mchp_coreqspi *qspi = spi_controller_get_devdata(ctlr);
+ u32 control = readl_relaxed(qspi->regs + REG_CONTROL);
+
+ control |= (CONTROL_MASTER | CONTROL_ENABLE);
+ control &= ~CONTROL_CLKIDLE;
+ writel_relaxed(control, qspi->regs + REG_CONTROL);
+
+ return 0;
+}
+
+static inline void mchp_coreqspi_config_op(struct mchp_coreqspi *qspi, const struct spi_mem_op *op)
+{
+ u32 idle_cycles = 0;
+ int total_bytes, cmd_bytes, frames, ctrl;
+
+ cmd_bytes = op->cmd.nbytes + op->addr.nbytes;
+ total_bytes = cmd_bytes + op->data.nbytes;
+
+ /*
+	 * As per the coreQSPI IP spec, the number of command and data bytes is
+	 * controlled by the frames register for each SPI sequence. This supports
+	 * the SPI flash memory read and write sequences below, so configure the
+	 * cmd and total bytes accordingly.
+ * ---------------------------------------------------------------------
+ * TOTAL BYTES | CMD BYTES | What happens |
+ * ______________________________________________________________________
+ * | | |
+ * 1 | 1 | The SPI core will transmit a single byte |
+ * | | and receive data is discarded |
+ * | | |
+ * 1 | 0 | The SPI core will transmit a single byte |
+ * | | and return a single byte |
+ * | | |
+ * 10 | 4 | The SPI core will transmit 4 command |
+ * | | bytes discarding the receive data and |
+ * | | transmits 6 dummy bytes returning the 6 |
+ * | | received bytes and return a single byte |
+ * | | |
+ * 10 | 10 | The SPI core will transmit 10 command |
+ * | | |
+ * 10 | 0 | The SPI core will transmit 10 command |
+ * | | bytes and returning 10 received bytes |
+ * ______________________________________________________________________
+ */
+ if (!(op->data.dir == SPI_MEM_DATA_IN))
+ cmd_bytes = total_bytes;
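+
+	/*
+	 * E.g. (illustrative op) a read with a 1-byte opcode, 3-byte address
+	 * and 4 data bytes keeps cmd_bytes = 4 and total_bytes = 8, so the
+	 * receive data is only kept for the last 4 bytes; for a write,
+	 * cmd_bytes is raised to total_bytes and all received data is
+	 * discarded.
+	 */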
+
+ frames = total_bytes & BYTESUPPER_MASK;
+ writel_relaxed(frames, qspi->regs + REG_FRAMESUP);
+ frames = total_bytes & BYTESLOWER_MASK;
+ frames |= cmd_bytes << FRAMES_CMDBYTES_SHIFT;
+
+ if (op->dummy.buswidth)
+ idle_cycles = op->dummy.nbytes * 8 / op->dummy.buswidth;
+
+ frames |= idle_cycles << FRAMES_IDLE_SHIFT;
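+	/* e.g. (illustrative) one dummy byte on 4 lines is 8 / 4 = 2 idle cycles */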
+ ctrl = readl_relaxed(qspi->regs + REG_CONTROL);
+
+ if (ctrl & CONTROL_MODE12_MASK)
+ frames |= (1 << FRAMES_SHIFT);
+
+ frames |= FRAMES_FLAGWORD;
+ writel_relaxed(frames, qspi->regs + REG_FRAMES);
+}
+
+static int mchp_qspi_wait_for_ready(struct spi_mem *mem)
+{
+	struct mchp_coreqspi *qspi = spi_controller_get_devdata(mem->spi->master);
+ u32 status;
+ int ret;
+
+ ret = readl_poll_timeout(qspi->regs + REG_STATUS, status,
+ (status & STATUS_READY), 0,
+ TIMEOUT_MS);
+ if (ret) {
+ dev_err(&mem->spi->dev,
+ "Timeout waiting on QSPI ready.\n");
+ return -ETIMEDOUT;
+ }
+
+ return ret;
+}
+
+static int mchp_coreqspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+	struct mchp_coreqspi *qspi = spi_controller_get_devdata(mem->spi->master);
+ u32 address = op->addr.val;
+ u8 opcode = op->cmd.opcode;
+ u8 opaddr[5];
+ int err, i;
+
+ mutex_lock(&qspi->op_lock);
+ err = mchp_qspi_wait_for_ready(mem);
+ if (err)
+ goto error;
+
+ err = mchp_coreqspi_setup_clock(qspi, mem->spi);
+ if (err)
+ goto error;
+
+ err = mchp_coreqspi_set_mode(qspi, op);
+ if (err)
+ goto error;
+
+ reinit_completion(&qspi->data_completion);
+ mchp_coreqspi_config_op(qspi, op);
+ if (op->cmd.opcode) {
+ qspi->txbuf = &opcode;
+ qspi->rxbuf = NULL;
+ qspi->tx_len = op->cmd.nbytes;
+ qspi->rx_len = 0;
+ mchp_coreqspi_write_op(qspi, false);
+ }
+
+ qspi->txbuf = &opaddr[0];
+ if (op->addr.nbytes) {
+ for (i = 0; i < op->addr.nbytes; i++)
+ qspi->txbuf[i] = address >> (8 * (op->addr.nbytes - i - 1));
+
+ qspi->rxbuf = NULL;
+ qspi->tx_len = op->addr.nbytes;
+ qspi->rx_len = 0;
+ mchp_coreqspi_write_op(qspi, false);
+ }
+
+ if (op->data.nbytes) {
+ if (op->data.dir == SPI_MEM_DATA_OUT) {
+ qspi->txbuf = (u8 *)op->data.buf.out;
+ qspi->rxbuf = NULL;
+ qspi->rx_len = 0;
+ qspi->tx_len = op->data.nbytes;
+ mchp_coreqspi_write_op(qspi, true);
+ } else {
+ qspi->txbuf = NULL;
+ qspi->rxbuf = (u8 *)op->data.buf.in;
+ qspi->rx_len = op->data.nbytes;
+ qspi->tx_len = 0;
+ }
+ }
+
+ mchp_coreqspi_enable_ints(qspi);
+
+ if (!wait_for_completion_timeout(&qspi->data_completion, msecs_to_jiffies(1000)))
+ err = -ETIMEDOUT;
+
+error:
+ mutex_unlock(&qspi->op_lock);
+ mchp_coreqspi_disable_ints(qspi);
+
+ return err;
+}
+
+static bool mchp_coreqspi_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ if (!spi_mem_default_supports_op(mem, op))
+ return false;
+
+ if ((op->data.buswidth == 4 || op->data.buswidth == 2) &&
+ (op->cmd.buswidth == 1 && (op->addr.buswidth == 1 || op->addr.buswidth == 0))) {
+ /*
+		 * If the command and address are on DQ0 only, this
+		 * controller cannot send data on dual or quad lines,
+		 * but it can read data on dual or quad lines using the
+		 * same command/address configuration on DQ0.
+		 * i.e. the control register [15:13] EX_RO (read only)
+		 * setting only covers the case where command and address
+		 * are on DQ0 and the data phase is a read, not a write.
+		 * E.g. 0x34h (Quad Load Program Data) is not supported;
+		 * the spi-mem layer will then iterate over the commands
+		 * and choose a supported one.
+ */
+ if (op->data.dir == SPI_MEM_DATA_OUT)
+ return false;
+ }
+
+ return true;
+}
+
+static int mchp_coreqspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
+{
+ if (op->data.dir == SPI_MEM_DATA_OUT || op->data.dir == SPI_MEM_DATA_IN) {
+ if (op->data.nbytes > MAX_DATA_CMD_LEN)
+ op->data.nbytes = MAX_DATA_CMD_LEN;
+ }
+
+ return 0;
+}
+
+static const struct spi_controller_mem_ops mchp_coreqspi_mem_ops = {
+ .adjust_op_size = mchp_coreqspi_adjust_op_size,
+ .supports_op = mchp_coreqspi_supports_op,
+ .exec_op = mchp_coreqspi_exec_op,
+};
+
+static int mchp_coreqspi_probe(struct platform_device *pdev)
+{
+ struct spi_controller *ctlr;
+ struct mchp_coreqspi *qspi;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ int ret;
+
+ ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*qspi));
+ if (!ctlr)
+ return dev_err_probe(&pdev->dev, -ENOMEM,
+ "unable to allocate master for QSPI controller\n");
+
+ qspi = spi_controller_get_devdata(ctlr);
+ platform_set_drvdata(pdev, qspi);
+
+ qspi->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(qspi->regs))
+ return dev_err_probe(&pdev->dev, PTR_ERR(qspi->regs),
+ "failed to map registers\n");
+
+ qspi->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(qspi->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(qspi->clk),
+ "could not get clock\n");
+
+ ret = clk_prepare_enable(qspi->clk);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to enable clock\n");
+
+ init_completion(&qspi->data_completion);
+ mutex_init(&qspi->op_lock);
+
+ qspi->irq = platform_get_irq(pdev, 0);
+ if (qspi->irq < 0) {
+ ret = qspi->irq;
+ goto out;
+ }
+
+ ret = devm_request_irq(&pdev->dev, qspi->irq, mchp_coreqspi_isr,
+ IRQF_SHARED, pdev->name, qspi);
+ if (ret) {
+ dev_err(&pdev->dev, "request_irq failed %d\n", ret);
+ goto out;
+ }
+
+ ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
+ ctlr->mem_ops = &mchp_coreqspi_mem_ops;
+ ctlr->setup = mchp_coreqspi_setup_op;
+ ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD |
+ SPI_TX_DUAL | SPI_TX_QUAD;
+ ctlr->dev.of_node = np;
+
+ ret = devm_spi_register_controller(&pdev->dev, ctlr);
+ if (ret) {
+ dev_err_probe(&pdev->dev, ret,
+ "spi_register_controller failed\n");
+ goto out;
+ }
+
+ return 0;
+
+out:
+ clk_disable_unprepare(qspi->clk);
+
+ return ret;
+}
+
+static int mchp_coreqspi_remove(struct platform_device *pdev)
+{
+ struct mchp_coreqspi *qspi = platform_get_drvdata(pdev);
+ u32 control = readl_relaxed(qspi->regs + REG_CONTROL);
+
+ mchp_coreqspi_disable_ints(qspi);
+ control &= ~CONTROL_ENABLE;
+ writel_relaxed(control, qspi->regs + REG_CONTROL);
+ clk_disable_unprepare(qspi->clk);
+
+ return 0;
+}
+
+static const struct of_device_id mchp_coreqspi_of_match[] = {
+ { .compatible = "microchip,coreqspi-rtl-v2" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mchp_coreqspi_of_match);
+
+static struct platform_driver mchp_coreqspi_driver = {
+ .probe = mchp_coreqspi_probe,
+ .driver = {
+ .name = "microchip,coreqspi",
+ .of_match_table = mchp_coreqspi_of_match,
+ },
+ .remove = mchp_coreqspi_remove,
+};
+module_platform_driver(mchp_coreqspi_driver);
+
+MODULE_AUTHOR("Naga Sureshkumar Relli <nagasuresh.relli@microchip.com");
+MODULE_DESCRIPTION("Microchip coreQSPI QSPI controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-microchip-core.c b/drivers/spi/spi-microchip-core.c
new file mode 100644
index 000000000..d352844c7
--- /dev/null
+++ b/drivers/spi/spi-microchip-core.c
@@ -0,0 +1,617 @@
+// SPDX-License-Identifier: (GPL-2.0)
+/*
+ * Microchip CoreSPI SPI controller driver
+ *
+ * Copyright (c) 2018-2022 Microchip Technology Inc. and its subsidiaries
+ *
+ * Author: Daire McNamara <daire.mcnamara@microchip.com>
+ * Author: Conor Dooley <conor.dooley@microchip.com>
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+
+#define MAX_LEN (0xffff)
+#define MAX_CS (8)
+#define DEFAULT_FRAMESIZE (8)
+#define FIFO_DEPTH (32)
+#define CLK_GEN_MODE1_MAX (255)
+#define CLK_GEN_MODE0_MAX (15)
+#define CLK_GEN_MIN (0)
+#define MODE_X_MASK_SHIFT (24)
+
+#define CONTROL_ENABLE BIT(0)
+#define CONTROL_MASTER BIT(1)
+#define CONTROL_RX_DATA_INT BIT(4)
+#define CONTROL_TX_DATA_INT BIT(5)
+#define CONTROL_RX_OVER_INT BIT(6)
+#define CONTROL_TX_UNDER_INT BIT(7)
+#define CONTROL_SPO BIT(24)
+#define CONTROL_SPH BIT(25)
+#define CONTROL_SPS BIT(26)
+#define CONTROL_FRAMEURUN BIT(27)
+#define CONTROL_CLKMODE BIT(28)
+#define CONTROL_BIGFIFO BIT(29)
+#define CONTROL_OENOFF BIT(30)
+#define CONTROL_RESET BIT(31)
+
+#define CONTROL_MODE_MASK GENMASK(3, 2)
+#define MOTOROLA_MODE (0)
+#define CONTROL_FRAMECNT_MASK GENMASK(23, 8)
+#define CONTROL_FRAMECNT_SHIFT (8)
+
+#define STATUS_ACTIVE BIT(14)
+#define STATUS_SSEL BIT(13)
+#define STATUS_FRAMESTART BIT(12)
+#define STATUS_TXFIFO_EMPTY_NEXT_READ BIT(11)
+#define STATUS_TXFIFO_EMPTY BIT(10)
+#define STATUS_TXFIFO_FULL_NEXT_WRITE BIT(9)
+#define STATUS_TXFIFO_FULL BIT(8)
+#define STATUS_RXFIFO_EMPTY_NEXT_READ BIT(7)
+#define STATUS_RXFIFO_EMPTY BIT(6)
+#define STATUS_RXFIFO_FULL_NEXT_WRITE BIT(5)
+#define STATUS_RXFIFO_FULL BIT(4)
+#define STATUS_TX_UNDERRUN BIT(3)
+#define STATUS_RX_OVERFLOW BIT(2)
+#define STATUS_RXDAT_RXED BIT(1)
+#define STATUS_TXDAT_SENT BIT(0)
+
+#define INT_TXDONE BIT(0)
+#define INT_RXRDY BIT(1)
+#define INT_RX_CHANNEL_OVERFLOW BIT(2)
+#define INT_TX_CHANNEL_UNDERRUN BIT(3)
+
+#define INT_ENABLE_MASK (CONTROL_RX_DATA_INT | CONTROL_TX_DATA_INT | \
+ CONTROL_RX_OVER_INT | CONTROL_TX_UNDER_INT)
+
+#define REG_CONTROL (0x00)
+#define REG_FRAME_SIZE (0x04)
+#define REG_STATUS (0x08)
+#define REG_INT_CLEAR (0x0c)
+#define REG_RX_DATA (0x10)
+#define REG_TX_DATA (0x14)
+#define REG_CLK_GEN (0x18)
+#define REG_SLAVE_SELECT (0x1c)
+#define SSEL_MASK GENMASK(7, 0)
+#define SSEL_DIRECT BIT(8)
+#define SSELOUT_SHIFT 9
+#define SSELOUT BIT(SSELOUT_SHIFT)
+#define REG_MIS (0x20)
+#define REG_RIS (0x24)
+#define REG_CONTROL2 (0x28)
+#define REG_COMMAND (0x2c)
+#define REG_PKTSIZE (0x30)
+#define REG_CMD_SIZE (0x34)
+#define REG_HWSTATUS (0x38)
+#define REG_STAT8 (0x3c)
+#define REG_CTRL2 (0x48)
+#define REG_FRAMESUP (0x50)
+
+struct mchp_corespi {
+ void __iomem *regs;
+ struct clk *clk;
+ const u8 *tx_buf;
+ u8 *rx_buf;
+ u32 clk_gen; /* divider for spi output clock generated by the controller */
+ u32 clk_mode;
+ int irq;
+ int tx_len;
+ int rx_len;
+ int pending;
+};
+
+static inline u32 mchp_corespi_read(struct mchp_corespi *spi, unsigned int reg)
+{
+ return readl(spi->regs + reg);
+}
+
+static inline void mchp_corespi_write(struct mchp_corespi *spi, unsigned int reg, u32 val)
+{
+ writel(val, spi->regs + reg);
+}
+
+static inline void mchp_corespi_enable(struct mchp_corespi *spi)
+{
+ u32 control = mchp_corespi_read(spi, REG_CONTROL);
+
+ control |= CONTROL_ENABLE;
+
+ mchp_corespi_write(spi, REG_CONTROL, control);
+}
+
+static inline void mchp_corespi_disable(struct mchp_corespi *spi)
+{
+ u32 control = mchp_corespi_read(spi, REG_CONTROL);
+
+ control &= ~CONTROL_ENABLE;
+
+ mchp_corespi_write(spi, REG_CONTROL, control);
+}
+
+static inline void mchp_corespi_read_fifo(struct mchp_corespi *spi)
+{
+ u8 data;
+ int fifo_max, i = 0;
+
+ fifo_max = min(spi->rx_len, FIFO_DEPTH);
+
+ while ((i < fifo_max) && !(mchp_corespi_read(spi, REG_STATUS) & STATUS_RXFIFO_EMPTY)) {
+ data = mchp_corespi_read(spi, REG_RX_DATA);
+
+ if (spi->rx_buf)
+ *spi->rx_buf++ = data;
+ i++;
+ }
+ spi->rx_len -= i;
+ spi->pending -= i;
+}
+
+static void mchp_corespi_enable_ints(struct mchp_corespi *spi)
+{
+ u32 control, mask = INT_ENABLE_MASK;
+
+ mchp_corespi_disable(spi);
+
+ control = mchp_corespi_read(spi, REG_CONTROL);
+
+ control |= mask;
+ mchp_corespi_write(spi, REG_CONTROL, control);
+
+ control |= CONTROL_ENABLE;
+ mchp_corespi_write(spi, REG_CONTROL, control);
+}
+
+static void mchp_corespi_disable_ints(struct mchp_corespi *spi)
+{
+ u32 control, mask = INT_ENABLE_MASK;
+
+ mchp_corespi_disable(spi);
+
+ control = mchp_corespi_read(spi, REG_CONTROL);
+ control &= ~mask;
+ mchp_corespi_write(spi, REG_CONTROL, control);
+
+ control |= CONTROL_ENABLE;
+ mchp_corespi_write(spi, REG_CONTROL, control);
+}
+
+static inline void mchp_corespi_set_xfer_size(struct mchp_corespi *spi, int len)
+{
+ u32 control;
+ u16 lenpart;
+
+ /*
+ * Disable the SPI controller. Writes to transfer length have
+ * no effect when the controller is enabled.
+ */
+ mchp_corespi_disable(spi);
+
+ /*
+ * The lower 16 bits of the frame count are stored in the control reg
+	 * for legacy reasons, but the upper 16 are written to a different
+	 * register: FRAMESUP. While both halves can be *READ* from the FRAMESUP
+	 * register, writing to its lower 16 bits is a NOP.
+ */
+ lenpart = len & 0xffff;
+
+ control = mchp_corespi_read(spi, REG_CONTROL);
+ control &= ~CONTROL_FRAMECNT_MASK;
+ control |= lenpart << CONTROL_FRAMECNT_SHIFT;
+ mchp_corespi_write(spi, REG_CONTROL, control);
+
+ lenpart = len & 0xffff0000;
+ mchp_corespi_write(spi, REG_FRAMESUP, lenpart);
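+	/*
+	 * Note that lenpart is a u16, so this write can only ever be zero;
+	 * the callers in this driver cap each chunk at FIFO_DEPTH frames, so
+	 * the upper half of the count is not needed in practice.
+	 */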
+
+ control |= CONTROL_ENABLE;
+ mchp_corespi_write(spi, REG_CONTROL, control);
+}
+
+static inline void mchp_corespi_write_fifo(struct mchp_corespi *spi)
+{
+ u8 byte;
+ int fifo_max, i = 0;
+
+ fifo_max = min(spi->tx_len, FIFO_DEPTH);
+ mchp_corespi_set_xfer_size(spi, fifo_max);
+
+ while ((i < fifo_max) && !(mchp_corespi_read(spi, REG_STATUS) & STATUS_TXFIFO_FULL)) {
+ byte = spi->tx_buf ? *spi->tx_buf++ : 0xaa;
+ mchp_corespi_write(spi, REG_TX_DATA, byte);
+ i++;
+ }
+
+ spi->tx_len -= i;
+ spi->pending += i;
+}
+
+static inline void mchp_corespi_set_framesize(struct mchp_corespi *spi, int bt)
+{
+ u32 control;
+
+ /*
+ * Disable the SPI controller. Writes to the frame size have
+ * no effect when the controller is enabled.
+ */
+ mchp_corespi_disable(spi);
+
+ mchp_corespi_write(spi, REG_FRAME_SIZE, bt);
+
+ control = mchp_corespi_read(spi, REG_CONTROL);
+ control |= CONTROL_ENABLE;
+ mchp_corespi_write(spi, REG_CONTROL, control);
+}
+
+static void mchp_corespi_set_cs(struct spi_device *spi, bool disable)
+{
+ u32 reg;
+ struct mchp_corespi *corespi = spi_master_get_devdata(spi->master);
+
+ reg = mchp_corespi_read(corespi, REG_SLAVE_SELECT);
+ reg &= ~BIT(spi->chip_select);
+ reg |= !disable << spi->chip_select;
+
+ mchp_corespi_write(corespi, REG_SLAVE_SELECT, reg);
+}
+
+static int mchp_corespi_setup(struct spi_device *spi)
+{
+ struct mchp_corespi *corespi = spi_master_get_devdata(spi->master);
+ u32 reg;
+
+ /*
+ * Active high slaves need to be specifically set to their inactive
+	 * states during probe by adding them to the "control group" and thus
+ * driving their select line low.
+ */
+ if (spi->mode & SPI_CS_HIGH) {
+ reg = mchp_corespi_read(corespi, REG_SLAVE_SELECT);
+ reg |= BIT(spi->chip_select);
+ mchp_corespi_write(corespi, REG_SLAVE_SELECT, reg);
+ }
+ return 0;
+}
+
+static void mchp_corespi_init(struct spi_master *master, struct mchp_corespi *spi)
+{
+ unsigned long clk_hz;
+ u32 control = mchp_corespi_read(spi, REG_CONTROL);
+
+ control |= CONTROL_MASTER;
+
+ control &= ~CONTROL_MODE_MASK;
+ control |= MOTOROLA_MODE;
+
+ mchp_corespi_set_framesize(spi, DEFAULT_FRAMESIZE);
+
+ /* max. possible spi clock rate is the apb clock rate */
+ clk_hz = clk_get_rate(spi->clk);
+ master->max_speed_hz = clk_hz;
+
+ /*
+ * The controller must be configured so that it doesn't remove Chip
+ * Select until the entire message has been transferred, even if at
+ * some points TX FIFO becomes empty.
+ *
+ * BIGFIFO mode is also enabled, which sets the fifo depth to 32 frames
+ * for the 8 bit transfers that this driver uses.
+ */
+ control = mchp_corespi_read(spi, REG_CONTROL);
+ control |= CONTROL_SPS | CONTROL_BIGFIFO;
+
+ mchp_corespi_write(spi, REG_CONTROL, control);
+
+ mchp_corespi_enable_ints(spi);
+
+ /*
+ * It is required to enable direct mode, otherwise control over the chip
+ * select is relinquished to the hardware. SSELOUT is enabled too so we
+ * can deal with active high slaves.
+ */
+ mchp_corespi_write(spi, REG_SLAVE_SELECT, SSELOUT | SSEL_DIRECT);
+
+ control = mchp_corespi_read(spi, REG_CONTROL);
+
+ control &= ~CONTROL_RESET;
+ control |= CONTROL_ENABLE;
+
+ mchp_corespi_write(spi, REG_CONTROL, control);
+}
+
+static inline void mchp_corespi_set_clk_gen(struct mchp_corespi *spi)
+{
+ u32 control;
+
+ mchp_corespi_disable(spi);
+
+ control = mchp_corespi_read(spi, REG_CONTROL);
+ if (spi->clk_mode)
+ control |= CONTROL_CLKMODE;
+ else
+ control &= ~CONTROL_CLKMODE;
+
+ mchp_corespi_write(spi, REG_CLK_GEN, spi->clk_gen);
+ mchp_corespi_write(spi, REG_CONTROL, control);
+ mchp_corespi_write(spi, REG_CONTROL, control | CONTROL_ENABLE);
+}
+
+static inline void mchp_corespi_set_mode(struct mchp_corespi *spi, unsigned int mode)
+{
+ u32 control, mode_val;
+
+ switch (mode & SPI_MODE_X_MASK) {
+ case SPI_MODE_0:
+ mode_val = 0;
+ break;
+ case SPI_MODE_1:
+ mode_val = CONTROL_SPH;
+ break;
+ case SPI_MODE_2:
+ mode_val = CONTROL_SPO;
+ break;
+ case SPI_MODE_3:
+ mode_val = CONTROL_SPH | CONTROL_SPO;
+ break;
+ }
+
+ /*
+ * Disable the SPI controller. Writes to the frame size have
+ * no effect when the controller is enabled.
+ */
+ mchp_corespi_disable(spi);
+
+ control = mchp_corespi_read(spi, REG_CONTROL);
+ control &= ~(SPI_MODE_X_MASK << MODE_X_MASK_SHIFT);
+ control |= mode_val;
+
+ mchp_corespi_write(spi, REG_CONTROL, control);
+
+ control |= CONTROL_ENABLE;
+ mchp_corespi_write(spi, REG_CONTROL, control);
+}
+
+static irqreturn_t mchp_corespi_interrupt(int irq, void *dev_id)
+{
+ struct spi_master *master = dev_id;
+ struct mchp_corespi *spi = spi_master_get_devdata(master);
+ u32 intfield = mchp_corespi_read(spi, REG_MIS) & 0xf;
+ bool finalise = false;
+
+ /* Interrupt line may be shared and not for us at all */
+ if (intfield == 0)
+ return IRQ_NONE;
+
+ if (intfield & INT_TXDONE) {
+ mchp_corespi_write(spi, REG_INT_CLEAR, INT_TXDONE);
+
+ if (spi->rx_len)
+ mchp_corespi_read_fifo(spi);
+
+ if (spi->tx_len)
+ mchp_corespi_write_fifo(spi);
+
+ if (!spi->rx_len)
+ finalise = true;
+ }
+
+ if (intfield & INT_RXRDY)
+ mchp_corespi_write(spi, REG_INT_CLEAR, INT_RXRDY);
+
+ if (intfield & INT_RX_CHANNEL_OVERFLOW) {
+ mchp_corespi_write(spi, REG_INT_CLEAR, INT_RX_CHANNEL_OVERFLOW);
+ finalise = true;
+ dev_err(&master->dev,
+ "%s: RX OVERFLOW: rxlen: %d, txlen: %d\n", __func__,
+ spi->rx_len, spi->tx_len);
+ }
+
+ if (intfield & INT_TX_CHANNEL_UNDERRUN) {
+ mchp_corespi_write(spi, REG_INT_CLEAR, INT_TX_CHANNEL_UNDERRUN);
+ finalise = true;
+ dev_err(&master->dev,
+ "%s: TX UNDERFLOW: rxlen: %d, txlen: %d\n", __func__,
+ spi->rx_len, spi->tx_len);
+ }
+
+ if (finalise)
+ spi_finalize_current_transfer(master);
+
+ return IRQ_HANDLED;
+}
+
+static int mchp_corespi_calculate_clkgen(struct mchp_corespi *spi,
+ unsigned long target_hz)
+{
+ unsigned long clk_hz, spi_hz, clk_gen;
+
+ clk_hz = clk_get_rate(spi->clk);
+ if (!clk_hz)
+ return -EINVAL;
+ spi_hz = min(target_hz, clk_hz);
+
+ /*
+ * There are two possible clock modes for the controller generated
+ * clock's division ratio:
+ * CLK_MODE = 0: 1 / (2^(CLK_GEN + 1)) where CLK_GEN = 0 to 15.
+ * CLK_MODE = 1: 1 / (2 * CLK_GEN + 1) where CLK_GEN = 0 to 255.
+ * First try mode 1, fall back to 0 and if we have tried both modes and
+ * we /still/ can't get a good setting, we then throw the toys out of
+	 * the pram and give up.
+ * clk_gen is the register name for the clock divider on MPFS.
+ */
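+	/*
+	 * Worked example with illustrative numbers: clk_hz = 150 MHz and a
+	 * 10 MHz target give clk_gen = DIV_ROUND_UP(150, 20) - 1 = 7 in mode
+	 * 1; a target at or above clk_hz / 2 makes clk_gen 0 and the code
+	 * falls back to mode 0 below.
+	 */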
+ clk_gen = DIV_ROUND_UP(clk_hz, 2 * spi_hz) - 1;
+ if (clk_gen > CLK_GEN_MODE1_MAX || clk_gen <= CLK_GEN_MIN) {
+ clk_gen = DIV_ROUND_UP(clk_hz, spi_hz);
+ clk_gen = fls(clk_gen) - 1;
+
+ if (clk_gen > CLK_GEN_MODE0_MAX)
+ return -EINVAL;
+
+ spi->clk_mode = 0;
+ } else {
+ spi->clk_mode = 1;
+ }
+
+ spi->clk_gen = clk_gen;
+ return 0;
+}
+
+static int mchp_corespi_transfer_one(struct spi_master *master,
+ struct spi_device *spi_dev,
+ struct spi_transfer *xfer)
+{
+ struct mchp_corespi *spi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = mchp_corespi_calculate_clkgen(spi, (unsigned long)xfer->speed_hz);
+ if (ret) {
+ dev_err(&master->dev, "failed to set clk_gen for target %u Hz\n", xfer->speed_hz);
+ return ret;
+ }
+
+ mchp_corespi_set_clk_gen(spi);
+
+ spi->tx_buf = xfer->tx_buf;
+ spi->rx_buf = xfer->rx_buf;
+ spi->tx_len = xfer->len;
+ spi->rx_len = xfer->len;
+ spi->pending = 0;
+
+ mchp_corespi_set_xfer_size(spi, (spi->tx_len > FIFO_DEPTH)
+ ? FIFO_DEPTH : spi->tx_len);
+
+ if (spi->tx_len)
+ mchp_corespi_write_fifo(spi);
+ return 1;
+}
+
+static int mchp_corespi_prepare_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct spi_device *spi_dev = msg->spi;
+ struct mchp_corespi *spi = spi_master_get_devdata(master);
+
+ mchp_corespi_set_framesize(spi, DEFAULT_FRAMESIZE);
+ mchp_corespi_set_mode(spi, spi_dev->mode);
+
+ return 0;
+}
+
+static int mchp_corespi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct mchp_corespi *spi;
+ struct resource *res;
+ u32 num_cs;
+ int ret = 0;
+
+ master = devm_spi_alloc_master(&pdev->dev, sizeof(*spi));
+ if (!master)
+ return dev_err_probe(&pdev->dev, -ENOMEM,
+ "unable to allocate master for SPI controller\n");
+
+ platform_set_drvdata(pdev, master);
+
+ if (of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs))
+ num_cs = MAX_CS;
+
+ master->num_chipselect = num_cs;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ master->setup = mchp_corespi_setup;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->transfer_one = mchp_corespi_transfer_one;
+ master->prepare_message = mchp_corespi_prepare_message;
+ master->set_cs = mchp_corespi_set_cs;
+ master->dev.of_node = pdev->dev.of_node;
+
+ spi = spi_master_get_devdata(master);
+
+ spi->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(spi->regs))
+ return PTR_ERR(spi->regs);
+
+ spi->irq = platform_get_irq(pdev, 0);
+ if (spi->irq <= 0)
+ return dev_err_probe(&pdev->dev, -ENXIO,
+ "invalid IRQ %d for SPI controller\n",
+ spi->irq);
+
+ ret = devm_request_irq(&pdev->dev, spi->irq, mchp_corespi_interrupt,
+ IRQF_SHARED, dev_name(&pdev->dev), master);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "could not request irq\n");
+
+ spi->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(spi->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(spi->clk),
+ "could not get clk\n");
+
+ ret = clk_prepare_enable(spi->clk);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to enable clock\n");
+
+ mchp_corespi_init(master, spi);
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret) {
+ mchp_corespi_disable(spi);
+ clk_disable_unprepare(spi->clk);
+ return dev_err_probe(&pdev->dev, ret,
+ "unable to register master for SPI controller\n");
+ }
+
+ dev_info(&pdev->dev, "Registered SPI controller %d\n", master->bus_num);
+
+ return 0;
+}
+
+static int mchp_corespi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct mchp_corespi *spi = spi_master_get_devdata(master);
+
+ mchp_corespi_disable_ints(spi);
+ clk_disable_unprepare(spi->clk);
+ mchp_corespi_disable(spi);
+
+ return 0;
+}
+
+#define MICROCHIP_SPI_PM_OPS (NULL)
+
+/*
+ * Platform driver data structure
+ */
+
+#if defined(CONFIG_OF)
+static const struct of_device_id mchp_corespi_dt_ids[] = {
+ { .compatible = "microchip,mpfs-spi" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mchp_corespi_dt_ids);
+#endif
+
+static struct platform_driver mchp_corespi_driver = {
+ .probe = mchp_corespi_probe,
+ .driver = {
+ .name = "microchip-corespi",
+ .pm = MICROCHIP_SPI_PM_OPS,
+ .of_match_table = of_match_ptr(mchp_corespi_dt_ids),
+ },
+ .remove = mchp_corespi_remove,
+};
+module_platform_driver(mchp_corespi_driver);
+MODULE_DESCRIPTION("Microchip coreSPI SPI controller driver");
+MODULE_AUTHOR("Daire McNamara <daire.mcnamara@microchip.com>");
+MODULE_AUTHOR("Conor Dooley <conor.dooley@microchip.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-mpc512x-psc.c b/drivers/spi/spi-mpc512x-psc.c
new file mode 100644
index 000000000..03630359c
--- /dev/null
+++ b/drivers/spi/spi-mpc512x-psc.c
@@ -0,0 +1,616 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MPC512x PSC in SPI mode driver.
+ *
+ * Copyright (C) 2007,2008 Freescale Semiconductor Inc.
+ * Original port from 52xx driver:
+ * Hongjun Chen <hong-jun.chen@freescale.com>
+ *
+ * Fork of mpc52xx_psc_spi.c:
+ * Copyright (C) 2006 TOPTICA Photonics AG., Dragos Carp
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/completion.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/spi/spi.h>
+#include <linux/fsl_devices.h>
+#include <asm/mpc52xx_psc.h>
+
+enum {
+ TYPE_MPC5121,
+ TYPE_MPC5125,
+};
+
+/*
+ * This macro abstracts the differences in the PSC register layout between
+ * MPC5121 (which uses a struct mpc52xx_psc) and MPC5125 (using mpc5125_psc).
+ */
+#define psc_addr(mps, regname) ({ \
+ void *__ret = NULL; \
+ switch (mps->type) { \
+ case TYPE_MPC5121: { \
+ struct mpc52xx_psc __iomem *psc = mps->psc; \
+ __ret = &psc->regname; \
+ }; \
+ break; \
+ case TYPE_MPC5125: { \
+ struct mpc5125_psc __iomem *psc = mps->psc; \
+ __ret = &psc->regname; \
+ }; \
+ break; \
+ } \
+ __ret; })
+
+struct mpc512x_psc_spi {
+ void (*cs_control)(struct spi_device *spi, bool on);
+
+ /* driver internal data */
+ int type;
+ void __iomem *psc;
+ struct mpc512x_psc_fifo __iomem *fifo;
+ unsigned int irq;
+ u8 bits_per_word;
+ struct clk *clk_mclk;
+ struct clk *clk_ipg;
+ u32 mclk_rate;
+
+ struct completion txisrdone;
+};
+
+/* controller state */
+struct mpc512x_psc_spi_cs {
+ int bits_per_word;
+ int speed_hz;
+};
+
+/* set clock freq, clock ramp, bits per word
+ * if t is NULL then reset the values to the default values
+ */
+static int mpc512x_psc_spi_transfer_setup(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct mpc512x_psc_spi_cs *cs = spi->controller_state;
+
+ cs->speed_hz = (t && t->speed_hz)
+ ? t->speed_hz : spi->max_speed_hz;
+ cs->bits_per_word = (t && t->bits_per_word)
+ ? t->bits_per_word : spi->bits_per_word;
+ cs->bits_per_word = ((cs->bits_per_word + 7) / 8) * 8;
+ return 0;
+}
+
+static void mpc512x_psc_spi_activate_cs(struct spi_device *spi)
+{
+ struct mpc512x_psc_spi_cs *cs = spi->controller_state;
+ struct mpc512x_psc_spi *mps = spi_master_get_devdata(spi->master);
+ u32 sicr;
+ u32 ccr;
+ int speed;
+ u16 bclkdiv;
+
+ sicr = in_be32(psc_addr(mps, sicr));
+
+ /* Set clock phase and polarity */
+ if (spi->mode & SPI_CPHA)
+ sicr |= 0x00001000;
+ else
+ sicr &= ~0x00001000;
+
+ if (spi->mode & SPI_CPOL)
+ sicr |= 0x00002000;
+ else
+ sicr &= ~0x00002000;
+
+ if (spi->mode & SPI_LSB_FIRST)
+ sicr |= 0x10000000;
+ else
+ sicr &= ~0x10000000;
+ out_be32(psc_addr(mps, sicr), sicr);
+
+ ccr = in_be32(psc_addr(mps, ccr));
+ ccr &= 0xFF000000;
+ speed = cs->speed_hz;
+ if (!speed)
+ speed = 1000000; /* default 1MHz */
+ bclkdiv = (mps->mclk_rate / speed) - 1;
+
+ ccr |= (((bclkdiv & 0xff) << 16) | (((bclkdiv >> 8) & 0xff) << 8));
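+	/*
+	 * E.g. (illustrative) a 66 MHz MCLK and a 1 MHz transfer give
+	 * bclkdiv = 65: the low byte goes into bits 23:16 of CCR and the
+	 * high byte into bits 15:8.
+	 */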
+ out_be32(psc_addr(mps, ccr), ccr);
+ mps->bits_per_word = cs->bits_per_word;
+
+ if (spi->cs_gpiod) {
+ if (mps->cs_control)
+ /* boardfile override */
+ mps->cs_control(spi, (spi->mode & SPI_CS_HIGH) ? 1 : 0);
+ else
+ /* gpiolib will deal with the inversion */
+ gpiod_set_value(spi->cs_gpiod, 1);
+ }
+}
+
+static void mpc512x_psc_spi_deactivate_cs(struct spi_device *spi)
+{
+ struct mpc512x_psc_spi *mps = spi_master_get_devdata(spi->master);
+
+ if (spi->cs_gpiod) {
+ if (mps->cs_control)
+ /* boardfile override */
+ mps->cs_control(spi, (spi->mode & SPI_CS_HIGH) ? 0 : 1);
+ else
+ /* gpiolib will deal with the inversion */
+ gpiod_set_value(spi->cs_gpiod, 0);
+ }
+}
+
+/* extract and scale size field in txsz or rxsz */
+#define MPC512x_PSC_FIFO_SZ(sz) ((sz & 0x7ff) << 2);
+
+#define EOFBYTE 1
+
+static int mpc512x_psc_spi_transfer_rxtx(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct mpc512x_psc_spi *mps = spi_master_get_devdata(spi->master);
+ struct mpc512x_psc_fifo __iomem *fifo = mps->fifo;
+ size_t tx_len = t->len;
+ size_t rx_len = t->len;
+ u8 *tx_buf = (u8 *)t->tx_buf;
+ u8 *rx_buf = (u8 *)t->rx_buf;
+
+ if (!tx_buf && !rx_buf && t->len)
+ return -EINVAL;
+
+ while (rx_len || tx_len) {
+ size_t txcount;
+ u8 data;
+ size_t fifosz;
+ size_t rxcount;
+ int rxtries;
+
+ /*
+ * send the TX bytes in as large a chunk as possible
+ * but neither exceed the TX nor the RX FIFOs
+ */
+ fifosz = MPC512x_PSC_FIFO_SZ(in_be32(&fifo->txsz));
+ txcount = min(fifosz, tx_len);
+ fifosz = MPC512x_PSC_FIFO_SZ(in_be32(&fifo->rxsz));
+ fifosz -= in_be32(&fifo->rxcnt) + 1;
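+		/*
+		 * the "+ 1" presumably keeps one slot of headroom in the RX
+		 * FIFO so the TX chunk we are about to send cannot overflow it
+		 */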
+ txcount = min(fifosz, txcount);
+ if (txcount) {
+
+ /* fill the TX FIFO */
+ while (txcount-- > 0) {
+ data = tx_buf ? *tx_buf++ : 0;
+ if (tx_len == EOFBYTE && t->cs_change)
+ setbits32(&fifo->txcmd,
+ MPC512x_PSC_FIFO_EOF);
+ out_8(&fifo->txdata_8, data);
+ tx_len--;
+ }
+
+ /* have the ISR trigger when the TX FIFO is empty */
+ reinit_completion(&mps->txisrdone);
+ out_be32(&fifo->txisr, MPC512x_PSC_FIFO_EMPTY);
+ out_be32(&fifo->tximr, MPC512x_PSC_FIFO_EMPTY);
+ wait_for_completion(&mps->txisrdone);
+ }
+
+ /*
+ * consume as much RX data as the FIFO holds, while we
+ * iterate over the transfer's TX data length
+ *
+		 * only insist on draining all the remaining RX bytes
+ * when the TX bytes were exhausted (that's at the very
+ * end of this transfer, not when still iterating over
+ * the transfer's chunks)
+ */
+ rxtries = 50;
+ do {
+
+ /*
+ * grab whatever was in the FIFO when we started
+ * looking, don't bother fetching what was added to
+ * the FIFO while we read from it -- we'll return
+ * here eventually and prefer sending out remaining
+ * TX data
+ */
+ fifosz = in_be32(&fifo->rxcnt);
+ rxcount = min(fifosz, rx_len);
+ while (rxcount-- > 0) {
+ data = in_8(&fifo->rxdata_8);
+ if (rx_buf)
+ *rx_buf++ = data;
+ rx_len--;
+ }
+
+ /*
+ * come back later if there still is TX data to send,
+ * bail out of the RX drain loop if all of the TX data
+ * was sent and all of the RX data was received (i.e.
+ * when the transmission has completed)
+ */
+ if (tx_len)
+ break;
+ if (!rx_len)
+ break;
+
+ /*
+ * TX data transmission has completed while RX data
+ * is still pending -- that's a transient situation
+ * which depends on wire speed and specific
+ * hardware implementation details (buffering) yet
+ * should resolve very quickly
+ *
+ * just yield for a moment to not hog the CPU for
+ * too long when running SPI at low speed
+ *
+ * the timeout range is rather arbitrary and tries
+ * to balance throughput against system load; the
+ * chosen values result in a minimal timeout of 50
+ * times 10us and thus work at speeds as low as
+ * some 20kbps, while the maximum timeout at the
+ * transfer's end could be 5ms _if_ nothing else
+ * ticks in the system _and_ RX data still wasn't
+ * received, which only occurs in situations that
+ * are exceptional; removing the unpredictability
+ * of the timeout either decreases throughput
+ * (longer timeouts), or puts more load on the
+ * system (fixed short timeouts) or requires the
+ * use of a timeout API instead of a counter and an
+ * unknown inner delay
+ */
+ usleep_range(10, 100);
+
+ } while (--rxtries > 0);
+ if (!tx_len && rx_len && !rxtries) {
+ /*
+ * not enough RX bytes even after several retries
+ * and the resulting rather long timeout?
+ */
+ rxcount = in_be32(&fifo->rxcnt);
+ dev_warn(&spi->dev,
+ "short xfer, missing %zd RX bytes, FIFO level %zd\n",
+ rx_len, rxcount);
+ }
+
+ /*
+ * drain and drop RX data which "should not be there" in
+ * the first place, for undisturbed transmission this turns
+ * into a NOP (except for the FIFO level fetch)
+ */
+ if (!tx_len && !rx_len) {
+ while (in_be32(&fifo->rxcnt))
+ in_8(&fifo->rxdata_8);
+ }
+
+ }
+ return 0;
+}
+
+static int mpc512x_psc_spi_msg_xfer(struct spi_master *master,
+ struct spi_message *m)
+{
+ struct spi_device *spi;
+ unsigned cs_change;
+ int status;
+ struct spi_transfer *t;
+
+ spi = m->spi;
+ cs_change = 1;
+ status = 0;
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ status = mpc512x_psc_spi_transfer_setup(spi, t);
+ if (status < 0)
+ break;
+
+ if (cs_change)
+ mpc512x_psc_spi_activate_cs(spi);
+ cs_change = t->cs_change;
+
+ status = mpc512x_psc_spi_transfer_rxtx(spi, t);
+ if (status)
+ break;
+ m->actual_length += t->len;
+
+ spi_transfer_delay_exec(t);
+
+ if (cs_change)
+ mpc512x_psc_spi_deactivate_cs(spi);
+ }
+
+ m->status = status;
+ if (m->complete)
+ m->complete(m->context);
+
+ if (status || !cs_change)
+ mpc512x_psc_spi_deactivate_cs(spi);
+
+ mpc512x_psc_spi_transfer_setup(spi, NULL);
+
+ spi_finalize_current_message(master);
+ return status;
+}
+
+static int mpc512x_psc_spi_prep_xfer_hw(struct spi_master *master)
+{
+ struct mpc512x_psc_spi *mps = spi_master_get_devdata(master);
+
+ dev_dbg(&master->dev, "%s()\n", __func__);
+
+ /* Zero MR2 */
+ in_8(psc_addr(mps, mr2));
+ out_8(psc_addr(mps, mr2), 0x0);
+
+ /* enable transmitter/receiver */
+ out_8(psc_addr(mps, command), MPC52xx_PSC_TX_ENABLE | MPC52xx_PSC_RX_ENABLE);
+
+ return 0;
+}
+
+static int mpc512x_psc_spi_unprep_xfer_hw(struct spi_master *master)
+{
+ struct mpc512x_psc_spi *mps = spi_master_get_devdata(master);
+ struct mpc512x_psc_fifo __iomem *fifo = mps->fifo;
+
+ dev_dbg(&master->dev, "%s()\n", __func__);
+
+ /* disable transmitter/receiver and fifo interrupt */
+ out_8(psc_addr(mps, command), MPC52xx_PSC_TX_DISABLE | MPC52xx_PSC_RX_DISABLE);
+ out_be32(&fifo->tximr, 0);
+
+ return 0;
+}
+
+static int mpc512x_psc_spi_setup(struct spi_device *spi)
+{
+ struct mpc512x_psc_spi_cs *cs = spi->controller_state;
+
+ if (spi->bits_per_word % 8)
+ return -EINVAL;
+
+ if (!cs) {
+ cs = kzalloc(sizeof(*cs), GFP_KERNEL);
+ if (!cs)
+ return -ENOMEM;
+
+ spi->controller_state = cs;
+ }
+
+ cs->bits_per_word = spi->bits_per_word;
+ cs->speed_hz = spi->max_speed_hz;
+
+ return 0;
+}
+
+static void mpc512x_psc_spi_cleanup(struct spi_device *spi)
+{
+ kfree(spi->controller_state);
+}
+
+static int mpc512x_psc_spi_port_config(struct spi_master *master,
+ struct mpc512x_psc_spi *mps)
+{
+ struct mpc512x_psc_fifo __iomem *fifo = mps->fifo;
+ u32 sicr;
+ u32 ccr;
+ int speed;
+ u16 bclkdiv;
+
+ /* Reset the PSC into a known state */
+ out_8(psc_addr(mps, command), MPC52xx_PSC_RST_RX);
+ out_8(psc_addr(mps, command), MPC52xx_PSC_RST_TX);
+ out_8(psc_addr(mps, command), MPC52xx_PSC_TX_DISABLE | MPC52xx_PSC_RX_DISABLE);
+
+ /* Disable PSC interrupts; all useful interrupts are in the FIFO */
+ out_be16(psc_addr(mps, isr_imr.imr), 0);
+
+ /* Disable fifo interrupts, will be enabled later */
+ out_be32(&fifo->tximr, 0);
+ out_be32(&fifo->rximr, 0);
+
+ /* Setup fifo slice address and size */
+ /*out_be32(&fifo->txsz, 0x0fe00004);*/
+ /*out_be32(&fifo->rxsz, 0x0ff00004);*/
+
+ sicr = 0x01000000 | /* SIM = 0001 -- 8 bit */
+ 0x00800000 | /* GenClk = 1 -- internal clk */
+ 0x00008000 | /* SPI = 1 */
+ 0x00004000 | /* MSTR = 1 -- SPI master */
+ 0x00000800; /* UseEOF = 1 -- SS low until EOF */
+
+ out_be32(psc_addr(mps, sicr), sicr);
+
+ ccr = in_be32(psc_addr(mps, ccr));
+ ccr &= 0xFF000000;
+ speed = 1000000; /* default 1MHz */
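+ /* BitClkDiv is split across CCR: low byte into bits 23:16, high byte into bits 15:8 */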
+ bclkdiv = (mps->mclk_rate / speed) - 1;
+ ccr |= (((bclkdiv & 0xff) << 16) | (((bclkdiv >> 8) & 0xff) << 8));
+ out_be32(psc_addr(mps, ccr), ccr);
+
+ /* Set 2ms DTL delay */
+ out_8(psc_addr(mps, ctur), 0x00);
+ out_8(psc_addr(mps, ctlr), 0x82);
+
+ /* we don't use the alarms */
+ out_be32(&fifo->rxalarm, 0xfff);
+ out_be32(&fifo->txalarm, 0);
+
+ /* Enable FIFO slices for Rx/Tx */
+ out_be32(&fifo->rxcmd,
+ MPC512x_PSC_FIFO_ENABLE_SLICE | MPC512x_PSC_FIFO_ENABLE_DMA);
+ out_be32(&fifo->txcmd,
+ MPC512x_PSC_FIFO_ENABLE_SLICE | MPC512x_PSC_FIFO_ENABLE_DMA);
+
+ mps->bits_per_word = 8;
+
+ return 0;
+}
+
+static irqreturn_t mpc512x_psc_spi_isr(int irq, void *dev_id)
+{
+ struct mpc512x_psc_spi *mps = (struct mpc512x_psc_spi *)dev_id;
+ struct mpc512x_psc_fifo __iomem *fifo = mps->fifo;
+
+ /* clear interrupt and wake up the rx/tx routine */
+ if (in_be32(&fifo->txisr) &
+ in_be32(&fifo->tximr) & MPC512x_PSC_FIFO_EMPTY) {
+ out_be32(&fifo->txisr, MPC512x_PSC_FIFO_EMPTY);
+ out_be32(&fifo->tximr, 0);
+ complete(&mps->txisrdone);
+ return IRQ_HANDLED;
+ }
+ return IRQ_NONE;
+}
+
+static int mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr,
+ u32 size, unsigned int irq)
+{
+ struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
+ struct mpc512x_psc_spi *mps;
+ struct spi_master *master;
+ int ret;
+ void *tempp;
+ struct clk *clk;
+
+ master = spi_alloc_master(dev, sizeof(*mps));
+ if (master == NULL)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, master);
+ mps = spi_master_get_devdata(master);
+ mps->type = (int)of_device_get_match_data(dev);
+ mps->irq = irq;
+
+ if (pdata) {
+ mps->cs_control = pdata->cs_control;
+ master->bus_num = pdata->bus_num;
+ master->num_chipselect = pdata->max_chipselect;
+ }
+
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST;
+ master->setup = mpc512x_psc_spi_setup;
+ master->prepare_transfer_hardware = mpc512x_psc_spi_prep_xfer_hw;
+ master->transfer_one_message = mpc512x_psc_spi_msg_xfer;
+ master->unprepare_transfer_hardware = mpc512x_psc_spi_unprep_xfer_hw;
+ master->use_gpio_descriptors = true;
+ master->cleanup = mpc512x_psc_spi_cleanup;
+ master->dev.of_node = dev->of_node;
+
+ tempp = devm_ioremap(dev, regaddr, size);
+ if (!tempp) {
+ dev_err(dev, "could not ioremap I/O port range\n");
+ ret = -EFAULT;
+ goto free_master;
+ }
+ mps->psc = tempp;
+ mps->fifo =
+ (struct mpc512x_psc_fifo *)(tempp + sizeof(struct mpc52xx_psc));
+ ret = devm_request_irq(dev, mps->irq, mpc512x_psc_spi_isr, IRQF_SHARED,
+ "mpc512x-psc-spi", mps);
+ if (ret)
+ goto free_master;
+ init_completion(&mps->txisrdone);
+
+ clk = devm_clk_get(dev, "mclk");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ goto free_master;
+ }
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ goto free_master;
+ mps->clk_mclk = clk;
+ mps->mclk_rate = clk_get_rate(clk);
+
+ clk = devm_clk_get(dev, "ipg");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ goto free_mclk_clock;
+ }
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ goto free_mclk_clock;
+ mps->clk_ipg = clk;
+
+ ret = mpc512x_psc_spi_port_config(master, mps);
+ if (ret < 0)
+ goto free_ipg_clock;
+
+ ret = devm_spi_register_master(dev, master);
+ if (ret < 0)
+ goto free_ipg_clock;
+
+ return ret;
+
+free_ipg_clock:
+ clk_disable_unprepare(mps->clk_ipg);
+free_mclk_clock:
+ clk_disable_unprepare(mps->clk_mclk);
+free_master:
+ spi_master_put(master);
+
+ return ret;
+}
+
+static int mpc512x_psc_spi_do_remove(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct mpc512x_psc_spi *mps = spi_master_get_devdata(master);
+
+ clk_disable_unprepare(mps->clk_mclk);
+ clk_disable_unprepare(mps->clk_ipg);
+
+ return 0;
+}
+
+static int mpc512x_psc_spi_of_probe(struct platform_device *op)
+{
+ const u32 *regaddr_p;
+ u64 regaddr64, size64;
+
+ regaddr_p = of_get_address(op->dev.of_node, 0, &size64, NULL);
+ if (!regaddr_p) {
+ dev_err(&op->dev, "Invalid PSC address\n");
+ return -EINVAL;
+ }
+ regaddr64 = of_translate_address(op->dev.of_node, regaddr_p);
+
+ return mpc512x_psc_spi_do_probe(&op->dev, (u32) regaddr64, (u32) size64,
+ irq_of_parse_and_map(op->dev.of_node, 0));
+}
+
+static int mpc512x_psc_spi_of_remove(struct platform_device *op)
+{
+ return mpc512x_psc_spi_do_remove(&op->dev);
+}
+
+static const struct of_device_id mpc512x_psc_spi_of_match[] = {
+ { .compatible = "fsl,mpc5121-psc-spi", .data = (void *)TYPE_MPC5121 },
+ { .compatible = "fsl,mpc5125-psc-spi", .data = (void *)TYPE_MPC5125 },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, mpc512x_psc_spi_of_match);
+
+static struct platform_driver mpc512x_psc_spi_of_driver = {
+ .probe = mpc512x_psc_spi_of_probe,
+ .remove = mpc512x_psc_spi_of_remove,
+ .driver = {
+ .name = "mpc512x-psc-spi",
+ .of_match_table = mpc512x_psc_spi_of_match,
+ },
+};
+module_platform_driver(mpc512x_psc_spi_of_driver);
+
+MODULE_AUTHOR("John Rigby");
+MODULE_DESCRIPTION("MPC512x PSC SPI Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-mpc52xx-psc.c b/drivers/spi/spi-mpc52xx-psc.c
new file mode 100644
index 000000000..609311231
--- /dev/null
+++ b/drivers/spi/spi-mpc52xx-psc.c
@@ -0,0 +1,455 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MPC52xx PSC in SPI mode driver.
+ *
+ * Maintainer: Dragos Carp
+ *
+ * Copyright (C) 2006 TOPTICA Photonics AG.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/spi/spi.h>
+#include <linux/fsl_devices.h>
+#include <linux/slab.h>
+#include <linux/of_irq.h>
+
+#include <asm/mpc52xx.h>
+#include <asm/mpc52xx_psc.h>
+
+#define MCLK 20000000 /* PSC port MClk in hz */
+
+struct mpc52xx_psc_spi {
+ /* fsl_spi_platform data */
+ void (*cs_control)(struct spi_device *spi, bool on);
+ u32 sysclk;
+
+ /* driver internal data */
+ struct mpc52xx_psc __iomem *psc;
+ struct mpc52xx_psc_fifo __iomem *fifo;
+ unsigned int irq;
+ u8 bits_per_word;
+
+ struct completion done;
+};
+
+/* controller state */
+struct mpc52xx_psc_spi_cs {
+ int bits_per_word;
+ int speed_hz;
+};
+
+/* set clock freq, clock ramp, bits per word
+ * if t is NULL then reset the values to the default values
+ */
+static int mpc52xx_psc_spi_transfer_setup(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct mpc52xx_psc_spi_cs *cs = spi->controller_state;
+
+ cs->speed_hz = (t && t->speed_hz)
+ ? t->speed_hz : spi->max_speed_hz;
+ cs->bits_per_word = (t && t->bits_per_word)
+ ? t->bits_per_word : spi->bits_per_word;
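+ /* round the word size up to a whole number of bytes, e.g. 12 bits becomes 16 */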
+ cs->bits_per_word = ((cs->bits_per_word + 7) / 8) * 8;
+ return 0;
+}
+
+static void mpc52xx_psc_spi_activate_cs(struct spi_device *spi)
+{
+ struct mpc52xx_psc_spi_cs *cs = spi->controller_state;
+ struct mpc52xx_psc_spi *mps = spi_master_get_devdata(spi->master);
+ struct mpc52xx_psc __iomem *psc = mps->psc;
+ u32 sicr;
+ u16 ccr;
+
+ sicr = in_be32(&psc->sicr);
+
+ /* Set clock phase and polarity */
+ if (spi->mode & SPI_CPHA)
+ sicr |= 0x00001000;
+ else
+ sicr &= ~0x00001000;
+ if (spi->mode & SPI_CPOL)
+ sicr |= 0x00002000;
+ else
+ sicr &= ~0x00002000;
+
+ if (spi->mode & SPI_LSB_FIRST)
+ sicr |= 0x10000000;
+ else
+ sicr &= ~0x10000000;
+ out_be32(&psc->sicr, sicr);
+
+ /* Set clock frequency and bits per word
+ * Because psc->ccr is defined as a 16-bit register instead of 32-bit,
+ * just set the lower byte of BitClkDiv
+ */
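+ /* e.g. the 20 MHz MCLK and the default 1 MHz SPI clock give a divider byte of 19 */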
+ ccr = in_be16((u16 __iomem *)&psc->ccr);
+ ccr &= 0xFF00;
+ if (cs->speed_hz)
+ ccr |= (MCLK / cs->speed_hz - 1) & 0xFF;
+ else /* by default SPI Clk 1MHz */
+ ccr |= (MCLK / 1000000 - 1) & 0xFF;
+ out_be16((u16 __iomem *)&psc->ccr, ccr);
+ mps->bits_per_word = cs->bits_per_word;
+
+ if (mps->cs_control)
+ mps->cs_control(spi, (spi->mode & SPI_CS_HIGH) ? 1 : 0);
+}
+
+static void mpc52xx_psc_spi_deactivate_cs(struct spi_device *spi)
+{
+ struct mpc52xx_psc_spi *mps = spi_master_get_devdata(spi->master);
+
+ if (mps->cs_control)
+ mps->cs_control(spi, (spi->mode & SPI_CS_HIGH) ? 0 : 1);
+}
+
+#define MPC52xx_PSC_BUFSIZE (MPC52xx_PSC_RFNUM_MASK + 1)
+/* wake up when 80% fifo full */
+#define MPC52xx_PSC_RFALARM (MPC52xx_PSC_BUFSIZE * 20 / 100)
+
+static int mpc52xx_psc_spi_transfer_rxtx(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct mpc52xx_psc_spi *mps = spi_master_get_devdata(spi->master);
+ struct mpc52xx_psc __iomem *psc = mps->psc;
+ struct mpc52xx_psc_fifo __iomem *fifo = mps->fifo;
+ unsigned rb = 0; /* number of bytes received */
+ unsigned sb = 0; /* number of bytes sent */
+ unsigned char *rx_buf = (unsigned char *)t->rx_buf;
+ unsigned char *tx_buf = (unsigned char *)t->tx_buf;
+ unsigned rfalarm;
+ unsigned send_at_once = MPC52xx_PSC_BUFSIZE;
+ unsigned recv_at_once;
+ int last_block = 0;
+
+ if (!t->tx_buf && !t->rx_buf && t->len)
+ return -EINVAL;
+
+ /* enable transmitter/receiver */
+ out_8(&psc->command, MPC52xx_PSC_TX_ENABLE | MPC52xx_PSC_RX_ENABLE);
+ while (rb < t->len) {
+ if (t->len - rb > MPC52xx_PSC_BUFSIZE) {
+ rfalarm = MPC52xx_PSC_RFALARM;
+ last_block = 0;
+ } else {
+ send_at_once = t->len - sb;
+ rfalarm = MPC52xx_PSC_BUFSIZE - (t->len - rb);
+ last_block = 1;
+ }
+
+ dev_dbg(&spi->dev, "send %d bytes...\n", send_at_once);
+ for (; send_at_once; sb++, send_at_once--) {
+ /* set EOF flag before the last word is sent */
+ if (send_at_once == 1 && last_block)
+ out_8(&psc->ircr2, 0x01);
+
+ if (tx_buf)
+ out_8(&psc->mpc52xx_psc_buffer_8, tx_buf[sb]);
+ else
+ out_8(&psc->mpc52xx_psc_buffer_8, 0);
+ }
+
+
+ /* enable interrupts and wait for wake up
+ * if just one byte is expected the Rx FIFO generates no
+ * FFULL interrupt, so activate the RxRDY interrupt
+ */
+ out_8(&psc->command, MPC52xx_PSC_SEL_MODE_REG_1);
+ if (t->len - rb == 1) {
+ out_8(&psc->mode, 0);
+ } else {
+ out_8(&psc->mode, MPC52xx_PSC_MODE_FFULL);
+ out_be16(&fifo->rfalarm, rfalarm);
+ }
+ out_be16(&psc->mpc52xx_psc_imr, MPC52xx_PSC_IMR_RXRDY);
+ wait_for_completion(&mps->done);
+ recv_at_once = in_be16(&fifo->rfnum);
+ dev_dbg(&spi->dev, "%d bytes received\n", recv_at_once);
+
+ send_at_once = recv_at_once;
+ if (rx_buf) {
+ for (; recv_at_once; rb++, recv_at_once--)
+ rx_buf[rb] = in_8(&psc->mpc52xx_psc_buffer_8);
+ } else {
+ for (; recv_at_once; rb++, recv_at_once--)
+ in_8(&psc->mpc52xx_psc_buffer_8);
+ }
+ }
+ /* disable transmitter/receiver */
+ out_8(&psc->command, MPC52xx_PSC_TX_DISABLE | MPC52xx_PSC_RX_DISABLE);
+
+ return 0;
+}
+
+int mpc52xx_psc_spi_transfer_one_message(struct spi_controller *ctlr,
+ struct spi_message *m)
+{
+ struct spi_device *spi;
+ struct spi_transfer *t = NULL;
+ unsigned cs_change;
+ int status;
+
+ spi = m->spi;
+ cs_change = 1;
+ status = 0;
+ list_for_each_entry (t, &m->transfers, transfer_list) {
+ if (t->bits_per_word || t->speed_hz) {
+ status = mpc52xx_psc_spi_transfer_setup(spi, t);
+ if (status < 0)
+ break;
+ }
+
+ if (cs_change)
+ mpc52xx_psc_spi_activate_cs(spi);
+ cs_change = t->cs_change;
+
+ status = mpc52xx_psc_spi_transfer_rxtx(spi, t);
+ if (status)
+ break;
+ m->actual_length += t->len;
+
+ spi_transfer_delay_exec(t);
+
+ if (cs_change)
+ mpc52xx_psc_spi_deactivate_cs(spi);
+ }
+
+ m->status = status;
+ if (status || !cs_change)
+ mpc52xx_psc_spi_deactivate_cs(spi);
+
+ mpc52xx_psc_spi_transfer_setup(spi, NULL);
+
+ spi_finalize_current_message(ctlr);
+
+ return 0;
+}
+
+static int mpc52xx_psc_spi_setup(struct spi_device *spi)
+{
+ struct mpc52xx_psc_spi_cs *cs = spi->controller_state;
+
+ if (spi->bits_per_word % 8)
+ return -EINVAL;
+
+ if (!cs) {
+ cs = kzalloc(sizeof(*cs), GFP_KERNEL);
+ if (!cs)
+ return -ENOMEM;
+ spi->controller_state = cs;
+ }
+
+ cs->bits_per_word = spi->bits_per_word;
+ cs->speed_hz = spi->max_speed_hz;
+
+ return 0;
+}
+
+static void mpc52xx_psc_spi_cleanup(struct spi_device *spi)
+{
+ kfree(spi->controller_state);
+}
+
+static int mpc52xx_psc_spi_port_config(int psc_id, struct mpc52xx_psc_spi *mps)
+{
+ struct mpc52xx_psc __iomem *psc = mps->psc;
+ struct mpc52xx_psc_fifo __iomem *fifo = mps->fifo;
+ u32 mclken_div;
+ int ret;
+
+ /* default sysclk is 512MHz */
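+ /* e.g. the default 512 MHz sysclk over the 20 MHz MCLK gives mclken_div = 25 */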
+ mclken_div = (mps->sysclk ? mps->sysclk : 512000000) / MCLK;
+ ret = mpc52xx_set_psc_clkdiv(psc_id, mclken_div);
+ if (ret)
+ return ret;
+
+ /* Reset the PSC into a known state */
+ out_8(&psc->command, MPC52xx_PSC_RST_RX);
+ out_8(&psc->command, MPC52xx_PSC_RST_TX);
+ out_8(&psc->command, MPC52xx_PSC_TX_DISABLE | MPC52xx_PSC_RX_DISABLE);
+
+ /* Disable interrupts, interrupts are based on alarm level */
+ out_be16(&psc->mpc52xx_psc_imr, 0);
+ out_8(&psc->command, MPC52xx_PSC_SEL_MODE_REG_1);
+ out_8(&fifo->rfcntl, 0);
+ out_8(&psc->mode, MPC52xx_PSC_MODE_FFULL);
+
+ /* Configure 8bit codec mode as a SPI master and use EOF flags */
+ /* SICR_SIM_CODEC8|SICR_GENCLK|SICR_SPI|SICR_MSTR|SICR_USEEOF */
+ out_be32(&psc->sicr, 0x0180C800);
+ out_be16((u16 __iomem *)&psc->ccr, 0x070F); /* default SPI Clk 1MHz */
+
+ /* Set 2ms DTL delay */
+ out_8(&psc->ctur, 0x00);
+ out_8(&psc->ctlr, 0x84);
+
+ mps->bits_per_word = 8;
+
+ return 0;
+}
+
+static irqreturn_t mpc52xx_psc_spi_isr(int irq, void *dev_id)
+{
+ struct mpc52xx_psc_spi *mps = (struct mpc52xx_psc_spi *)dev_id;
+ struct mpc52xx_psc __iomem *psc = mps->psc;
+
+ /* disable interrupt and wake up the work queue */
+ if (in_be16(&psc->mpc52xx_psc_isr) & MPC52xx_PSC_IMR_RXRDY) {
+ out_be16(&psc->mpc52xx_psc_imr, 0);
+ complete(&mps->done);
+ return IRQ_HANDLED;
+ }
+ return IRQ_NONE;
+}
+
+/* bus_num is used only for the case dev->platform_data == NULL */
+static int mpc52xx_psc_spi_do_probe(struct device *dev, u32 regaddr,
+ u32 size, unsigned int irq, s16 bus_num)
+{
+ struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
+ struct mpc52xx_psc_spi *mps;
+ struct spi_master *master;
+ int ret;
+
+ master = spi_alloc_master(dev, sizeof(*mps));
+ if (master == NULL)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, master);
+ mps = spi_master_get_devdata(master);
+
+ /* the spi->mode bits understood by this driver: */
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST;
+
+ mps->irq = irq;
+ if (pdata == NULL) {
+ dev_warn(dev,
+ "probe called without platform data, no cs_control function will be called\n");
+ mps->cs_control = NULL;
+ mps->sysclk = 0;
+ master->bus_num = bus_num;
+ master->num_chipselect = 255;
+ } else {
+ mps->cs_control = pdata->cs_control;
+ mps->sysclk = pdata->sysclk;
+ master->bus_num = pdata->bus_num;
+ master->num_chipselect = pdata->max_chipselect;
+ }
+ master->setup = mpc52xx_psc_spi_setup;
+ master->transfer_one_message = mpc52xx_psc_spi_transfer_one_message;
+ master->cleanup = mpc52xx_psc_spi_cleanup;
+ master->dev.of_node = dev->of_node;
+
+ mps->psc = ioremap(regaddr, size);
+ if (!mps->psc) {
+ dev_err(dev, "could not ioremap I/O port range\n");
+ ret = -EFAULT;
+ goto free_master;
+ }
+ /* On the 5200, the FIFO regs are immediately adjacent to the PSC regs */
+ mps->fifo = ((void __iomem *)mps->psc) + sizeof(struct mpc52xx_psc);
+
+ ret = request_irq(mps->irq, mpc52xx_psc_spi_isr, 0, "mpc52xx-psc-spi",
+ mps);
+ if (ret)
+ goto free_master;
+
+ ret = mpc52xx_psc_spi_port_config(master->bus_num, mps);
+ if (ret < 0) {
+ dev_err(dev, "can't configure PSC! Is it capable of SPI?\n");
+ goto free_irq;
+ }
+
+ init_completion(&mps->done);
+
+ ret = spi_register_master(master);
+ if (ret < 0)
+ goto free_irq;
+
+ return ret;
+
+free_irq:
+ free_irq(mps->irq, mps);
+free_master:
+ if (mps->psc)
+ iounmap(mps->psc);
+ spi_master_put(master);
+
+ return ret;
+}
+
+static int mpc52xx_psc_spi_of_probe(struct platform_device *op)
+{
+ const u32 *regaddr_p;
+ u64 regaddr64, size64;
+ s16 id = -1;
+
+ regaddr_p = of_get_address(op->dev.of_node, 0, &size64, NULL);
+ if (!regaddr_p) {
+ dev_err(&op->dev, "Invalid PSC address\n");
+ return -EINVAL;
+ }
+ regaddr64 = of_translate_address(op->dev.of_node, regaddr_p);
+
+ /* get PSC id (1..6, used by port_config) */
+ if (op->dev.platform_data == NULL) {
+ const u32 *psc_nump;
+
+ psc_nump = of_get_property(op->dev.of_node, "cell-index", NULL);
+ if (!psc_nump || *psc_nump > 5) {
+ dev_err(&op->dev, "Invalid cell-index property\n");
+ return -EINVAL;
+ }
+ id = *psc_nump + 1;
+ }
+
+ return mpc52xx_psc_spi_do_probe(&op->dev, (u32)regaddr64, (u32)size64,
+ irq_of_parse_and_map(op->dev.of_node, 0), id);
+}
+
+static int mpc52xx_psc_spi_of_remove(struct platform_device *op)
+{
+ struct spi_master *master = spi_master_get(platform_get_drvdata(op));
+ struct mpc52xx_psc_spi *mps = spi_master_get_devdata(master);
+
+ spi_unregister_master(master);
+ free_irq(mps->irq, mps);
+ if (mps->psc)
+ iounmap(mps->psc);
+ spi_master_put(master);
+
+ return 0;
+}
+
+static const struct of_device_id mpc52xx_psc_spi_of_match[] = {
+ { .compatible = "fsl,mpc5200-psc-spi", },
+ { .compatible = "mpc5200-psc-spi", }, /* old */
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, mpc52xx_psc_spi_of_match);
+
+static struct platform_driver mpc52xx_psc_spi_of_driver = {
+ .probe = mpc52xx_psc_spi_of_probe,
+ .remove = mpc52xx_psc_spi_of_remove,
+ .driver = {
+ .name = "mpc52xx-psc-spi",
+ .of_match_table = mpc52xx_psc_spi_of_match,
+ },
+};
+module_platform_driver(mpc52xx_psc_spi_of_driver);
+
+MODULE_AUTHOR("Dragos Carp");
+MODULE_DESCRIPTION("MPC52xx PSC SPI Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-mpc52xx.c b/drivers/spi/spi-mpc52xx.c
new file mode 100644
index 000000000..7b64e64c6
--- /dev/null
+++ b/drivers/spi/spi-mpc52xx.c
@@ -0,0 +1,550 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * MPC52xx SPI bus driver.
+ *
+ * Copyright (C) 2008 Secret Lab Technologies Ltd.
+ *
+ * This is the driver for the MPC5200's dedicated SPI controller.
+ *
+ * Note: this driver does not support the MPC5200 PSC in SPI mode. For
+ * that driver see drivers/spi/mpc52xx_psc_spi.c
+ */
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/of_platform.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/spi/spi.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include <asm/time.h>
+#include <asm/mpc52xx.h>
+
+MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>");
+MODULE_DESCRIPTION("MPC52xx SPI (non-PSC) Driver");
+MODULE_LICENSE("GPL");
+
+/* Register offsets */
+#define SPI_CTRL1 0x00
+#define SPI_CTRL1_SPIE (1 << 7)
+#define SPI_CTRL1_SPE (1 << 6)
+#define SPI_CTRL1_MSTR (1 << 4)
+#define SPI_CTRL1_CPOL (1 << 3)
+#define SPI_CTRL1_CPHA (1 << 2)
+#define SPI_CTRL1_SSOE (1 << 1)
+#define SPI_CTRL1_LSBFE (1 << 0)
+
+#define SPI_CTRL2 0x01
+#define SPI_BRR 0x04
+
+#define SPI_STATUS 0x05
+#define SPI_STATUS_SPIF (1 << 7)
+#define SPI_STATUS_WCOL (1 << 6)
+#define SPI_STATUS_MODF (1 << 4)
+
+#define SPI_DATA 0x09
+#define SPI_PORTDATA 0x0d
+#define SPI_DATADIR 0x10
+
+/* FSM state return values */
+#define FSM_STOP 0 /* Nothing more for the state machine to */
+ /* do. If something interesting happens */
+ /* then an IRQ will be received */
+#define FSM_POLL 1 /* need to poll for completion, an IRQ is */
+ /* not expected */
+#define FSM_CONTINUE 2 /* Keep iterating the state machine */
+
+/* Driver internal data */
+struct mpc52xx_spi {
+ struct spi_master *master;
+ void __iomem *regs;
+ int irq0; /* MODF irq */
+ int irq1; /* SPIF irq */
+ unsigned int ipb_freq;
+
+ /* Statistics; not used now, but will be reintroduced for debugfs */
+ int msg_count;
+ int wcol_count;
+ int wcol_ticks;
+ u32 wcol_tx_timestamp;
+ int modf_count;
+ int byte_count;
+
+ struct list_head queue; /* queue of pending messages */
+ spinlock_t lock;
+ struct work_struct work;
+
+ /* Details of current transfer (length, and buffer pointers) */
+ struct spi_message *message; /* current message */
+ struct spi_transfer *transfer; /* current transfer */
+ int (*state)(int irq, struct mpc52xx_spi *ms, u8 status, u8 data);
+ int len;
+ int timestamp;
+ u8 *rx_buf;
+ const u8 *tx_buf;
+ int cs_change;
+ int gpio_cs_count;
+ struct gpio_desc **gpio_cs;
+};
+
+/*
+ * CS control function
+ */
+static void mpc52xx_spi_chipsel(struct mpc52xx_spi *ms, int value)
+{
+ int cs;
+
+ if (ms->gpio_cs_count > 0) {
+ cs = ms->message->spi->chip_select;
+ gpiod_set_value(ms->gpio_cs[cs], value);
+ } else {
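+ /* no CS GPIOs: bit 3 of PORTDATA drives /SS, 0 asserts it and 0x08 deasserts it */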
+ out_8(ms->regs + SPI_PORTDATA, value ? 0 : 0x08);
+ }
+}
+
+/*
+ * Start a new transfer. This is called both by the idle state
+ * for the first transfer in a message, and by the wait state when the
+ * previous transfer in a message is complete.
+ */
+static void mpc52xx_spi_start_transfer(struct mpc52xx_spi *ms)
+{
+ ms->rx_buf = ms->transfer->rx_buf;
+ ms->tx_buf = ms->transfer->tx_buf;
+ ms->len = ms->transfer->len;
+
+ /* Activate the chip select */
+ if (ms->cs_change)
+ mpc52xx_spi_chipsel(ms, 1);
+ ms->cs_change = ms->transfer->cs_change;
+
+ /* Write out the first byte */
+ ms->wcol_tx_timestamp = mftb();
+ if (ms->tx_buf)
+ out_8(ms->regs + SPI_DATA, *ms->tx_buf++);
+ else
+ out_8(ms->regs + SPI_DATA, 0);
+}
+
+/* Forward declaration of state handlers */
+static int mpc52xx_spi_fsmstate_transfer(int irq, struct mpc52xx_spi *ms,
+ u8 status, u8 data);
+static int mpc52xx_spi_fsmstate_wait(int irq, struct mpc52xx_spi *ms,
+ u8 status, u8 data);
+
+/*
+ * IDLE state
+ *
+ * No transfers are in progress; if another transfer is pending then retrieve
+ * it and kick it off. Otherwise, stop processing the state machine
+ */
+static int
+mpc52xx_spi_fsmstate_idle(int irq, struct mpc52xx_spi *ms, u8 status, u8 data)
+{
+ struct spi_device *spi;
+ int spr, sppr;
+ u8 ctrl1;
+
+ if (status && irq)
+ dev_err(&ms->master->dev, "spurious irq, status=0x%.2x\n",
+ status);
+
+ /* Check if there is another transfer waiting. */
+ if (list_empty(&ms->queue))
+ return FSM_STOP;
+
+ /* get the head of the queue */
+ ms->message = list_first_entry(&ms->queue, struct spi_message, queue);
+ list_del_init(&ms->message->queue);
+
+ /* Setup the controller parameters */
+ ctrl1 = SPI_CTRL1_SPIE | SPI_CTRL1_SPE | SPI_CTRL1_MSTR;
+ spi = ms->message->spi;
+ if (spi->mode & SPI_CPHA)
+ ctrl1 |= SPI_CTRL1_CPHA;
+ if (spi->mode & SPI_CPOL)
+ ctrl1 |= SPI_CTRL1_CPOL;
+ if (spi->mode & SPI_LSB_FIRST)
+ ctrl1 |= SPI_CTRL1_LSBFE;
+ out_8(ms->regs + SPI_CTRL1, ctrl1);
+
+ /* Setup the controller speed */
+ /* minimum divider is '2'. Also, add '1' to force rounding the
+ * divider up. */
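+ /*
+ * The loop below factors the resulting divider as
+ * (sppr + 1) * 2^(spr + 1), rounding up so the bus never runs
+ * faster than the requested speed.
+ */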
+ sppr = ((ms->ipb_freq / ms->message->spi->max_speed_hz) + 1) >> 1;
+ spr = 0;
+ if (sppr < 1)
+ sppr = 1;
+ while (((sppr - 1) & ~0x7) != 0) {
+ sppr = (sppr + 1) >> 1; /* add '1' to force rounding up */
+ spr++;
+ }
+ sppr--; /* sppr quantity in register is offset by 1 */
+ if (spr > 7) {
+ /* Don't overrun limits of SPI baudrate register */
+ spr = 7;
+ sppr = 7;
+ }
+ out_8(ms->regs + SPI_BRR, sppr << 4 | spr); /* Set speed */
+
+ ms->cs_change = 1;
+ ms->transfer = container_of(ms->message->transfers.next,
+ struct spi_transfer, transfer_list);
+
+ mpc52xx_spi_start_transfer(ms);
+ ms->state = mpc52xx_spi_fsmstate_transfer;
+
+ return FSM_CONTINUE;
+}
+
+/*
+ * TRANSFER state
+ *
+ * In the middle of a transfer. If the SPI core has completed processing
+ * a byte, then read out the received data and write out the next byte
+ * (unless this transfer is finished; in which case go on to the wait
+ * state)
+ */
+static int mpc52xx_spi_fsmstate_transfer(int irq, struct mpc52xx_spi *ms,
+ u8 status, u8 data)
+{
+ if (!status)
+ return ms->irq0 ? FSM_STOP : FSM_POLL;
+
+ if (status & SPI_STATUS_WCOL) {
+ /* The SPI controller is stoopid. At slower speeds, it may
+ * raise the SPIF flag before the state machine is actually
+ * finished, which causes a collision (internal to the state
+ * machine only). The manual recommends inserting a delay
+ * between receiving the interrupt and sending the next byte,
+ * but it can also be worked around simply by retrying the
+ * transfer which is what we do here. */
+ ms->wcol_count++;
+ ms->wcol_ticks += mftb() - ms->wcol_tx_timestamp;
+ ms->wcol_tx_timestamp = mftb();
+ data = 0;
+ if (ms->tx_buf)
+ data = *(ms->tx_buf - 1);
+ out_8(ms->regs + SPI_DATA, data); /* try again */
+ return FSM_CONTINUE;
+ } else if (status & SPI_STATUS_MODF) {
+ ms->modf_count++;
+ dev_err(&ms->master->dev, "mode fault\n");
+ mpc52xx_spi_chipsel(ms, 0);
+ ms->message->status = -EIO;
+ if (ms->message->complete)
+ ms->message->complete(ms->message->context);
+ ms->state = mpc52xx_spi_fsmstate_idle;
+ return FSM_CONTINUE;
+ }
+
+ /* Read data out of the spi device */
+ ms->byte_count++;
+ if (ms->rx_buf)
+ *ms->rx_buf++ = data;
+
+ /* Is the transfer complete? */
+ ms->len--;
+ if (ms->len == 0) {
+ ms->timestamp = mftb();
+ if (ms->transfer->delay.unit == SPI_DELAY_UNIT_USECS)
+ ms->timestamp += ms->transfer->delay.value *
+ tb_ticks_per_usec;
+ ms->state = mpc52xx_spi_fsmstate_wait;
+ return FSM_CONTINUE;
+ }
+
+ /* Write out the next byte */
+ ms->wcol_tx_timestamp = mftb();
+ if (ms->tx_buf)
+ out_8(ms->regs + SPI_DATA, *ms->tx_buf++);
+ else
+ out_8(ms->regs + SPI_DATA, 0);
+
+ return FSM_CONTINUE;
+}
+
+/*
+ * WAIT state
+ *
+ * A transfer has completed; need to wait for the delay period to complete
+ * before starting the next transfer
+ */
+static int
+mpc52xx_spi_fsmstate_wait(int irq, struct mpc52xx_spi *ms, u8 status, u8 data)
+{
+ if (status && irq)
+ dev_err(&ms->master->dev, "spurious irq, status=0x%.2x\n",
+ status);
+
+ if (((int)mftb()) - ms->timestamp < 0)
+ return FSM_POLL;
+
+ ms->message->actual_length += ms->transfer->len;
+
+ /* Check if there is another transfer in this message. If there
+ * isn't, then deactivate CS, notify the sender, and drop back to idle
+ * to start the next message. */
+ if (ms->transfer->transfer_list.next == &ms->message->transfers) {
+ ms->msg_count++;
+ mpc52xx_spi_chipsel(ms, 0);
+ ms->message->status = 0;
+ if (ms->message->complete)
+ ms->message->complete(ms->message->context);
+ ms->state = mpc52xx_spi_fsmstate_idle;
+ return FSM_CONTINUE;
+ }
+
+ /* There is another transfer; kick it off */
+
+ if (ms->cs_change)
+ mpc52xx_spi_chipsel(ms, 0);
+
+ ms->transfer = container_of(ms->transfer->transfer_list.next,
+ struct spi_transfer, transfer_list);
+ mpc52xx_spi_start_transfer(ms);
+ ms->state = mpc52xx_spi_fsmstate_transfer;
+ return FSM_CONTINUE;
+}
+
+/**
+ * mpc52xx_spi_fsm_process - Finite State Machine iteration function
+ * @irq: irq number that triggered the FSM or 0 for polling
+ * @ms: pointer to mpc52xx_spi driver data
+ */
+static void mpc52xx_spi_fsm_process(int irq, struct mpc52xx_spi *ms)
+{
+ int rc = FSM_CONTINUE;
+ u8 status, data;
+
+ while (rc == FSM_CONTINUE) {
+ /* Interrupt cleared by read of STATUS followed by
+ * read of DATA registers */
+ status = in_8(ms->regs + SPI_STATUS);
+ data = in_8(ms->regs + SPI_DATA);
+ rc = ms->state(irq, ms, status, data);
+ }
+
+ if (rc == FSM_POLL)
+ schedule_work(&ms->work);
+}
+
+/**
+ * mpc52xx_spi_irq - IRQ handler
+ */
+static irqreturn_t mpc52xx_spi_irq(int irq, void *_ms)
+{
+ struct mpc52xx_spi *ms = _ms;
+ spin_lock(&ms->lock);
+ mpc52xx_spi_fsm_process(irq, ms);
+ spin_unlock(&ms->lock);
+ return IRQ_HANDLED;
+}
+
+/**
+ * mpc52xx_spi_wq - Workqueue function for polling the state machine
+ */
+static void mpc52xx_spi_wq(struct work_struct *work)
+{
+ struct mpc52xx_spi *ms = container_of(work, struct mpc52xx_spi, work);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ms->lock, flags);
+ mpc52xx_spi_fsm_process(0, ms);
+ spin_unlock_irqrestore(&ms->lock, flags);
+}
+
+/*
+ * spi_master ops
+ */
+
+static int mpc52xx_spi_transfer(struct spi_device *spi, struct spi_message *m)
+{
+ struct mpc52xx_spi *ms = spi_master_get_devdata(spi->master);
+ unsigned long flags;
+
+ m->actual_length = 0;
+ m->status = -EINPROGRESS;
+
+ spin_lock_irqsave(&ms->lock, flags);
+ list_add_tail(&m->queue, &ms->queue);
+ spin_unlock_irqrestore(&ms->lock, flags);
+ schedule_work(&ms->work);
+
+ return 0;
+}
+
+/*
+ * OF Platform Bus Binding
+ */
+static int mpc52xx_spi_probe(struct platform_device *op)
+{
+ struct spi_master *master;
+ struct mpc52xx_spi *ms;
+ struct gpio_desc *gpio_cs;
+ void __iomem *regs;
+ u8 ctrl1;
+ int rc, i = 0;
+
+ /* MMIO registers */
+ dev_dbg(&op->dev, "probing mpc5200 SPI device\n");
+ regs = of_iomap(op->dev.of_node, 0);
+ if (!regs)
+ return -ENODEV;
+
+ /* initialize the device */
+ ctrl1 = SPI_CTRL1_SPIE | SPI_CTRL1_SPE | SPI_CTRL1_MSTR;
+ out_8(regs + SPI_CTRL1, ctrl1);
+ out_8(regs + SPI_CTRL2, 0x0);
+ out_8(regs + SPI_DATADIR, 0xe); /* Set output pins */
+ out_8(regs + SPI_PORTDATA, 0x8); /* Deassert /SS signal */
+
+ /* Clear the status register and re-read it to check for a MODF
+ * failure. This driver cannot currently handle multiple masters
+ * on the SPI bus. This fault will also occur if the SPI signals
+ * are not connected to any pins (port_config setting) */
+ in_8(regs + SPI_STATUS);
+ out_8(regs + SPI_CTRL1, ctrl1);
+
+ in_8(regs + SPI_DATA);
+ if (in_8(regs + SPI_STATUS) & SPI_STATUS_MODF) {
+ dev_err(&op->dev, "mode fault; is port_config correct?\n");
+ rc = -EIO;
+ goto err_init;
+ }
+
+ dev_dbg(&op->dev, "allocating spi_master struct\n");
+ master = spi_alloc_master(&op->dev, sizeof(*ms));
+ if (!master) {
+ rc = -ENOMEM;
+ goto err_alloc;
+ }
+
+ master->transfer = mpc52xx_spi_transfer;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->dev.of_node = op->dev.of_node;
+
+ platform_set_drvdata(op, master);
+
+ ms = spi_master_get_devdata(master);
+ ms->master = master;
+ ms->regs = regs;
+ ms->irq0 = irq_of_parse_and_map(op->dev.of_node, 0);
+ ms->irq1 = irq_of_parse_and_map(op->dev.of_node, 1);
+ ms->state = mpc52xx_spi_fsmstate_idle;
+ ms->ipb_freq = mpc5xxx_get_bus_frequency(&op->dev);
+ ms->gpio_cs_count = gpiod_count(&op->dev, NULL);
+ if (ms->gpio_cs_count > 0) {
+ master->num_chipselect = ms->gpio_cs_count;
+ ms->gpio_cs = kmalloc_array(ms->gpio_cs_count,
+ sizeof(*ms->gpio_cs),
+ GFP_KERNEL);
+ if (!ms->gpio_cs) {
+ rc = -ENOMEM;
+ goto err_alloc_gpio;
+ }
+
+ for (i = 0; i < ms->gpio_cs_count; i++) {
+ gpio_cs = gpiod_get_index(&op->dev,
+ NULL, i, GPIOD_OUT_LOW);
+ rc = PTR_ERR_OR_ZERO(gpio_cs);
+ if (rc) {
+ dev_err(&op->dev,
+ "failed to get spi cs gpio #%d: %d\n",
+ i, rc);
+ goto err_gpio;
+ }
+
+ ms->gpio_cs[i] = gpio_cs;
+ }
+ }
+
+ spin_lock_init(&ms->lock);
+ INIT_LIST_HEAD(&ms->queue);
+ INIT_WORK(&ms->work, mpc52xx_spi_wq);
+
+ /* Decide if interrupts can be used */
+ if (ms->irq0 && ms->irq1) {
+ rc = request_irq(ms->irq0, mpc52xx_spi_irq, 0,
+ "mpc5200-spi-modf", ms);
+ rc |= request_irq(ms->irq1, mpc52xx_spi_irq, 0,
+ "mpc5200-spi-spif", ms);
+ if (rc) {
+ free_irq(ms->irq0, ms);
+ free_irq(ms->irq1, ms);
+ ms->irq0 = ms->irq1 = 0;
+ }
+ } else {
+ /* operate in polled mode */
+ ms->irq0 = ms->irq1 = 0;
+ }
+
+ if (!ms->irq0)
+ dev_info(&op->dev, "using polled mode\n");
+
+ dev_dbg(&op->dev, "registering spi_master struct\n");
+ rc = spi_register_master(master);
+ if (rc)
+ goto err_register;
+
+ dev_info(&ms->master->dev, "registered MPC5200 SPI bus\n");
+
+ return rc;
+
+ err_register:
+ dev_err(&ms->master->dev, "initialization failed\n");
+ err_gpio:
+ while (i-- > 0)
+ gpiod_put(ms->gpio_cs[i]);
+
+ kfree(ms->gpio_cs);
+ err_alloc_gpio:
+ spi_master_put(master);
+ err_alloc:
+ err_init:
+ iounmap(regs);
+ return rc;
+}
+
+static int mpc52xx_spi_remove(struct platform_device *op)
+{
+ struct spi_master *master = spi_master_get(platform_get_drvdata(op));
+ struct mpc52xx_spi *ms = spi_master_get_devdata(master);
+ int i;
+
+ free_irq(ms->irq0, ms);
+ free_irq(ms->irq1, ms);
+
+ for (i = 0; i < ms->gpio_cs_count; i++)
+ gpiod_put(ms->gpio_cs[i]);
+
+ kfree(ms->gpio_cs);
+ spi_unregister_master(master);
+ iounmap(ms->regs);
+ spi_master_put(master);
+
+ return 0;
+}
+
+static const struct of_device_id mpc52xx_spi_match[] = {
+ { .compatible = "fsl,mpc5200-spi", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mpc52xx_spi_match);
+
+static struct platform_driver mpc52xx_spi_of_driver = {
+ .driver = {
+ .name = "mpc52xx-spi",
+ .of_match_table = mpc52xx_spi_match,
+ },
+ .probe = mpc52xx_spi_probe,
+ .remove = mpc52xx_spi_remove,
+};
+module_platform_driver(mpc52xx_spi_of_driver);
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
new file mode 100644
index 000000000..6e95efb50
--- /dev/null
+++ b/drivers/spi/spi-mt65xx.c
@@ -0,0 +1,1423 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ * Author: Leilk Liu <leilk.liu@mediatek.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/gpio/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/spi-mt65xx.h>
+#include <linux/pm_runtime.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+#include <linux/dma-mapping.h>
+
+#define SPI_CFG0_REG 0x0000
+#define SPI_CFG1_REG 0x0004
+#define SPI_TX_SRC_REG 0x0008
+#define SPI_RX_DST_REG 0x000c
+#define SPI_TX_DATA_REG 0x0010
+#define SPI_RX_DATA_REG 0x0014
+#define SPI_CMD_REG 0x0018
+#define SPI_STATUS0_REG 0x001c
+#define SPI_PAD_SEL_REG 0x0024
+#define SPI_CFG2_REG 0x0028
+#define SPI_TX_SRC_REG_64 0x002c
+#define SPI_RX_DST_REG_64 0x0030
+#define SPI_CFG3_IPM_REG 0x0040
+
+#define SPI_CFG0_SCK_HIGH_OFFSET 0
+#define SPI_CFG0_SCK_LOW_OFFSET 8
+#define SPI_CFG0_CS_HOLD_OFFSET 16
+#define SPI_CFG0_CS_SETUP_OFFSET 24
+#define SPI_ADJUST_CFG0_CS_HOLD_OFFSET 0
+#define SPI_ADJUST_CFG0_CS_SETUP_OFFSET 16
+
+#define SPI_CFG1_CS_IDLE_OFFSET 0
+#define SPI_CFG1_PACKET_LOOP_OFFSET 8
+#define SPI_CFG1_PACKET_LENGTH_OFFSET 16
+#define SPI_CFG1_GET_TICK_DLY_OFFSET 29
+#define SPI_CFG1_GET_TICK_DLY_OFFSET_V1 30
+
+#define SPI_CFG1_GET_TICK_DLY_MASK 0xe0000000
+#define SPI_CFG1_GET_TICK_DLY_MASK_V1 0xc0000000
+
+#define SPI_CFG1_CS_IDLE_MASK 0xff
+#define SPI_CFG1_PACKET_LOOP_MASK 0xff00
+#define SPI_CFG1_PACKET_LENGTH_MASK 0x3ff0000
+#define SPI_CFG1_IPM_PACKET_LENGTH_MASK GENMASK(31, 16)
+#define SPI_CFG2_SCK_HIGH_OFFSET 0
+#define SPI_CFG2_SCK_LOW_OFFSET 16
+
+#define SPI_CMD_ACT BIT(0)
+#define SPI_CMD_RESUME BIT(1)
+#define SPI_CMD_RST BIT(2)
+#define SPI_CMD_PAUSE_EN BIT(4)
+#define SPI_CMD_DEASSERT BIT(5)
+#define SPI_CMD_SAMPLE_SEL BIT(6)
+#define SPI_CMD_CS_POL BIT(7)
+#define SPI_CMD_CPHA BIT(8)
+#define SPI_CMD_CPOL BIT(9)
+#define SPI_CMD_RX_DMA BIT(10)
+#define SPI_CMD_TX_DMA BIT(11)
+#define SPI_CMD_TXMSBF BIT(12)
+#define SPI_CMD_RXMSBF BIT(13)
+#define SPI_CMD_RX_ENDIAN BIT(14)
+#define SPI_CMD_TX_ENDIAN BIT(15)
+#define SPI_CMD_FINISH_IE BIT(16)
+#define SPI_CMD_PAUSE_IE BIT(17)
+#define SPI_CMD_IPM_NONIDLE_MODE BIT(19)
+#define SPI_CMD_IPM_SPIM_LOOP BIT(21)
+#define SPI_CMD_IPM_GET_TICKDLY_OFFSET 22
+
+#define SPI_CMD_IPM_GET_TICKDLY_MASK GENMASK(24, 22)
+
+#define PIN_MODE_CFG(x) ((x) / 2)
+
+#define SPI_CFG3_IPM_HALF_DUPLEX_DIR BIT(2)
+#define SPI_CFG3_IPM_HALF_DUPLEX_EN BIT(3)
+#define SPI_CFG3_IPM_XMODE_EN BIT(4)
+#define SPI_CFG3_IPM_NODATA_FLAG BIT(5)
+#define SPI_CFG3_IPM_CMD_BYTELEN_OFFSET 8
+#define SPI_CFG3_IPM_ADDR_BYTELEN_OFFSET 12
+
+#define SPI_CFG3_IPM_CMD_PIN_MODE_MASK GENMASK(1, 0)
+#define SPI_CFG3_IPM_CMD_BYTELEN_MASK GENMASK(11, 8)
+#define SPI_CFG3_IPM_ADDR_BYTELEN_MASK GENMASK(15, 12)
+
+#define MT8173_SPI_MAX_PAD_SEL 3
+
+#define MTK_SPI_PAUSE_INT_STATUS 0x2
+
+#define MTK_SPI_MAX_FIFO_SIZE 32U
+#define MTK_SPI_PACKET_SIZE 1024
+#define MTK_SPI_IPM_PACKET_SIZE SZ_64K
+#define MTK_SPI_IPM_PACKET_LOOP SZ_256
+
+#define MTK_SPI_IDLE 0
+#define MTK_SPI_PAUSED 1
+
+#define MTK_SPI_32BITS_MASK (0xffffffff)
+
+#define DMA_ADDR_EXT_BITS (36)
+#define DMA_ADDR_DEF_BITS (32)
+
+/**
+ * struct mtk_spi_compatible - device data structure
+ * @need_pad_sel: Enable pad (pins) selection in SPI controller
+ * @must_tx: Must explicitly send dummy TX bytes to do RX only transfer
+ * @enhance_timing: Enable adjusting cfg register to enhance time accuracy
+ * @dma_ext: DMA address extension supported
+ * @no_need_unprepare: Don't unprepare the SPI clk during runtime
+ * @ipm_design: Adjust/extend registers to support IPM design IP features
+ */
+struct mtk_spi_compatible {
+ bool need_pad_sel;
+ bool must_tx;
+ bool enhance_timing;
+ bool dma_ext;
+ bool no_need_unprepare;
+ bool ipm_design;
+};
+
+/**
+ * struct mtk_spi - SPI driver instance
+ * @base: Start address of the SPI controller registers
+ * @state: SPI controller state
+ * @pad_num: Number of pad_sel entries
+ * @pad_sel: Groups of pins to select
+ * @parent_clk: Parent of sel_clk
+ * @sel_clk: SPI master mux clock
+ * @spi_clk: Peripheral clock
+ * @spi_hclk: AHB bus clock
+ * @cur_transfer: Currently processed SPI transfer
+ * @xfer_len: Number of bytes to transfer
+ * @num_xfered: Number of transferred bytes
+ * @tx_sgl: TX transfer scatterlist
+ * @rx_sgl: RX transfer scatterlist
+ * @tx_sgl_len: Size of TX DMA transfer
+ * @rx_sgl_len: Size of RX DMA transfer
+ * @dev_comp: Device data structure
+ * @spi_clk_hz: Current SPI clock in Hz
+ * @spimem_done: SPI-MEM operation completion
+ * @use_spimem: Enables SPI-MEM
+ * @dev: Device pointer
+ * @tx_dma: DMA start for SPI-MEM TX
+ * @rx_dma: DMA start for SPI-MEM RX
+ */
+struct mtk_spi {
+ void __iomem *base;
+ u32 state;
+ int pad_num;
+ u32 *pad_sel;
+ struct clk *parent_clk, *sel_clk, *spi_clk, *spi_hclk;
+ struct spi_transfer *cur_transfer;
+ u32 xfer_len;
+ u32 num_xfered;
+ struct scatterlist *tx_sgl, *rx_sgl;
+ u32 tx_sgl_len, rx_sgl_len;
+ const struct mtk_spi_compatible *dev_comp;
+ u32 spi_clk_hz;
+ struct completion spimem_done;
+ bool use_spimem;
+ struct device *dev;
+ dma_addr_t tx_dma;
+ dma_addr_t rx_dma;
+};
+
+static const struct mtk_spi_compatible mtk_common_compat;
+
+static const struct mtk_spi_compatible mt2712_compat = {
+ .must_tx = true,
+};
+
+static const struct mtk_spi_compatible mtk_ipm_compat = {
+ .enhance_timing = true,
+ .dma_ext = true,
+ .ipm_design = true,
+};
+
+static const struct mtk_spi_compatible mt6765_compat = {
+ .need_pad_sel = true,
+ .must_tx = true,
+ .enhance_timing = true,
+ .dma_ext = true,
+};
+
+static const struct mtk_spi_compatible mt7622_compat = {
+ .must_tx = true,
+ .enhance_timing = true,
+};
+
+static const struct mtk_spi_compatible mt8173_compat = {
+ .need_pad_sel = true,
+ .must_tx = true,
+};
+
+static const struct mtk_spi_compatible mt8183_compat = {
+ .need_pad_sel = true,
+ .must_tx = true,
+ .enhance_timing = true,
+};
+
+static const struct mtk_spi_compatible mt6893_compat = {
+ .need_pad_sel = true,
+ .must_tx = true,
+ .enhance_timing = true,
+ .dma_ext = true,
+ .no_need_unprepare = true,
+};
+
+/*
+ * Default chip info, used unless the platform supplies its own.
+ */
+static const struct mtk_chip_config mtk_default_chip_info = {
+ .sample_sel = 0,
+ .tick_delay = 0,
+};
+
+static const struct of_device_id mtk_spi_of_match[] = {
+ { .compatible = "mediatek,spi-ipm",
+ .data = (void *)&mtk_ipm_compat,
+ },
+ { .compatible = "mediatek,mt2701-spi",
+ .data = (void *)&mtk_common_compat,
+ },
+ { .compatible = "mediatek,mt2712-spi",
+ .data = (void *)&mt2712_compat,
+ },
+ { .compatible = "mediatek,mt6589-spi",
+ .data = (void *)&mtk_common_compat,
+ },
+ { .compatible = "mediatek,mt6765-spi",
+ .data = (void *)&mt6765_compat,
+ },
+ { .compatible = "mediatek,mt7622-spi",
+ .data = (void *)&mt7622_compat,
+ },
+ { .compatible = "mediatek,mt7629-spi",
+ .data = (void *)&mt7622_compat,
+ },
+ { .compatible = "mediatek,mt8135-spi",
+ .data = (void *)&mtk_common_compat,
+ },
+ { .compatible = "mediatek,mt8173-spi",
+ .data = (void *)&mt8173_compat,
+ },
+ { .compatible = "mediatek,mt8183-spi",
+ .data = (void *)&mt8183_compat,
+ },
+ { .compatible = "mediatek,mt8192-spi",
+ .data = (void *)&mt6765_compat,
+ },
+ { .compatible = "mediatek,mt6893-spi",
+ .data = (void *)&mt6893_compat,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mtk_spi_of_match);
+
+static void mtk_spi_reset(struct mtk_spi *mdata)
+{
+ u32 reg_val;
+
+ /* set the software reset bit in SPI_CMD_REG. */
+ reg_val = readl(mdata->base + SPI_CMD_REG);
+ reg_val |= SPI_CMD_RST;
+ writel(reg_val, mdata->base + SPI_CMD_REG);
+
+ reg_val = readl(mdata->base + SPI_CMD_REG);
+ reg_val &= ~SPI_CMD_RST;
+ writel(reg_val, mdata->base + SPI_CMD_REG);
+}
+
+static int mtk_spi_set_hw_cs_timing(struct spi_device *spi)
+{
+ struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
+ struct spi_delay *cs_setup = &spi->cs_setup;
+ struct spi_delay *cs_hold = &spi->cs_hold;
+ struct spi_delay *cs_inactive = &spi->cs_inactive;
+ u32 setup, hold, inactive;
+ u32 reg_val;
+ int delay;
+
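+ /* convert each delay below from nanoseconds to SPI clock ticks */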
+ delay = spi_delay_to_ns(cs_setup, NULL);
+ if (delay < 0)
+ return delay;
+ setup = (delay * DIV_ROUND_UP(mdata->spi_clk_hz, 1000000)) / 1000;
+
+ delay = spi_delay_to_ns(cs_hold, NULL);
+ if (delay < 0)
+ return delay;
+ hold = (delay * DIV_ROUND_UP(mdata->spi_clk_hz, 1000000)) / 1000;
+
+ delay = spi_delay_to_ns(cs_inactive, NULL);
+ if (delay < 0)
+ return delay;
+ inactive = (delay * DIV_ROUND_UP(mdata->spi_clk_hz, 1000000)) / 1000;
+
+ if (hold || setup) {
+ reg_val = readl(mdata->base + SPI_CFG0_REG);
+ if (mdata->dev_comp->enhance_timing) {
+ if (hold) {
+ hold = min_t(u32, hold, 0x10000);
+ reg_val &= ~(0xffff << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
+ reg_val |= (((hold - 1) & 0xffff)
+ << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
+ }
+ if (setup) {
+ setup = min_t(u32, setup, 0x10000);
+ reg_val &= ~(0xffff << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
+ reg_val |= (((setup - 1) & 0xffff)
+ << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
+ }
+ } else {
+ if (hold) {
+ hold = min_t(u32, hold, 0x100);
+ reg_val &= ~(0xff << SPI_CFG0_CS_HOLD_OFFSET);
+ reg_val |= (((hold - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET);
+ }
+ if (setup) {
+ setup = min_t(u32, setup, 0x100);
+ reg_val &= ~(0xff << SPI_CFG0_CS_SETUP_OFFSET);
+ reg_val |= (((setup - 1) & 0xff)
+ << SPI_CFG0_CS_SETUP_OFFSET);
+ }
+ }
+ writel(reg_val, mdata->base + SPI_CFG0_REG);
+ }
+
+ if (inactive) {
+ inactive = min_t(u32, inactive, 0x100);
+ reg_val = readl(mdata->base + SPI_CFG1_REG);
+ reg_val &= ~SPI_CFG1_CS_IDLE_MASK;
+ reg_val |= (((inactive - 1) & 0xff) << SPI_CFG1_CS_IDLE_OFFSET);
+ writel(reg_val, mdata->base + SPI_CFG1_REG);
+ }
+
+ return 0;
+}
+
+static int mtk_spi_hw_init(struct spi_master *master,
+ struct spi_device *spi)
+{
+ u16 cpha, cpol;
+ u32 reg_val;
+ struct mtk_chip_config *chip_config = spi->controller_data;
+ struct mtk_spi *mdata = spi_master_get_devdata(master);
+
+ cpha = spi->mode & SPI_CPHA ? 1 : 0;
+ cpol = spi->mode & SPI_CPOL ? 1 : 0;
+
+ reg_val = readl(mdata->base + SPI_CMD_REG);
+ if (mdata->dev_comp->ipm_design) {
+ /* SPI transfer without idle time until packet length done */
+ reg_val |= SPI_CMD_IPM_NONIDLE_MODE;
+ if (spi->mode & SPI_LOOP)
+ reg_val |= SPI_CMD_IPM_SPIM_LOOP;
+ else
+ reg_val &= ~SPI_CMD_IPM_SPIM_LOOP;
+ }
+
+ if (cpha)
+ reg_val |= SPI_CMD_CPHA;
+ else
+ reg_val &= ~SPI_CMD_CPHA;
+ if (cpol)
+ reg_val |= SPI_CMD_CPOL;
+ else
+ reg_val &= ~SPI_CMD_CPOL;
+
+ /* set the mlsbx and mlsbtx */
+ if (spi->mode & SPI_LSB_FIRST) {
+ reg_val &= ~SPI_CMD_TXMSBF;
+ reg_val &= ~SPI_CMD_RXMSBF;
+ } else {
+ reg_val |= SPI_CMD_TXMSBF;
+ reg_val |= SPI_CMD_RXMSBF;
+ }
+
+ /* set the tx/rx endian */
+#ifdef __LITTLE_ENDIAN
+ reg_val &= ~SPI_CMD_TX_ENDIAN;
+ reg_val &= ~SPI_CMD_RX_ENDIAN;
+#else
+ reg_val |= SPI_CMD_TX_ENDIAN;
+ reg_val |= SPI_CMD_RX_ENDIAN;
+#endif
+
+ if (mdata->dev_comp->enhance_timing) {
+ /* set CS polarity */
+ if (spi->mode & SPI_CS_HIGH)
+ reg_val |= SPI_CMD_CS_POL;
+ else
+ reg_val &= ~SPI_CMD_CS_POL;
+
+ if (chip_config->sample_sel)
+ reg_val |= SPI_CMD_SAMPLE_SEL;
+ else
+ reg_val &= ~SPI_CMD_SAMPLE_SEL;
+ }
+
+ /* always enable the finish and pause interrupts */
+ reg_val |= SPI_CMD_FINISH_IE | SPI_CMD_PAUSE_IE;
+
+ /* disable dma mode */
+ reg_val &= ~(SPI_CMD_TX_DMA | SPI_CMD_RX_DMA);
+
+ /* disable deassert mode */
+ reg_val &= ~SPI_CMD_DEASSERT;
+
+ writel(reg_val, mdata->base + SPI_CMD_REG);
+
+ /* pad select */
+ if (mdata->dev_comp->need_pad_sel)
+ writel(mdata->pad_sel[spi->chip_select],
+ mdata->base + SPI_PAD_SEL_REG);
+
+ /* tick delay */
+ if (mdata->dev_comp->enhance_timing) {
+ if (mdata->dev_comp->ipm_design) {
+ reg_val = readl(mdata->base + SPI_CMD_REG);
+ reg_val &= ~SPI_CMD_IPM_GET_TICKDLY_MASK;
+ reg_val |= ((chip_config->tick_delay & 0x7)
+ << SPI_CMD_IPM_GET_TICKDLY_OFFSET);
+ writel(reg_val, mdata->base + SPI_CMD_REG);
+ } else {
+ reg_val = readl(mdata->base + SPI_CFG1_REG);
+ reg_val &= ~SPI_CFG1_GET_TICK_DLY_MASK;
+ reg_val |= ((chip_config->tick_delay & 0x7)
+ << SPI_CFG1_GET_TICK_DLY_OFFSET);
+ writel(reg_val, mdata->base + SPI_CFG1_REG);
+ }
+ } else {
+ reg_val = readl(mdata->base + SPI_CFG1_REG);
+ reg_val &= ~SPI_CFG1_GET_TICK_DLY_MASK_V1;
+ reg_val |= ((chip_config->tick_delay & 0x3)
+ << SPI_CFG1_GET_TICK_DLY_OFFSET_V1);
+ writel(reg_val, mdata->base + SPI_CFG1_REG);
+ }
+
+ /* set hw cs timing */
+ mtk_spi_set_hw_cs_timing(spi);
+ return 0;
+}
+
+static int mtk_spi_prepare_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ return mtk_spi_hw_init(master, msg->spi);
+}
+
+static void mtk_spi_set_cs(struct spi_device *spi, bool enable)
+{
+ u32 reg_val;
+ struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
+
+ if (spi->mode & SPI_CS_HIGH)
+ enable = !enable;
+
+ reg_val = readl(mdata->base + SPI_CMD_REG);
+ if (!enable) {
+ reg_val |= SPI_CMD_PAUSE_EN;
+ writel(reg_val, mdata->base + SPI_CMD_REG);
+ } else {
+ reg_val &= ~SPI_CMD_PAUSE_EN;
+ writel(reg_val, mdata->base + SPI_CMD_REG);
+ mdata->state = MTK_SPI_IDLE;
+ mtk_spi_reset(mdata);
+ }
+}
+
+static void mtk_spi_prepare_transfer(struct spi_master *master,
+ u32 speed_hz)
+{
+ u32 div, sck_time, reg_val;
+ struct mtk_spi *mdata = spi_master_get_devdata(master);
+
+ if (speed_hz < mdata->spi_clk_hz / 2)
+ div = DIV_ROUND_UP(mdata->spi_clk_hz, speed_hz);
+ else
+ div = 1;
+
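+ /* split the divider evenly between the SCK high and low half-periods */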
+ sck_time = (div + 1) / 2;
+
+ if (mdata->dev_comp->enhance_timing) {
+ reg_val = readl(mdata->base + SPI_CFG2_REG);
+ reg_val &= ~(0xffff << SPI_CFG2_SCK_HIGH_OFFSET);
+ reg_val |= (((sck_time - 1) & 0xffff)
+ << SPI_CFG2_SCK_HIGH_OFFSET);
+ reg_val &= ~(0xffff << SPI_CFG2_SCK_LOW_OFFSET);
+ reg_val |= (((sck_time - 1) & 0xffff)
+ << SPI_CFG2_SCK_LOW_OFFSET);
+ writel(reg_val, mdata->base + SPI_CFG2_REG);
+ } else {
+ reg_val = readl(mdata->base + SPI_CFG0_REG);
+ reg_val &= ~(0xff << SPI_CFG0_SCK_HIGH_OFFSET);
+ reg_val |= (((sck_time - 1) & 0xff)
+ << SPI_CFG0_SCK_HIGH_OFFSET);
+ reg_val &= ~(0xff << SPI_CFG0_SCK_LOW_OFFSET);
+ reg_val |= (((sck_time - 1) & 0xff) << SPI_CFG0_SCK_LOW_OFFSET);
+ writel(reg_val, mdata->base + SPI_CFG0_REG);
+ }
+}
+
+static void mtk_spi_setup_packet(struct spi_master *master)
+{
+ u32 packet_size, packet_loop, reg_val;
+ struct mtk_spi *mdata = spi_master_get_devdata(master);
+
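+ /*
+ * The controller sends packet_loop packets of packet_size bytes each;
+ * both values are programmed minus one into SPI_CFG1.
+ */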
+ if (mdata->dev_comp->ipm_design)
+ packet_size = min_t(u32,
+ mdata->xfer_len,
+ MTK_SPI_IPM_PACKET_SIZE);
+ else
+ packet_size = min_t(u32,
+ mdata->xfer_len,
+ MTK_SPI_PACKET_SIZE);
+
+ packet_loop = mdata->xfer_len / packet_size;
+
+ reg_val = readl(mdata->base + SPI_CFG1_REG);
+ if (mdata->dev_comp->ipm_design)
+ reg_val &= ~SPI_CFG1_IPM_PACKET_LENGTH_MASK;
+ else
+ reg_val &= ~SPI_CFG1_PACKET_LENGTH_MASK;
+ reg_val |= (packet_size - 1) << SPI_CFG1_PACKET_LENGTH_OFFSET;
+ reg_val &= ~SPI_CFG1_PACKET_LOOP_MASK;
+ reg_val |= (packet_loop - 1) << SPI_CFG1_PACKET_LOOP_OFFSET;
+ writel(reg_val, mdata->base + SPI_CFG1_REG);
+}
+
+static void mtk_spi_enable_transfer(struct spi_master *master)
+{
+ u32 cmd;
+ struct mtk_spi *mdata = spi_master_get_devdata(master);
+
+ cmd = readl(mdata->base + SPI_CMD_REG);
+ if (mdata->state == MTK_SPI_IDLE)
+ cmd |= SPI_CMD_ACT;
+ else
+ cmd |= SPI_CMD_RESUME;
+ writel(cmd, mdata->base + SPI_CMD_REG);
+}
+
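+/*
+ * DMA chunks must cover a whole number of packets; return the remainder
+ * that has to be carried over into the next chunk.
+ */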
+static int mtk_spi_get_mult_delta(struct mtk_spi *mdata, u32 xfer_len)
+{
+ u32 mult_delta = 0;
+
+ if (mdata->dev_comp->ipm_design) {
+ if (xfer_len > MTK_SPI_IPM_PACKET_SIZE)
+ mult_delta = xfer_len % MTK_SPI_IPM_PACKET_SIZE;
+ } else {
+ if (xfer_len > MTK_SPI_PACKET_SIZE)
+ mult_delta = xfer_len % MTK_SPI_PACKET_SIZE;
+ }
+
+ return mult_delta;
+}
+
+static void mtk_spi_update_mdata_len(struct spi_master *master)
+{
+ int mult_delta;
+ struct mtk_spi *mdata = spi_master_get_devdata(master);
+
+ if (mdata->tx_sgl_len && mdata->rx_sgl_len) {
+ if (mdata->tx_sgl_len > mdata->rx_sgl_len) {
+ mult_delta = mtk_spi_get_mult_delta(mdata, mdata->rx_sgl_len);
+ mdata->xfer_len = mdata->rx_sgl_len - mult_delta;
+ mdata->rx_sgl_len = mult_delta;
+ mdata->tx_sgl_len -= mdata->xfer_len;
+ } else {
+ mult_delta = mtk_spi_get_mult_delta(mdata, mdata->tx_sgl_len);
+ mdata->xfer_len = mdata->tx_sgl_len - mult_delta;
+ mdata->tx_sgl_len = mult_delta;
+ mdata->rx_sgl_len -= mdata->xfer_len;
+ }
+ } else if (mdata->tx_sgl_len) {
+ mult_delta = mtk_spi_get_mult_delta(mdata, mdata->tx_sgl_len);
+ mdata->xfer_len = mdata->tx_sgl_len - mult_delta;
+ mdata->tx_sgl_len = mult_delta;
+ } else if (mdata->rx_sgl_len) {
+ mult_delta = mtk_spi_get_mult_delta(mdata, mdata->rx_sgl_len);
+ mdata->xfer_len = mdata->rx_sgl_len - mult_delta;
+ mdata->rx_sgl_len = mult_delta;
+ }
+}
+
+static void mtk_spi_setup_dma_addr(struct spi_master *master,
+ struct spi_transfer *xfer)
+{
+ struct mtk_spi *mdata = spi_master_get_devdata(master);
+
+ if (mdata->tx_sgl) {
+ writel((u32)(xfer->tx_dma & MTK_SPI_32BITS_MASK),
+ mdata->base + SPI_TX_SRC_REG);
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ if (mdata->dev_comp->dma_ext)
+ writel((u32)(xfer->tx_dma >> 32),
+ mdata->base + SPI_TX_SRC_REG_64);
+#endif
+ }
+
+ if (mdata->rx_sgl) {
+ writel((u32)(xfer->rx_dma & MTK_SPI_32BITS_MASK),
+ mdata->base + SPI_RX_DST_REG);
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ if (mdata->dev_comp->dma_ext)
+ writel((u32)(xfer->rx_dma >> 32),
+ mdata->base + SPI_RX_DST_REG_64);
+#endif
+ }
+}
+
+static int mtk_spi_fifo_transfer(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ int cnt, remainder;
+ u32 reg_val;
+ struct mtk_spi *mdata = spi_master_get_devdata(master);
+
+ mdata->cur_transfer = xfer;
+ mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, xfer->len);
+ mdata->num_xfered = 0;
+ mtk_spi_prepare_transfer(master, xfer->speed_hz);
+ mtk_spi_setup_packet(master);
+
+ if (xfer->tx_buf) {
+ cnt = xfer->len / 4;
+ iowrite32_rep(mdata->base + SPI_TX_DATA_REG, xfer->tx_buf, cnt);
+ remainder = xfer->len % 4;
+ if (remainder > 0) {
+ reg_val = 0;
+ memcpy(&reg_val, xfer->tx_buf + (cnt * 4), remainder);
+ writel(reg_val, mdata->base + SPI_TX_DATA_REG);
+ }
+ }
+
+ mtk_spi_enable_transfer(master);
+
+ return 1;
+}
+
+static int mtk_spi_dma_transfer(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ int cmd;
+ struct mtk_spi *mdata = spi_master_get_devdata(master);
+
+ mdata->tx_sgl = NULL;
+ mdata->rx_sgl = NULL;
+ mdata->tx_sgl_len = 0;
+ mdata->rx_sgl_len = 0;
+ mdata->cur_transfer = xfer;
+ mdata->num_xfered = 0;
+
+ mtk_spi_prepare_transfer(master, xfer->speed_hz);
+
+ cmd = readl(mdata->base + SPI_CMD_REG);
+ if (xfer->tx_buf)
+ cmd |= SPI_CMD_TX_DMA;
+ if (xfer->rx_buf)
+ cmd |= SPI_CMD_RX_DMA;
+ writel(cmd, mdata->base + SPI_CMD_REG);
+
+ if (xfer->tx_buf)
+ mdata->tx_sgl = xfer->tx_sg.sgl;
+ if (xfer->rx_buf)
+ mdata->rx_sgl = xfer->rx_sg.sgl;
+
+ if (mdata->tx_sgl) {
+ xfer->tx_dma = sg_dma_address(mdata->tx_sgl);
+ mdata->tx_sgl_len = sg_dma_len(mdata->tx_sgl);
+ }
+ if (mdata->rx_sgl) {
+ xfer->rx_dma = sg_dma_address(mdata->rx_sgl);
+ mdata->rx_sgl_len = sg_dma_len(mdata->rx_sgl);
+ }
+
+ mtk_spi_update_mdata_len(master);
+ mtk_spi_setup_packet(master);
+ mtk_spi_setup_dma_addr(master, xfer);
+ mtk_spi_enable_transfer(master);
+
+ return 1;
+}
+
+static int mtk_spi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
+ u32 reg_val = 0;
+
+ /* prepare xfer direction and duplex mode */
+ if (mdata->dev_comp->ipm_design) {
+ if (!xfer->tx_buf || !xfer->rx_buf) {
+ reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_EN;
+ if (xfer->rx_buf)
+ reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_DIR;
+ }
+ writel(reg_val, mdata->base + SPI_CFG3_IPM_REG);
+ }
+
+ if (master->can_dma(master, spi, xfer))
+ return mtk_spi_dma_transfer(master, spi, xfer);
+ else
+ return mtk_spi_fifo_transfer(master, spi, xfer);
+}
+
+static bool mtk_spi_can_dma(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ /* Buffers for DMA transactions must be 4-byte aligned */
+ return (xfer->len > MTK_SPI_MAX_FIFO_SIZE &&
+ (unsigned long)xfer->tx_buf % 4 == 0 &&
+ (unsigned long)xfer->rx_buf % 4 == 0);
+}
+
+static int mtk_spi_setup(struct spi_device *spi)
+{
+ struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
+
+ if (!spi->controller_data)
+ spi->controller_data = (void *)&mtk_default_chip_info;
+
+ if (mdata->dev_comp->need_pad_sel && spi->cs_gpiod)
+ /* CS de-asserted, gpiolib will handle inversion */
+ gpiod_direction_output(spi->cs_gpiod, 0);
+
+ return 0;
+}
+
+static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
+{
+ u32 cmd, reg_val, cnt, remainder, len;
+ struct spi_master *master = dev_id;
+ struct mtk_spi *mdata = spi_master_get_devdata(master);
+ struct spi_transfer *trans = mdata->cur_transfer;
+
+ reg_val = readl(mdata->base + SPI_STATUS0_REG);
+ if (reg_val & MTK_SPI_PAUSE_INT_STATUS)
+ mdata->state = MTK_SPI_PAUSED;
+ else
+ mdata->state = MTK_SPI_IDLE;
+
+ /* SPI-MEM ops */
+ if (mdata->use_spimem) {
+ complete(&mdata->spimem_done);
+ return IRQ_HANDLED;
+ }
+
+ if (!master->can_dma(master, NULL, trans)) {
+ if (trans->rx_buf) {
+ cnt = mdata->xfer_len / 4;
+ ioread32_rep(mdata->base + SPI_RX_DATA_REG,
+ trans->rx_buf + mdata->num_xfered, cnt);
+ remainder = mdata->xfer_len % 4;
+ if (remainder > 0) {
+ reg_val = readl(mdata->base + SPI_RX_DATA_REG);
+ memcpy(trans->rx_buf +
+ mdata->num_xfered +
+ (cnt * 4),
+ &reg_val,
+ remainder);
+ }
+ }
+
+ mdata->num_xfered += mdata->xfer_len;
+ if (mdata->num_xfered == trans->len) {
+ spi_finalize_current_transfer(master);
+ return IRQ_HANDLED;
+ }
+
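+ /* FIFO mode, data still pending: queue the next chunk of at most MTK_SPI_MAX_FIFO_SIZE bytes */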
+ len = trans->len - mdata->num_xfered;
+ mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, len);
+ mtk_spi_setup_packet(master);
+
+ cnt = mdata->xfer_len / 4;
+ iowrite32_rep(mdata->base + SPI_TX_DATA_REG,
+ trans->tx_buf + mdata->num_xfered, cnt);
+
+ remainder = mdata->xfer_len % 4;
+ if (remainder > 0) {
+ reg_val = 0;
+ memcpy(&reg_val,
+ trans->tx_buf + (cnt * 4) + mdata->num_xfered,
+ remainder);
+ writel(reg_val, mdata->base + SPI_TX_DATA_REG);
+ }
+
+ mtk_spi_enable_transfer(master);
+
+ return IRQ_HANDLED;
+ }
+
+ if (mdata->tx_sgl)
+ trans->tx_dma += mdata->xfer_len;
+ if (mdata->rx_sgl)
+ trans->rx_dma += mdata->xfer_len;
+
+ if (mdata->tx_sgl && (mdata->tx_sgl_len == 0)) {
+ mdata->tx_sgl = sg_next(mdata->tx_sgl);
+ if (mdata->tx_sgl) {
+ trans->tx_dma = sg_dma_address(mdata->tx_sgl);
+ mdata->tx_sgl_len = sg_dma_len(mdata->tx_sgl);
+ }
+ }
+ if (mdata->rx_sgl && (mdata->rx_sgl_len == 0)) {
+ mdata->rx_sgl = sg_next(mdata->rx_sgl);
+ if (mdata->rx_sgl) {
+ trans->rx_dma = sg_dma_address(mdata->rx_sgl);
+ mdata->rx_sgl_len = sg_dma_len(mdata->rx_sgl);
+ }
+ }
+
+ if (!mdata->tx_sgl && !mdata->rx_sgl) {
+ /* spi disable dma */
+ cmd = readl(mdata->base + SPI_CMD_REG);
+ cmd &= ~SPI_CMD_TX_DMA;
+ cmd &= ~SPI_CMD_RX_DMA;
+ writel(cmd, mdata->base + SPI_CMD_REG);
+
+ spi_finalize_current_transfer(master);
+ return IRQ_HANDLED;
+ }
+
+ mtk_spi_update_mdata_len(master);
+ mtk_spi_setup_packet(master);
+ mtk_spi_setup_dma_addr(master, trans);
+ mtk_spi_enable_transfer(master);
+
+ return IRQ_HANDLED;
+}
+
+static int mtk_spi_mem_adjust_op_size(struct spi_mem *mem,
+ struct spi_mem_op *op)
+{
+ int opcode_len;
+
+ if (op->data.dir != SPI_MEM_NO_DATA) {
+ opcode_len = 1 + op->addr.nbytes + op->dummy.nbytes;
+ if (opcode_len + op->data.nbytes > MTK_SPI_IPM_PACKET_SIZE) {
+ op->data.nbytes = MTK_SPI_IPM_PACKET_SIZE - opcode_len;
+ /* force data buffer dma-aligned. */
+ op->data.nbytes -= op->data.nbytes % 4;
+ }
+ }
+
+ return 0;
+}
+
+static bool mtk_spi_mem_supports_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ if (!spi_mem_default_supports_op(mem, op))
+ return false;
+
+ if (op->addr.nbytes && op->dummy.nbytes &&
+ op->addr.buswidth != op->dummy.buswidth)
+ return false;
+
+ if (op->addr.nbytes + op->dummy.nbytes > 16)
+ return false;
+
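+ /*
+ * Transfers larger than one IPM packet must split into whole packets,
+ * and the packet count is bounded by the hardware loop counter.
+ */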
+ if (op->data.nbytes > MTK_SPI_IPM_PACKET_SIZE) {
+ if (op->data.nbytes / MTK_SPI_IPM_PACKET_SIZE >
+ MTK_SPI_IPM_PACKET_LOOP ||
+ op->data.nbytes % MTK_SPI_IPM_PACKET_SIZE != 0)
+ return false;
+ }
+
+ return true;
+}
+
+static void mtk_spi_mem_setup_dma_xfer(struct spi_master *master,
+ const struct spi_mem_op *op)
+{
+ struct mtk_spi *mdata = spi_master_get_devdata(master);
+
+ writel((u32)(mdata->tx_dma & MTK_SPI_32BITS_MASK),
+ mdata->base + SPI_TX_SRC_REG);
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ if (mdata->dev_comp->dma_ext)
+ writel((u32)(mdata->tx_dma >> 32),
+ mdata->base + SPI_TX_SRC_REG_64);
+#endif
+
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ writel((u32)(mdata->rx_dma & MTK_SPI_32BITS_MASK),
+ mdata->base + SPI_RX_DST_REG);
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ if (mdata->dev_comp->dma_ext)
+ writel((u32)(mdata->rx_dma >> 32),
+ mdata->base + SPI_RX_DST_REG_64);
+#endif
+ }
+}
+
+static int mtk_spi_transfer_wait(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ struct mtk_spi *mdata = spi_master_get_devdata(mem->spi->master);
+ /*
+ * For each byte we wait for 8 cycles of the SPI clock.
+ * Since speed is defined in Hz and we want milliseconds,
+ * the factor works out to 8 * 1000.
+ */
+ u64 ms = 8000LL;
+
+ if (op->data.dir == SPI_MEM_NO_DATA)
+ ms *= 32; /* prevent getting 0 for short transfers. */
+ else
+ ms *= op->data.nbytes;
+ ms = div_u64(ms, mem->spi->max_speed_hz);
+ ms += ms + 1000; /* 1s tolerance */
+
+ if (ms > UINT_MAX)
+ ms = UINT_MAX;
+
+ if (!wait_for_completion_timeout(&mdata->spimem_done,
+ msecs_to_jiffies(ms))) {
+ dev_err(mdata->dev, "spi-mem transfer timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int mtk_spi_mem_exec_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ struct mtk_spi *mdata = spi_master_get_devdata(mem->spi->master);
+ u32 reg_val, nio, tx_size;
+ char *tx_tmp_buf, *rx_tmp_buf;
+ int ret = 0;
+
+ mdata->use_spimem = true;
+ reinit_completion(&mdata->spimem_done);
+
+ mtk_spi_reset(mdata);
+ mtk_spi_hw_init(mem->spi->master, mem->spi);
+ mtk_spi_prepare_transfer(mem->spi->master, mem->spi->max_speed_hz);
+
+ reg_val = readl(mdata->base + SPI_CFG3_IPM_REG);
+ /* opcode byte len */
+ reg_val &= ~SPI_CFG3_IPM_CMD_BYTELEN_MASK;
+ reg_val |= 1 << SPI_CFG3_IPM_CMD_BYTELEN_OFFSET;
+
+ /* addr & dummy byte len */
+ reg_val &= ~SPI_CFG3_IPM_ADDR_BYTELEN_MASK;
+ if (op->addr.nbytes || op->dummy.nbytes)
+ reg_val |= (op->addr.nbytes + op->dummy.nbytes) <<
+ SPI_CFG3_IPM_ADDR_BYTELEN_OFFSET;
+
+ /* data byte len */
+ if (op->data.dir == SPI_MEM_NO_DATA) {
+ reg_val |= SPI_CFG3_IPM_NODATA_FLAG;
+ writel(0, mdata->base + SPI_CFG1_REG);
+ } else {
+ reg_val &= ~SPI_CFG3_IPM_NODATA_FLAG;
+ mdata->xfer_len = op->data.nbytes;
+ mtk_spi_setup_packet(mem->spi->master);
+ }
+
+ if (op->addr.nbytes || op->dummy.nbytes) {
+ if (op->addr.buswidth == 1 || op->dummy.buswidth == 1)
+ reg_val |= SPI_CFG3_IPM_XMODE_EN;
+ else
+ reg_val &= ~SPI_CFG3_IPM_XMODE_EN;
+ }
+
+ if (op->addr.buswidth == 2 ||
+ op->dummy.buswidth == 2 ||
+ op->data.buswidth == 2)
+ nio = 2;
+ else if (op->addr.buswidth == 4 ||
+ op->dummy.buswidth == 4 ||
+ op->data.buswidth == 4)
+ nio = 4;
+ else
+ nio = 1;
+
+ reg_val &= ~SPI_CFG3_IPM_CMD_PIN_MODE_MASK;
+ reg_val |= PIN_MODE_CFG(nio);
+
+ reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_EN;
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_DIR;
+ else
+ reg_val &= ~SPI_CFG3_IPM_HALF_DUPLEX_DIR;
+ writel(reg_val, mdata->base + SPI_CFG3_IPM_REG);
+
+ tx_size = 1 + op->addr.nbytes + op->dummy.nbytes;
+ if (op->data.dir == SPI_MEM_DATA_OUT)
+ tx_size += op->data.nbytes;
+
+ tx_size = max_t(u32, tx_size, 32);
+
+ tx_tmp_buf = kzalloc(tx_size, GFP_KERNEL | GFP_DMA);
+ if (!tx_tmp_buf) {
+ mdata->use_spimem = false;
+ return -ENOMEM;
+ }
+
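+ /*
+ * Build the TX bounce buffer in wire order: one opcode byte, the
+ * address bytes (MSB first), dummy bytes filled with 0xff, then any
+ * payload for an output transfer.
+ */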
+ tx_tmp_buf[0] = op->cmd.opcode;
+
+ if (op->addr.nbytes) {
+ int i;
+
+ for (i = 0; i < op->addr.nbytes; i++)
+ tx_tmp_buf[i + 1] = op->addr.val >>
+ (8 * (op->addr.nbytes - i - 1));
+ }
+
+ if (op->dummy.nbytes)
+ memset(tx_tmp_buf + op->addr.nbytes + 1,
+ 0xff,
+ op->dummy.nbytes);
+
+ if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
+ memcpy(tx_tmp_buf + op->dummy.nbytes + op->addr.nbytes + 1,
+ op->data.buf.out,
+ op->data.nbytes);
+
+ mdata->tx_dma = dma_map_single(mdata->dev, tx_tmp_buf,
+ tx_size, DMA_TO_DEVICE);
+ if (dma_mapping_error(mdata->dev, mdata->tx_dma)) {
+ ret = -ENOMEM;
+ goto err_exit;
+ }
+
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ if (!IS_ALIGNED((size_t)op->data.buf.in, 4)) {
+ rx_tmp_buf = kzalloc(op->data.nbytes,
+ GFP_KERNEL | GFP_DMA);
+ if (!rx_tmp_buf) {
+ ret = -ENOMEM;
+ goto unmap_tx_dma;
+ }
+ } else {
+ rx_tmp_buf = op->data.buf.in;
+ }
+
+ mdata->rx_dma = dma_map_single(mdata->dev,
+ rx_tmp_buf,
+ op->data.nbytes,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(mdata->dev, mdata->rx_dma)) {
+ ret = -ENOMEM;
+ goto kfree_rx_tmp_buf;
+ }
+ }
+
+ reg_val = readl(mdata->base + SPI_CMD_REG);
+ reg_val |= SPI_CMD_TX_DMA;
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ reg_val |= SPI_CMD_RX_DMA;
+ writel(reg_val, mdata->base + SPI_CMD_REG);
+
+ mtk_spi_mem_setup_dma_xfer(mem->spi->master, op);
+
+ mtk_spi_enable_transfer(mem->spi->master);
+
+ /* Wait for the interrupt. */
+ ret = mtk_spi_transfer_wait(mem, op);
+ if (ret)
+ goto unmap_rx_dma;
+
+ /* spi disable dma */
+ reg_val = readl(mdata->base + SPI_CMD_REG);
+ reg_val &= ~SPI_CMD_TX_DMA;
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ reg_val &= ~SPI_CMD_RX_DMA;
+ writel(reg_val, mdata->base + SPI_CMD_REG);
+
+unmap_rx_dma:
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ dma_unmap_single(mdata->dev, mdata->rx_dma,
+ op->data.nbytes, DMA_FROM_DEVICE);
+ if (!IS_ALIGNED((size_t)op->data.buf.in, 4))
+ memcpy(op->data.buf.in, rx_tmp_buf, op->data.nbytes);
+ }
+kfree_rx_tmp_buf:
+ if (op->data.dir == SPI_MEM_DATA_IN &&
+ !IS_ALIGNED((size_t)op->data.buf.in, 4))
+ kfree(rx_tmp_buf);
+unmap_tx_dma:
+ dma_unmap_single(mdata->dev, mdata->tx_dma,
+ tx_size, DMA_TO_DEVICE);
+err_exit:
+ kfree(tx_tmp_buf);
+ mdata->use_spimem = false;
+
+ return ret;
+}
+
+static const struct spi_controller_mem_ops mtk_spi_mem_ops = {
+ .adjust_op_size = mtk_spi_mem_adjust_op_size,
+ .supports_op = mtk_spi_mem_supports_op,
+ .exec_op = mtk_spi_mem_exec_op,
+};
+
+static int mtk_spi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct spi_master *master;
+ struct mtk_spi *mdata;
+ int i, irq, ret, addr_bits;
+
+ master = devm_spi_alloc_master(dev, sizeof(*mdata));
+ if (!master)
+ return dev_err_probe(dev, -ENOMEM, "failed to alloc spi master\n");
+
+ master->auto_runtime_pm = true;
+ master->dev.of_node = dev->of_node;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
+
+ master->set_cs = mtk_spi_set_cs;
+ master->prepare_message = mtk_spi_prepare_message;
+ master->transfer_one = mtk_spi_transfer_one;
+ master->can_dma = mtk_spi_can_dma;
+ master->setup = mtk_spi_setup;
+ master->set_cs_timing = mtk_spi_set_hw_cs_timing;
+ master->use_gpio_descriptors = true;
+
+ mdata = spi_master_get_devdata(master);
+ mdata->dev_comp = device_get_match_data(dev);
+
+ if (mdata->dev_comp->enhance_timing)
+ master->mode_bits |= SPI_CS_HIGH;
+
+ if (mdata->dev_comp->must_tx)
+ master->flags = SPI_MASTER_MUST_TX;
+ if (mdata->dev_comp->ipm_design)
+ master->mode_bits |= SPI_LOOP;
+
+ if (mdata->dev_comp->ipm_design) {
+ mdata->dev = dev;
+ master->mem_ops = &mtk_spi_mem_ops;
+ init_completion(&mdata->spimem_done);
+ }
+
+ if (mdata->dev_comp->need_pad_sel) {
+ mdata->pad_num = of_property_count_u32_elems(dev->of_node,
+ "mediatek,pad-select");
+ if (mdata->pad_num < 0)
+ return dev_err_probe(dev, -EINVAL,
+ "No 'mediatek,pad-select' property\n");
+
+ mdata->pad_sel = devm_kmalloc_array(dev, mdata->pad_num,
+ sizeof(u32), GFP_KERNEL);
+ if (!mdata->pad_sel)
+ return -ENOMEM;
+
+ for (i = 0; i < mdata->pad_num; i++) {
+ of_property_read_u32_index(dev->of_node,
+ "mediatek,pad-select",
+ i, &mdata->pad_sel[i]);
+ if (mdata->pad_sel[i] > MT8173_SPI_MAX_PAD_SEL)
+ return dev_err_probe(dev, -EINVAL,
+ "wrong pad-sel[%d]: %u\n",
+ i, mdata->pad_sel[i]);
+ }
+ }
+
+ platform_set_drvdata(pdev, master);
+ mdata->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mdata->base))
+ return PTR_ERR(mdata->base);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ if (!dev->dma_mask)
+ dev->dma_mask = &dev->coherent_dma_mask;
+
+ if (mdata->dev_comp->ipm_design)
+ dma_set_max_seg_size(dev, SZ_16M);
+ else
+ dma_set_max_seg_size(dev, SZ_256K);
+
+ mdata->parent_clk = devm_clk_get(dev, "parent-clk");
+ if (IS_ERR(mdata->parent_clk))
+ return dev_err_probe(dev, PTR_ERR(mdata->parent_clk),
+ "failed to get parent-clk\n");
+
+ mdata->sel_clk = devm_clk_get(dev, "sel-clk");
+ if (IS_ERR(mdata->sel_clk))
+ return dev_err_probe(dev, PTR_ERR(mdata->sel_clk), "failed to get sel-clk\n");
+
+ mdata->spi_clk = devm_clk_get(dev, "spi-clk");
+ if (IS_ERR(mdata->spi_clk))
+ return dev_err_probe(dev, PTR_ERR(mdata->spi_clk), "failed to get spi-clk\n");
+
+ mdata->spi_hclk = devm_clk_get_optional(dev, "hclk");
+ if (IS_ERR(mdata->spi_hclk))
+ return dev_err_probe(dev, PTR_ERR(mdata->spi_hclk), "failed to get hclk\n");
+
+ ret = clk_set_parent(mdata->sel_clk, mdata->parent_clk);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to clk_set_parent\n");
+
+ ret = clk_prepare_enable(mdata->spi_hclk);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to enable hclk\n");
+
+ ret = clk_prepare_enable(mdata->spi_clk);
+ if (ret < 0) {
+ clk_disable_unprepare(mdata->spi_hclk);
+ return dev_err_probe(dev, ret, "failed to enable spi_clk\n");
+ }
+
+ mdata->spi_clk_hz = clk_get_rate(mdata->spi_clk);
+
+ if (mdata->dev_comp->no_need_unprepare) {
+ clk_disable(mdata->spi_clk);
+ clk_disable(mdata->spi_hclk);
+ } else {
+ clk_disable_unprepare(mdata->spi_clk);
+ clk_disable_unprepare(mdata->spi_hclk);
+ }
+
+ if (mdata->dev_comp->need_pad_sel) {
+ if (mdata->pad_num != master->num_chipselect)
+ return dev_err_probe(dev, -EINVAL,
+ "pad_num does not match num_chipselect(%d != %d)\n",
+ mdata->pad_num, master->num_chipselect);
+
+ if (!master->cs_gpiods && master->num_chipselect > 1)
+ return dev_err_probe(dev, -EINVAL,
+ "cs_gpios not specified and num_chipselect > 1\n");
+ }
+
+ if (mdata->dev_comp->dma_ext)
+ addr_bits = DMA_ADDR_EXT_BITS;
+ else
+ addr_bits = DMA_ADDR_DEF_BITS;
+ ret = dma_set_mask(dev, DMA_BIT_MASK(addr_bits));
+ if (ret)
+ dev_notice(dev, "SPI dma_set_mask(%d) failed, ret:%d\n",
+ addr_bits, ret);
+
+ ret = devm_request_irq(dev, irq, mtk_spi_interrupt,
+ IRQF_TRIGGER_NONE, dev_name(dev), master);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to register irq\n");
+
+ pm_runtime_enable(dev);
+
+ ret = devm_spi_register_master(dev, master);
+ if (ret) {
+ pm_runtime_disable(dev);
+ return dev_err_probe(dev, ret, "failed to register master\n");
+ }
+
+ return 0;
+}
+
+static int mtk_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct mtk_spi *mdata = spi_master_get_devdata(master);
+ int ret;
+
+ if (mdata->use_spimem && !completion_done(&mdata->spimem_done))
+ complete(&mdata->spimem_done);
+
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret < 0)
+ return ret;
+
+ mtk_spi_reset(mdata);
+
+ if (mdata->dev_comp->no_need_unprepare) {
+ clk_unprepare(mdata->spi_clk);
+ clk_unprepare(mdata->spi_hclk);
+ }
+
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mtk_spi_suspend(struct device *dev)
+{
+ int ret;
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct mtk_spi *mdata = spi_master_get_devdata(master);
+
+ ret = spi_master_suspend(master);
+ if (ret)
+ return ret;
+
+ if (!pm_runtime_suspended(dev)) {
+ clk_disable_unprepare(mdata->spi_clk);
+ clk_disable_unprepare(mdata->spi_hclk);
+ }
+
+ return ret;
+}
+
+static int mtk_spi_resume(struct device *dev)
+{
+ int ret;
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct mtk_spi *mdata = spi_master_get_devdata(master);
+
+ if (!pm_runtime_suspended(dev)) {
+ ret = clk_prepare_enable(mdata->spi_clk);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(mdata->spi_hclk);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable spi_hclk (%d)\n", ret);
+ clk_disable_unprepare(mdata->spi_clk);
+ return ret;
+ }
+ }
+
+ ret = spi_master_resume(master);
+ if (ret < 0) {
+ clk_disable_unprepare(mdata->spi_clk);
+ clk_disable_unprepare(mdata->spi_hclk);
+ }
+
+ return ret;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_PM
+static int mtk_spi_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct mtk_spi *mdata = spi_master_get_devdata(master);
+
+ if (mdata->dev_comp->no_need_unprepare) {
+ clk_disable(mdata->spi_clk);
+ clk_disable(mdata->spi_hclk);
+ } else {
+ clk_disable_unprepare(mdata->spi_clk);
+ clk_disable_unprepare(mdata->spi_hclk);
+ }
+
+ return 0;
+}
+
+static int mtk_spi_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct mtk_spi *mdata = spi_master_get_devdata(master);
+ int ret;
+
+ if (mdata->dev_comp->no_need_unprepare) {
+ ret = clk_enable(mdata->spi_clk);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
+ return ret;
+ }
+ ret = clk_enable(mdata->spi_hclk);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable spi_hclk (%d)\n", ret);
+ clk_disable(mdata->spi_clk);
+ return ret;
+ }
+ } else {
+ ret = clk_prepare_enable(mdata->spi_clk);
+ if (ret < 0) {
+ dev_err(dev, "failed to prepare_enable spi_clk (%d)\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(mdata->spi_hclk);
+ if (ret < 0) {
+ dev_err(dev, "failed to prepare_enable spi_hclk (%d)\n", ret);
+ clk_disable_unprepare(mdata->spi_clk);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops mtk_spi_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(mtk_spi_suspend, mtk_spi_resume)
+ SET_RUNTIME_PM_OPS(mtk_spi_runtime_suspend,
+ mtk_spi_runtime_resume, NULL)
+};
+
+static struct platform_driver mtk_spi_driver = {
+ .driver = {
+ .name = "mtk-spi",
+ .pm = &mtk_spi_pm,
+ .of_match_table = mtk_spi_of_match,
+ },
+ .probe = mtk_spi_probe,
+ .remove = mtk_spi_remove,
+};
+
+module_platform_driver(mtk_spi_driver);
+
+MODULE_DESCRIPTION("MTK SPI Controller driver");
+MODULE_AUTHOR("Leilk Liu <leilk.liu@mediatek.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:mtk-spi");
diff --git a/drivers/spi/spi-mt7621.c b/drivers/spi/spi-mt7621.c
new file mode 100644
index 000000000..c4cc8e2f8
--- /dev/null
+++ b/drivers/spi/spi-mt7621.c
@@ -0,0 +1,391 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// spi-mt7621.c -- MediaTek MT7621 SPI controller driver
+//
+// Copyright (C) 2011 Sergiy <piratfm@gmail.com>
+// Copyright (C) 2011-2013 Gabor Juhos <juhosg@openwrt.org>
+// Copyright (C) 2014-2015 Felix Fietkau <nbd@nbd.name>
+//
+// Some parts are based on spi-orion.c:
+// Author: Shadi Ammouri <shadi@marvell.com>
+// Copyright (C) 2007-2008 Marvell Ltd.
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/reset.h>
+#include <linux/spi/spi.h>
+
+#define DRIVER_NAME "spi-mt7621"
+
+/* in usec */
+#define RALINK_SPI_WAIT_MAX_LOOP 2000
+
+/* SPISTAT register bit field */
+#define SPISTAT_BUSY BIT(0)
+
+#define MT7621_SPI_TRANS 0x00
+#define SPITRANS_BUSY BIT(16)
+
+#define MT7621_SPI_OPCODE 0x04
+#define MT7621_SPI_DATA0 0x08
+#define MT7621_SPI_DATA4 0x18
+#define SPI_CTL_TX_RX_CNT_MASK 0xff
+#define SPI_CTL_START BIT(8)
+
+#define MT7621_SPI_MASTER 0x28
+#define MASTER_MORE_BUFMODE BIT(2)
+#define MASTER_FULL_DUPLEX BIT(10)
+#define MASTER_RS_CLK_SEL GENMASK(27, 16)
+#define MASTER_RS_CLK_SEL_SHIFT 16
+#define MASTER_RS_SLAVE_SEL GENMASK(31, 29)
+
+#define MT7621_SPI_MOREBUF 0x2c
+#define MT7621_SPI_POLAR 0x38
+#define MT7621_SPI_SPACE 0x3c
+
+#define MT7621_CPHA BIT(5)
+#define MT7621_CPOL BIT(4)
+#define MT7621_LSB_FIRST BIT(3)
+
+struct mt7621_spi {
+ struct spi_controller *master;
+ void __iomem *base;
+ unsigned int sys_freq;
+ unsigned int speed;
+ int pending_write;
+};
+
+static inline struct mt7621_spi *spidev_to_mt7621_spi(struct spi_device *spi)
+{
+ return spi_controller_get_devdata(spi->master);
+}
+
+static inline u32 mt7621_spi_read(struct mt7621_spi *rs, u32 reg)
+{
+ return ioread32(rs->base + reg);
+}
+
+static inline void mt7621_spi_write(struct mt7621_spi *rs, u32 reg, u32 val)
+{
+ iowrite32(val, rs->base + reg);
+}
+
+static void mt7621_spi_set_cs(struct spi_device *spi, int enable)
+{
+ struct mt7621_spi *rs = spidev_to_mt7621_spi(spi);
+ int cs = spi->chip_select;
+ u32 polar = 0;
+ u32 master;
+
+ /*
+ * Select SPI device 7, enable "more buffer mode" and disable
+ * full-duplex (only half-duplex really works on this chip
+ * reliably)
+ */
+ master = mt7621_spi_read(rs, MT7621_SPI_MASTER);
+ master |= MASTER_RS_SLAVE_SEL | MASTER_MORE_BUFMODE;
+ master &= ~MASTER_FULL_DUPLEX;
+ mt7621_spi_write(rs, MT7621_SPI_MASTER, master);
+
+ rs->pending_write = 0;
+
+ if (enable)
+ polar = BIT(cs);
+ mt7621_spi_write(rs, MT7621_SPI_POLAR, polar);
+}
+
+static int mt7621_spi_prepare(struct spi_device *spi, unsigned int speed)
+{
+ struct mt7621_spi *rs = spidev_to_mt7621_spi(spi);
+ u32 rate;
+ u32 reg;
+
+ dev_dbg(&spi->dev, "speed:%u\n", speed);
+
+ rate = DIV_ROUND_UP(rs->sys_freq, speed);
+ dev_dbg(&spi->dev, "rate-1:%u\n", rate);
+
+ if (rate > 4097)
+ return -EINVAL;
+
+ if (rate < 2)
+ rate = 2;
+
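+ /*
+ * The clock divider field stores (divisor - 2), so a divisor of 2
+ * maps to 0 and the largest encodable divisor is 4097.
+ */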
+ reg = mt7621_spi_read(rs, MT7621_SPI_MASTER);
+ reg &= ~MASTER_RS_CLK_SEL;
+ reg |= (rate - 2) << MASTER_RS_CLK_SEL_SHIFT;
+ rs->speed = speed;
+
+ reg &= ~MT7621_LSB_FIRST;
+ if (spi->mode & SPI_LSB_FIRST)
+ reg |= MT7621_LSB_FIRST;
+
+ /*
+ * This SPI controller seems to be tested on SPI flash only and some
+ * bits are swizzled under other SPI modes probably due to incorrect
+ * wiring inside the silicon. Only mode 0 works correctly.
+ */
+ reg &= ~(MT7621_CPHA | MT7621_CPOL);
+
+ mt7621_spi_write(rs, MT7621_SPI_MASTER, reg);
+
+ return 0;
+}
+
+static inline int mt7621_spi_wait_till_ready(struct mt7621_spi *rs)
+{
+ int i;
+
+ for (i = 0; i < RALINK_SPI_WAIT_MAX_LOOP; i++) {
+ u32 status;
+
+ status = mt7621_spi_read(rs, MT7621_SPI_TRANS);
+ if ((status & SPITRANS_BUSY) == 0)
+ return 0;
+ cpu_relax();
+ udelay(1);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static void mt7621_spi_read_half_duplex(struct mt7621_spi *rs,
+ int rx_len, u8 *buf)
+{
+ int tx_len;
+
+ /*
+ * Combine with any pending write, and perform one or more half-duplex
+ * transactions reading 'rx_len' bytes. Data to be written is already in
+ * MT7621_SPI_DATA.
+ */
+ tx_len = rs->pending_write;
+ rs->pending_write = 0;
+
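+ /*
+ * MT7621_SPI_MOREBUF takes per-burst bit counts as packed below:
+ * command bits at bit 24, read (MISO) bits at bit 12 and any extra
+ * write (MOSI) bits at bit 0; one burst covers at most 4 command
+ * bytes and 32 data bytes.
+ */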
+ while (rx_len || tx_len) {
+ int i;
+ u32 val = (min(tx_len, 4) * 8) << 24;
+ int rx = min(rx_len, 32);
+
+ if (tx_len > 4)
+ val |= (tx_len - 4) * 8;
+ val |= (rx * 8) << 12;
+ mt7621_spi_write(rs, MT7621_SPI_MOREBUF, val);
+
+ tx_len = 0;
+
+ val = mt7621_spi_read(rs, MT7621_SPI_TRANS);
+ val |= SPI_CTL_START;
+ mt7621_spi_write(rs, MT7621_SPI_TRANS, val);
+
+ mt7621_spi_wait_till_ready(rs);
+
+ for (i = 0; i < rx; i++) {
+ if ((i % 4) == 0)
+ val = mt7621_spi_read(rs, MT7621_SPI_DATA0 + i);
+ *buf++ = val & 0xff;
+ val >>= 8;
+ }
+
+ rx_len -= i;
+ }
+}
+
+static inline void mt7621_spi_flush(struct mt7621_spi *rs)
+{
+ mt7621_spi_read_half_duplex(rs, 0, NULL);
+}
+
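+/*
+ * Writes are only staged here: bytes are packed into the OPCODE/DATA
+ * registers (the 4-byte opcode word is kept big-endian, hence the
+ * swab32() calls) and rs->pending_write tracks how many bytes are
+ * queued. The actual bus transaction is triggered by the next
+ * half-duplex read or by mt7621_spi_flush().
+ */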
+static void mt7621_spi_write_half_duplex(struct mt7621_spi *rs,
+ int tx_len, const u8 *buf)
+{
+ int len = rs->pending_write;
+ int val = 0;
+
+ if (len & 3) {
+ val = mt7621_spi_read(rs, MT7621_SPI_OPCODE + (len & ~3));
+ if (len < 4) {
+ val <<= (4 - len) * 8;
+ val = swab32(val);
+ }
+ }
+
+ while (tx_len > 0) {
+ if (len >= 36) {
+ rs->pending_write = len;
+ mt7621_spi_flush(rs);
+ len = 0;
+ }
+
+ val |= *buf++ << (8 * (len & 3));
+ len++;
+ if ((len & 3) == 0) {
+ if (len == 4)
+ /* The byte-order of the opcode is weird! */
+ val = swab32(val);
+ mt7621_spi_write(rs, MT7621_SPI_OPCODE + len - 4, val);
+ val = 0;
+ }
+ tx_len -= 1;
+ }
+
+ if (len & 3) {
+ if (len < 4) {
+ val = swab32(val);
+ val >>= (4 - len) * 8;
+ }
+ mt7621_spi_write(rs, MT7621_SPI_OPCODE + (len & ~3), val);
+ }
+
+ rs->pending_write = len;
+}
+
+static int mt7621_spi_transfer_one_message(struct spi_controller *master,
+ struct spi_message *m)
+{
+ struct mt7621_spi *rs = spi_controller_get_devdata(master);
+ struct spi_device *spi = m->spi;
+ unsigned int speed = spi->max_speed_hz;
+ struct spi_transfer *t = NULL;
+ int status = 0;
+
+ mt7621_spi_wait_till_ready(rs);
+
+ list_for_each_entry(t, &m->transfers, transfer_list)
+ if (t->speed_hz < speed)
+ speed = t->speed_hz;
+
+ if (mt7621_spi_prepare(spi, speed)) {
+ status = -EIO;
+ goto msg_done;
+ }
+
+ /* Assert CS */
+ mt7621_spi_set_cs(spi, 1);
+
+ m->actual_length = 0;
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ if ((t->rx_buf) && (t->tx_buf)) {
+ /*
+ * This controller will shift some extra data out
+ * of spi_opcode if (mosi_bit_cnt > 0) &&
+ * (cmd_bit_cnt == 0). So the claimed full-duplex
+ * support is broken since we have no way to read
+ * the MISO value during that bit.
+ */
+ status = -EIO;
+ goto msg_done;
+ } else if (t->rx_buf) {
+ mt7621_spi_read_half_duplex(rs, t->len, t->rx_buf);
+ } else if (t->tx_buf) {
+ mt7621_spi_write_half_duplex(rs, t->len, t->tx_buf);
+ }
+ m->actual_length += t->len;
+ }
+
+ /* Flush data and deassert CS */
+ mt7621_spi_flush(rs);
+ mt7621_spi_set_cs(spi, 0);
+
+msg_done:
+ m->status = status;
+ spi_finalize_current_message(master);
+
+ return 0;
+}
+
+static int mt7621_spi_setup(struct spi_device *spi)
+{
+ struct mt7621_spi *rs = spidev_to_mt7621_spi(spi);
+
+ if ((spi->max_speed_hz == 0) ||
+ (spi->max_speed_hz > (rs->sys_freq / 2)))
+ spi->max_speed_hz = rs->sys_freq / 2;
+
+ if (spi->max_speed_hz < (rs->sys_freq / 4097)) {
+ dev_err(&spi->dev, "setup: requested speed is too low %d Hz\n",
+ spi->max_speed_hz);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id mt7621_spi_match[] = {
+ { .compatible = "ralink,mt7621-spi" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mt7621_spi_match);
+
+static int mt7621_spi_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ struct spi_controller *master;
+ struct mt7621_spi *rs;
+ void __iomem *base;
+ struct clk *clk;
+ int ret;
+
+ match = of_match_device(mt7621_spi_match, &pdev->dev);
+ if (!match)
+ return -EINVAL;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ clk = devm_clk_get_enabled(&pdev->dev, NULL);
+ if (IS_ERR(clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(clk),
+ "unable to get SYS clock\n");
+
+ master = devm_spi_alloc_master(&pdev->dev, sizeof(*rs));
+ if (!master) {
+ dev_info(&pdev->dev, "master allocation failed\n");
+ return -ENOMEM;
+ }
+
+ master->mode_bits = SPI_LSB_FIRST;
+ master->flags = SPI_CONTROLLER_HALF_DUPLEX;
+ master->setup = mt7621_spi_setup;
+ master->transfer_one_message = mt7621_spi_transfer_one_message;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->dev.of_node = pdev->dev.of_node;
+ master->num_chipselect = 2;
+
+ dev_set_drvdata(&pdev->dev, master);
+
+ rs = spi_controller_get_devdata(master);
+ rs->base = base;
+ rs->master = master;
+ rs->sys_freq = clk_get_rate(clk);
+ rs->pending_write = 0;
+ dev_info(&pdev->dev, "sys_freq: %u\n", rs->sys_freq);
+
+ ret = device_reset(&pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "SPI reset failed!\n");
+ return ret;
+ }
+
+ return devm_spi_register_controller(&pdev->dev, master);
+}
+
+MODULE_ALIAS("platform:" DRIVER_NAME);
+
+static struct platform_driver mt7621_spi_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = mt7621_spi_match,
+ },
+ .probe = mt7621_spi_probe,
+};
+
+module_platform_driver(mt7621_spi_driver);
+
+MODULE_DESCRIPTION("MT7621 SPI driver");
+MODULE_AUTHOR("Felix Fietkau <nbd@nbd.name>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-mtk-nor.c b/drivers/spi/spi-mtk-nor.c
new file mode 100644
index 000000000..d167699a1
--- /dev/null
+++ b/drivers/spi/spi-mtk-nor.c
@@ -0,0 +1,997 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Mediatek SPI NOR controller driver
+//
+// Copyright (C) 2020 Chuanhong Guo <gch981213@gmail.com>
+
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+#include <linux/string.h>
+
+#define DRIVER_NAME "mtk-spi-nor"
+
+#define MTK_NOR_REG_CMD 0x00
+#define MTK_NOR_CMD_WRITE BIT(4)
+#define MTK_NOR_CMD_PROGRAM BIT(2)
+#define MTK_NOR_CMD_READ BIT(0)
+#define MTK_NOR_CMD_MASK GENMASK(5, 0)
+
+#define MTK_NOR_REG_PRG_CNT 0x04
+#define MTK_NOR_PRG_CNT_MAX 56
+#define MTK_NOR_REG_RDATA 0x0c
+
+#define MTK_NOR_REG_RADR0 0x10
+#define MTK_NOR_REG_RADR(n) (MTK_NOR_REG_RADR0 + 4 * (n))
+#define MTK_NOR_REG_RADR3 0xc8
+
+#define MTK_NOR_REG_WDATA 0x1c
+
+#define MTK_NOR_REG_PRGDATA0 0x20
+#define MTK_NOR_REG_PRGDATA(n) (MTK_NOR_REG_PRGDATA0 + 4 * (n))
+#define MTK_NOR_REG_PRGDATA_MAX 5
+
+#define MTK_NOR_REG_SHIFT0 0x38
+#define MTK_NOR_REG_SHIFT(n) (MTK_NOR_REG_SHIFT0 + 4 * (n))
+#define MTK_NOR_REG_SHIFT_MAX 9
+
+#define MTK_NOR_REG_CFG1 0x60
+#define MTK_NOR_FAST_READ BIT(0)
+
+#define MTK_NOR_REG_CFG2 0x64
+#define MTK_NOR_WR_CUSTOM_OP_EN BIT(4)
+#define MTK_NOR_WR_BUF_EN BIT(0)
+
+#define MTK_NOR_REG_PP_DATA 0x98
+
+#define MTK_NOR_REG_IRQ_STAT 0xa8
+#define MTK_NOR_REG_IRQ_EN 0xac
+#define MTK_NOR_IRQ_DMA BIT(7)
+#define MTK_NOR_IRQ_MASK GENMASK(7, 0)
+
+#define MTK_NOR_REG_CFG3 0xb4
+#define MTK_NOR_DISABLE_WREN BIT(7)
+#define MTK_NOR_DISABLE_SR_POLL BIT(5)
+
+#define MTK_NOR_REG_WP 0xc4
+#define MTK_NOR_ENABLE_SF_CMD 0x30
+
+#define MTK_NOR_REG_BUSCFG 0xcc
+#define MTK_NOR_4B_ADDR BIT(4)
+#define MTK_NOR_QUAD_ADDR BIT(3)
+#define MTK_NOR_QUAD_READ BIT(2)
+#define MTK_NOR_DUAL_ADDR BIT(1)
+#define MTK_NOR_DUAL_READ BIT(0)
+#define MTK_NOR_BUS_MODE_MASK GENMASK(4, 0)
+
+#define MTK_NOR_REG_DMA_CTL 0x718
+#define MTK_NOR_DMA_START BIT(0)
+
+#define MTK_NOR_REG_DMA_FADR 0x71c
+#define MTK_NOR_REG_DMA_DADR 0x720
+#define MTK_NOR_REG_DMA_END_DADR 0x724
+#define MTK_NOR_REG_DMA_DADR_HB 0x738
+#define MTK_NOR_REG_DMA_END_DADR_HB 0x73c
+
+#define MTK_NOR_PRG_MAX_SIZE 6
+// Reading DMA src/dst addresses have to be 16-byte aligned
+#define MTK_NOR_DMA_ALIGN 16
+#define MTK_NOR_DMA_ALIGN_MASK (MTK_NOR_DMA_ALIGN - 1)
+// and we allocate a bounce buffer if destination address isn't aligned.
+#define MTK_NOR_BOUNCE_BUF_SIZE PAGE_SIZE
+
+// Buffered page program can do one 128-byte transfer
+#define MTK_NOR_PP_SIZE 128
+
+#define CLK_TO_US(sp, clkcnt) DIV_ROUND_UP(clkcnt, sp->spi_freq / 1000000)
+
+struct mtk_nor_caps {
+ u8 dma_bits;
+
+ /* extra_dummy_bit is added for the IP in newer SoCs.
+ * Some newer SoCs change the timing of fetching register values
+ * and IDs from the NOR flash, so they need an extra dummy bit that
+ * adds more clock cycles when fetching data.
+ */
+ u8 extra_dummy_bit;
+};
+
+struct mtk_nor {
+ struct spi_controller *ctlr;
+ struct device *dev;
+ void __iomem *base;
+ u8 *buffer;
+ dma_addr_t buffer_dma;
+ struct clk *spi_clk;
+ struct clk *ctlr_clk;
+ struct clk *axi_clk;
+ struct clk *axi_s_clk;
+ unsigned int spi_freq;
+ bool wbuf_en;
+ bool has_irq;
+ bool high_dma;
+ struct completion op_done;
+ const struct mtk_nor_caps *caps;
+};
+
+static inline void mtk_nor_rmw(struct mtk_nor *sp, u32 reg, u32 set, u32 clr)
+{
+ u32 val = readl(sp->base + reg);
+
+ val &= ~clr;
+ val |= set;
+ writel(val, sp->base + reg);
+}
+
+static inline int mtk_nor_cmd_exec(struct mtk_nor *sp, u32 cmd, ulong clk)
+{
+ ulong delay = CLK_TO_US(sp, clk);
+ u32 reg;
+ int ret;
+
+ writel(cmd, sp->base + MTK_NOR_REG_CMD);
+ ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CMD, reg, !(reg & cmd),
+ delay / 3, (delay + 1) * 200);
+ if (ret < 0)
+ dev_err(sp->dev, "command %u timeout.\n", cmd);
+ return ret;
+}
+
+static void mtk_nor_set_addr(struct mtk_nor *sp, const struct spi_mem_op *op)
+{
+ u32 addr = op->addr.val;
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ writeb(addr & 0xff, sp->base + MTK_NOR_REG_RADR(i));
+ addr >>= 8;
+ }
+ if (op->addr.nbytes == 4) {
+ writeb(addr & 0xff, sp->base + MTK_NOR_REG_RADR3);
+ mtk_nor_rmw(sp, MTK_NOR_REG_BUSCFG, MTK_NOR_4B_ADDR, 0);
+ } else {
+ mtk_nor_rmw(sp, MTK_NOR_REG_BUSCFG, 0, MTK_NOR_4B_ADDR);
+ }
+}
+
+static bool need_bounce(struct mtk_nor *sp, const struct spi_mem_op *op)
+{
+ return ((uintptr_t)op->data.buf.in & MTK_NOR_DMA_ALIGN_MASK);
+}
+
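+/*
+ * The dedicated read path only accepts the standard NOR read opcodes
+ * with fixed dummy-cycle counts: 0x03 (no dummy) and 0x0b (8 dummy
+ * clocks) in single I/O mode, plus dual/quad data reads with the
+ * dummy counts checked below.
+ */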
+static bool mtk_nor_match_read(const struct spi_mem_op *op)
+{
+ int dummy = 0;
+
+ if (op->dummy.nbytes)
+ dummy = op->dummy.nbytes * BITS_PER_BYTE / op->dummy.buswidth;
+
+ if ((op->data.buswidth == 2) || (op->data.buswidth == 4)) {
+ if (op->addr.buswidth == 1)
+ return dummy == 8;
+ else if (op->addr.buswidth == 2)
+ return dummy == 4;
+ else if (op->addr.buswidth == 4)
+ return dummy == 6;
+ } else if ((op->addr.buswidth == 1) && (op->data.buswidth == 1)) {
+ if (op->cmd.opcode == 0x03)
+ return dummy == 0;
+ else if (op->cmd.opcode == 0x0b)
+ return dummy == 8;
+ }
+ return false;
+}
+
+static bool mtk_nor_match_prg(const struct spi_mem_op *op)
+{
+ int tx_len, rx_len, prg_len, prg_left;
+
+ // prg mode is spi-only.
+ if ((op->cmd.buswidth > 1) || (op->addr.buswidth > 1) ||
+ (op->dummy.buswidth > 1) || (op->data.buswidth > 1))
+ return false;
+
+ tx_len = op->cmd.nbytes + op->addr.nbytes;
+
+ if (op->data.dir == SPI_MEM_DATA_OUT) {
+ // count dummy bytes only if we need to write data after it
+ tx_len += op->dummy.nbytes;
+
+ // leave at least one byte for data
+ if (tx_len > MTK_NOR_REG_PRGDATA_MAX)
+ return false;
+
+ // if there's no addr, meaning adjust_op_size is impossible,
+ // check data length as well.
+ if ((!op->addr.nbytes) &&
+ (tx_len + op->data.nbytes > MTK_NOR_REG_PRGDATA_MAX + 1))
+ return false;
+ } else if (op->data.dir == SPI_MEM_DATA_IN) {
+ if (tx_len > MTK_NOR_REG_PRGDATA_MAX + 1)
+ return false;
+
+ rx_len = op->data.nbytes;
+ prg_left = MTK_NOR_PRG_CNT_MAX / 8 - tx_len - op->dummy.nbytes;
+ if (prg_left > MTK_NOR_REG_SHIFT_MAX + 1)
+ prg_left = MTK_NOR_REG_SHIFT_MAX + 1;
+ if (rx_len > prg_left) {
+ if (!op->addr.nbytes)
+ return false;
+ rx_len = prg_left;
+ }
+
+ prg_len = tx_len + op->dummy.nbytes + rx_len;
+ if (prg_len > MTK_NOR_PRG_CNT_MAX / 8)
+ return false;
+ } else {
+ prg_len = tx_len + op->dummy.nbytes;
+ if (prg_len > MTK_NOR_PRG_CNT_MAX / 8)
+ return false;
+ }
+ return true;
+}
+
+static void mtk_nor_adj_prg_size(struct spi_mem_op *op)
+{
+ int tx_len, tx_left, prg_left;
+
+ tx_len = op->cmd.nbytes + op->addr.nbytes;
+ if (op->data.dir == SPI_MEM_DATA_OUT) {
+ tx_len += op->dummy.nbytes;
+ tx_left = MTK_NOR_REG_PRGDATA_MAX + 1 - tx_len;
+ if (op->data.nbytes > tx_left)
+ op->data.nbytes = tx_left;
+ } else if (op->data.dir == SPI_MEM_DATA_IN) {
+ prg_left = MTK_NOR_PRG_CNT_MAX / 8 - tx_len - op->dummy.nbytes;
+ if (prg_left > MTK_NOR_REG_SHIFT_MAX + 1)
+ prg_left = MTK_NOR_REG_SHIFT_MAX + 1;
+ if (op->data.nbytes > prg_left)
+ op->data.nbytes = prg_left;
+ }
+}
+
+static int mtk_nor_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
+{
+ struct mtk_nor *sp = spi_controller_get_devdata(mem->spi->master);
+
+ if (!op->data.nbytes)
+ return 0;
+
+ if ((op->addr.nbytes == 3) || (op->addr.nbytes == 4)) {
+ if ((op->data.dir == SPI_MEM_DATA_IN) &&
+ mtk_nor_match_read(op)) {
+ // limit size to prevent timeout calculation overflow
+ if (op->data.nbytes > 0x400000)
+ op->data.nbytes = 0x400000;
+
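+ // DMA needs a 16-byte aligned flash address and length: fall
+ // back to a single-byte read when the address is unaligned,
+ // otherwise trim the length down to an aligned size or cap it
+ // to the bounce buffer.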
+ if ((op->addr.val & MTK_NOR_DMA_ALIGN_MASK) ||
+ (op->data.nbytes < MTK_NOR_DMA_ALIGN))
+ op->data.nbytes = 1;
+ else if (!need_bounce(sp, op))
+ op->data.nbytes &= ~MTK_NOR_DMA_ALIGN_MASK;
+ else if (op->data.nbytes > MTK_NOR_BOUNCE_BUF_SIZE)
+ op->data.nbytes = MTK_NOR_BOUNCE_BUF_SIZE;
+ return 0;
+ } else if (op->data.dir == SPI_MEM_DATA_OUT) {
+ if (op->data.nbytes >= MTK_NOR_PP_SIZE)
+ op->data.nbytes = MTK_NOR_PP_SIZE;
+ else
+ op->data.nbytes = 1;
+ return 0;
+ }
+ }
+
+ mtk_nor_adj_prg_size(op);
+ return 0;
+}
+
+static bool mtk_nor_supports_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ if (!spi_mem_default_supports_op(mem, op))
+ return false;
+
+ if (op->cmd.buswidth != 1)
+ return false;
+
+ if ((op->addr.nbytes == 3) || (op->addr.nbytes == 4)) {
+ switch (op->data.dir) {
+ case SPI_MEM_DATA_IN:
+ if (mtk_nor_match_read(op))
+ return true;
+ break;
+ case SPI_MEM_DATA_OUT:
+ if ((op->addr.buswidth == 1) &&
+ (op->dummy.nbytes == 0) &&
+ (op->data.buswidth == 1))
+ return true;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return mtk_nor_match_prg(op);
+}
+
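+/*
+ * Program the read bus mode: dual/quad read opcodes are latched into
+ * dedicated PRGDATA slots, while single-I/O reads only toggle the
+ * fast-read bit for opcode 0x0b.
+ */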
+static void mtk_nor_setup_bus(struct mtk_nor *sp, const struct spi_mem_op *op)
+{
+ u32 reg = 0;
+
+ if (op->addr.nbytes == 4)
+ reg |= MTK_NOR_4B_ADDR;
+
+ if (op->data.buswidth == 4) {
+ reg |= MTK_NOR_QUAD_READ;
+ writeb(op->cmd.opcode, sp->base + MTK_NOR_REG_PRGDATA(4));
+ if (op->addr.buswidth == 4)
+ reg |= MTK_NOR_QUAD_ADDR;
+ } else if (op->data.buswidth == 2) {
+ reg |= MTK_NOR_DUAL_READ;
+ writeb(op->cmd.opcode, sp->base + MTK_NOR_REG_PRGDATA(3));
+ if (op->addr.buswidth == 2)
+ reg |= MTK_NOR_DUAL_ADDR;
+ } else {
+ if (op->cmd.opcode == 0x0b)
+ mtk_nor_rmw(sp, MTK_NOR_REG_CFG1, MTK_NOR_FAST_READ, 0);
+ else
+ mtk_nor_rmw(sp, MTK_NOR_REG_CFG1, 0, MTK_NOR_FAST_READ);
+ }
+ mtk_nor_rmw(sp, MTK_NOR_REG_BUSCFG, reg, MTK_NOR_BUS_MODE_MASK);
+}
+
+static int mtk_nor_dma_exec(struct mtk_nor *sp, u32 from, unsigned int length,
+ dma_addr_t dma_addr)
+{
+ int ret = 0;
+ ulong delay;
+ u32 reg;
+
+ writel(from, sp->base + MTK_NOR_REG_DMA_FADR);
+ writel(dma_addr, sp->base + MTK_NOR_REG_DMA_DADR);
+ writel(dma_addr + length, sp->base + MTK_NOR_REG_DMA_END_DADR);
+
+ if (sp->high_dma) {
+ writel(upper_32_bits(dma_addr),
+ sp->base + MTK_NOR_REG_DMA_DADR_HB);
+ writel(upper_32_bits(dma_addr + length),
+ sp->base + MTK_NOR_REG_DMA_END_DADR_HB);
+ }
+
+ if (sp->has_irq) {
+ reinit_completion(&sp->op_done);
+ mtk_nor_rmw(sp, MTK_NOR_REG_IRQ_EN, MTK_NOR_IRQ_DMA, 0);
+ }
+
+ mtk_nor_rmw(sp, MTK_NOR_REG_DMA_CTL, MTK_NOR_DMA_START, 0);
+
+ delay = CLK_TO_US(sp, (length + 5) * BITS_PER_BYTE);
+
+ if (sp->has_irq) {
+ if (!wait_for_completion_timeout(&sp->op_done,
+ (delay + 1) * 100))
+ ret = -ETIMEDOUT;
+ } else {
+ ret = readl_poll_timeout(sp->base + MTK_NOR_REG_DMA_CTL, reg,
+ !(reg & MTK_NOR_DMA_START), delay / 3,
+ (delay + 1) * 100);
+ }
+
+ if (ret < 0)
+ dev_err(sp->dev, "dma read timeout.\n");
+
+ return ret;
+}
+
+static int mtk_nor_read_bounce(struct mtk_nor *sp, const struct spi_mem_op *op)
+{
+ unsigned int rdlen;
+ int ret;
+
+ if (op->data.nbytes & MTK_NOR_DMA_ALIGN_MASK)
+ rdlen = (op->data.nbytes + MTK_NOR_DMA_ALIGN) & ~MTK_NOR_DMA_ALIGN_MASK;
+ else
+ rdlen = op->data.nbytes;
+
+ ret = mtk_nor_dma_exec(sp, op->addr.val, rdlen, sp->buffer_dma);
+
+ if (!ret)
+ memcpy(op->data.buf.in, sp->buffer, op->data.nbytes);
+
+ return ret;
+}
+
+static int mtk_nor_read_dma(struct mtk_nor *sp, const struct spi_mem_op *op)
+{
+ int ret;
+ dma_addr_t dma_addr;
+
+ if (need_bounce(sp, op))
+ return mtk_nor_read_bounce(sp, op);
+
+ dma_addr = dma_map_single(sp->dev, op->data.buf.in,
+ op->data.nbytes, DMA_FROM_DEVICE);
+
+ if (dma_mapping_error(sp->dev, dma_addr))
+ return -EINVAL;
+
+ ret = mtk_nor_dma_exec(sp, op->addr.val, op->data.nbytes, dma_addr);
+
+ dma_unmap_single(sp->dev, dma_addr, op->data.nbytes, DMA_FROM_DEVICE);
+
+ return ret;
+}
+
+static int mtk_nor_read_pio(struct mtk_nor *sp, const struct spi_mem_op *op)
+{
+ u8 *buf = op->data.buf.in;
+ int ret;
+
+ ret = mtk_nor_cmd_exec(sp, MTK_NOR_CMD_READ, 6 * BITS_PER_BYTE);
+ if (!ret)
+ buf[0] = readb(sp->base + MTK_NOR_REG_RDATA);
+ return ret;
+}
+
+static int mtk_nor_write_buffer_enable(struct mtk_nor *sp)
+{
+ int ret;
+ u32 val;
+
+ if (sp->wbuf_en)
+ return 0;
+
+ val = readl(sp->base + MTK_NOR_REG_CFG2);
+ writel(val | MTK_NOR_WR_BUF_EN, sp->base + MTK_NOR_REG_CFG2);
+ ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CFG2, val,
+ val & MTK_NOR_WR_BUF_EN, 0, 10000);
+ if (!ret)
+ sp->wbuf_en = true;
+ return ret;
+}
+
+static int mtk_nor_write_buffer_disable(struct mtk_nor *sp)
+{
+ int ret;
+ u32 val;
+
+ if (!sp->wbuf_en)
+ return 0;
+ val = readl(sp->base + MTK_NOR_REG_CFG2);
+ writel(val & ~MTK_NOR_WR_BUF_EN, sp->base + MTK_NOR_REG_CFG2);
+ ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CFG2, val,
+ !(val & MTK_NOR_WR_BUF_EN), 0, 10000);
+ if (!ret)
+ sp->wbuf_en = false;
+ return ret;
+}
+
+static int mtk_nor_pp_buffered(struct mtk_nor *sp, const struct spi_mem_op *op)
+{
+ const u8 *buf = op->data.buf.out;
+ u32 val;
+ int ret, i;
+
+ ret = mtk_nor_write_buffer_enable(sp);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < op->data.nbytes; i += 4) {
+ val = buf[i + 3] << 24 | buf[i + 2] << 16 | buf[i + 1] << 8 |
+ buf[i];
+ writel(val, sp->base + MTK_NOR_REG_PP_DATA);
+ }
+ return mtk_nor_cmd_exec(sp, MTK_NOR_CMD_WRITE,
+ (op->data.nbytes + 5) * BITS_PER_BYTE);
+}
+
+static int mtk_nor_pp_unbuffered(struct mtk_nor *sp,
+ const struct spi_mem_op *op)
+{
+ const u8 *buf = op->data.buf.out;
+ int ret;
+
+ ret = mtk_nor_write_buffer_disable(sp);
+ if (ret < 0)
+ return ret;
+ writeb(buf[0], sp->base + MTK_NOR_REG_WDATA);
+ return mtk_nor_cmd_exec(sp, MTK_NOR_CMD_WRITE, 6 * BITS_PER_BYTE);
+}
+
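+/*
+ * Generic "program" transfer used for ops that don't match the
+ * dedicated read/page-program paths: up to MTK_NOR_PRG_CNT_MAX bits
+ * are shifted out from the PRGDATA registers (highest index first)
+ * and the tail of the transaction is captured in the SHIFT registers.
+ */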
+static int mtk_nor_spi_mem_prg(struct mtk_nor *sp, const struct spi_mem_op *op)
+{
+ int rx_len = 0;
+ int reg_offset = MTK_NOR_REG_PRGDATA_MAX;
+ int tx_len, prg_len;
+ int i, ret;
+ void __iomem *reg;
+ u8 bufbyte;
+
+ tx_len = op->cmd.nbytes + op->addr.nbytes;
+
+ // count dummy bytes only if we need to write data after it
+ if (op->data.dir == SPI_MEM_DATA_OUT)
+ tx_len += op->dummy.nbytes + op->data.nbytes;
+ else if (op->data.dir == SPI_MEM_DATA_IN)
+ rx_len = op->data.nbytes;
+
+ prg_len = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes +
+ op->data.nbytes;
+
+ // an invalid op may reach here if the caller calls exec_op without
+ // adjust_op_size. return -EINVAL instead of -ENOTSUPP so that
+ // spi-mem won't try this op again with generic spi transfers.
+ if ((tx_len > MTK_NOR_REG_PRGDATA_MAX + 1) ||
+ (rx_len > MTK_NOR_REG_SHIFT_MAX + 1) ||
+ (prg_len > MTK_NOR_PRG_CNT_MAX / 8))
+ return -EINVAL;
+
+ // fill tx data
+ for (i = op->cmd.nbytes; i > 0; i--, reg_offset--) {
+ reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
+ bufbyte = (op->cmd.opcode >> ((i - 1) * BITS_PER_BYTE)) & 0xff;
+ writeb(bufbyte, reg);
+ }
+
+ for (i = op->addr.nbytes; i > 0; i--, reg_offset--) {
+ reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
+ bufbyte = (op->addr.val >> ((i - 1) * BITS_PER_BYTE)) & 0xff;
+ writeb(bufbyte, reg);
+ }
+
+ if (op->data.dir == SPI_MEM_DATA_OUT) {
+ for (i = 0; i < op->dummy.nbytes; i++, reg_offset--) {
+ reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
+ writeb(0, reg);
+ }
+
+ for (i = 0; i < op->data.nbytes; i++, reg_offset--) {
+ reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
+ writeb(((const u8 *)(op->data.buf.out))[i], reg);
+ }
+ }
+
+ for (; reg_offset >= 0; reg_offset--) {
+ reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
+ writeb(0, reg);
+ }
+
+ // trigger op
+ if (rx_len)
+ writel(prg_len * BITS_PER_BYTE + sp->caps->extra_dummy_bit,
+ sp->base + MTK_NOR_REG_PRG_CNT);
+ else
+ writel(prg_len * BITS_PER_BYTE, sp->base + MTK_NOR_REG_PRG_CNT);
+
+ ret = mtk_nor_cmd_exec(sp, MTK_NOR_CMD_PROGRAM,
+ prg_len * BITS_PER_BYTE);
+ if (ret)
+ return ret;
+
+ // fetch read data
+ reg_offset = 0;
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ for (i = op->data.nbytes - 1; i >= 0; i--, reg_offset++) {
+ reg = sp->base + MTK_NOR_REG_SHIFT(reg_offset);
+ ((u8 *)(op->data.buf.in))[i] = readb(reg);
+ }
+ }
+
+ return 0;
+}
+
+static int mtk_nor_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ struct mtk_nor *sp = spi_controller_get_devdata(mem->spi->master);
+ int ret;
+
+ if ((op->data.nbytes == 0) ||
+ ((op->addr.nbytes != 3) && (op->addr.nbytes != 4)))
+ return mtk_nor_spi_mem_prg(sp, op);
+
+ if (op->data.dir == SPI_MEM_DATA_OUT) {
+ mtk_nor_set_addr(sp, op);
+ writeb(op->cmd.opcode, sp->base + MTK_NOR_REG_PRGDATA0);
+ if (op->data.nbytes == MTK_NOR_PP_SIZE)
+ return mtk_nor_pp_buffered(sp, op);
+ return mtk_nor_pp_unbuffered(sp, op);
+ }
+
+ if ((op->data.dir == SPI_MEM_DATA_IN) && mtk_nor_match_read(op)) {
+ ret = mtk_nor_write_buffer_disable(sp);
+ if (ret < 0)
+ return ret;
+ mtk_nor_setup_bus(sp, op);
+ if (op->data.nbytes == 1) {
+ mtk_nor_set_addr(sp, op);
+ return mtk_nor_read_pio(sp, op);
+ } else {
+ return mtk_nor_read_dma(sp, op);
+ }
+ }
+
+ return mtk_nor_spi_mem_prg(sp, op);
+}
+
+static int mtk_nor_setup(struct spi_device *spi)
+{
+ struct mtk_nor *sp = spi_controller_get_devdata(spi->master);
+
+ if (spi->max_speed_hz && (spi->max_speed_hz < sp->spi_freq)) {
+ dev_err(&spi->dev, "spi clock should be %u Hz.\n",
+ sp->spi_freq);
+ return -EINVAL;
+ }
+ spi->max_speed_hz = sp->spi_freq;
+
+ return 0;
+}
+
+static int mtk_nor_transfer_one_message(struct spi_controller *master,
+ struct spi_message *m)
+{
+ struct mtk_nor *sp = spi_controller_get_devdata(master);
+ struct spi_transfer *t = NULL;
+ unsigned long trx_len = 0;
+ int stat = 0;
+ int reg_offset = MTK_NOR_REG_PRGDATA_MAX;
+ void __iomem *reg;
+ const u8 *txbuf;
+ u8 *rxbuf;
+ int i;
+
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ txbuf = t->tx_buf;
+ for (i = 0; i < t->len; i++, reg_offset--) {
+ reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
+ if (txbuf)
+ writeb(txbuf[i], reg);
+ else
+ writeb(0, reg);
+ }
+ trx_len += t->len;
+ }
+
+ writel(trx_len * BITS_PER_BYTE, sp->base + MTK_NOR_REG_PRG_CNT);
+
+ stat = mtk_nor_cmd_exec(sp, MTK_NOR_CMD_PROGRAM,
+ trx_len * BITS_PER_BYTE);
+ if (stat < 0)
+ goto msg_done;
+
+ reg_offset = trx_len - 1;
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ rxbuf = t->rx_buf;
+ for (i = 0; i < t->len; i++, reg_offset--) {
+ reg = sp->base + MTK_NOR_REG_SHIFT(reg_offset);
+ if (rxbuf)
+ rxbuf[i] = readb(reg);
+ }
+ }
+
+ m->actual_length = trx_len;
+msg_done:
+ m->status = stat;
+ spi_finalize_current_message(master);
+
+ return 0;
+}
+
+static void mtk_nor_disable_clk(struct mtk_nor *sp)
+{
+ clk_disable_unprepare(sp->spi_clk);
+ clk_disable_unprepare(sp->ctlr_clk);
+ clk_disable_unprepare(sp->axi_clk);
+ clk_disable_unprepare(sp->axi_s_clk);
+}
+
+static int mtk_nor_enable_clk(struct mtk_nor *sp)
+{
+ int ret;
+
+ ret = clk_prepare_enable(sp->spi_clk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(sp->ctlr_clk);
+ if (ret) {
+ clk_disable_unprepare(sp->spi_clk);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(sp->axi_clk);
+ if (ret) {
+ clk_disable_unprepare(sp->spi_clk);
+ clk_disable_unprepare(sp->ctlr_clk);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(sp->axi_s_clk);
+ if (ret) {
+ clk_disable_unprepare(sp->spi_clk);
+ clk_disable_unprepare(sp->ctlr_clk);
+ clk_disable_unprepare(sp->axi_clk);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void mtk_nor_init(struct mtk_nor *sp)
+{
+ writel(0, sp->base + MTK_NOR_REG_IRQ_EN);
+ writel(MTK_NOR_IRQ_MASK, sp->base + MTK_NOR_REG_IRQ_STAT);
+
+ writel(MTK_NOR_ENABLE_SF_CMD, sp->base + MTK_NOR_REG_WP);
+ mtk_nor_rmw(sp, MTK_NOR_REG_CFG2, MTK_NOR_WR_CUSTOM_OP_EN, 0);
+ mtk_nor_rmw(sp, MTK_NOR_REG_CFG3,
+ MTK_NOR_DISABLE_WREN | MTK_NOR_DISABLE_SR_POLL, 0);
+}
+
+static irqreturn_t mtk_nor_irq_handler(int irq, void *data)
+{
+ struct mtk_nor *sp = data;
+ u32 irq_status, irq_enabled;
+
+ irq_status = readl(sp->base + MTK_NOR_REG_IRQ_STAT);
+ irq_enabled = readl(sp->base + MTK_NOR_REG_IRQ_EN);
+ // write status back to clear interrupt
+ writel(irq_status, sp->base + MTK_NOR_REG_IRQ_STAT);
+
+ if (!(irq_status & irq_enabled))
+ return IRQ_NONE;
+
+ if (irq_status & MTK_NOR_IRQ_DMA) {
+ complete(&sp->op_done);
+ writel(0, sp->base + MTK_NOR_REG_IRQ_EN);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static size_t mtk_max_msg_size(struct spi_device *spi)
+{
+ return MTK_NOR_PRG_MAX_SIZE;
+}
+
+static const struct spi_controller_mem_ops mtk_nor_mem_ops = {
+ .adjust_op_size = mtk_nor_adjust_op_size,
+ .supports_op = mtk_nor_supports_op,
+ .exec_op = mtk_nor_exec_op
+};
+
+static const struct mtk_nor_caps mtk_nor_caps_mt8173 = {
+ .dma_bits = 32,
+ .extra_dummy_bit = 0,
+};
+
+static const struct mtk_nor_caps mtk_nor_caps_mt8186 = {
+ .dma_bits = 32,
+ .extra_dummy_bit = 1,
+};
+
+static const struct mtk_nor_caps mtk_nor_caps_mt8192 = {
+ .dma_bits = 36,
+ .extra_dummy_bit = 0,
+};
+
+static const struct of_device_id mtk_nor_match[] = {
+ { .compatible = "mediatek,mt8173-nor", .data = &mtk_nor_caps_mt8173 },
+ { .compatible = "mediatek,mt8186-nor", .data = &mtk_nor_caps_mt8186 },
+ { .compatible = "mediatek,mt8192-nor", .data = &mtk_nor_caps_mt8192 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mtk_nor_match);
+
+static int mtk_nor_probe(struct platform_device *pdev)
+{
+ struct spi_controller *ctlr;
+ struct mtk_nor *sp;
+ struct mtk_nor_caps *caps;
+ void __iomem *base;
+ struct clk *spi_clk, *ctlr_clk, *axi_clk, *axi_s_clk;
+ int ret, irq;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ spi_clk = devm_clk_get(&pdev->dev, "spi");
+ if (IS_ERR(spi_clk))
+ return PTR_ERR(spi_clk);
+
+ ctlr_clk = devm_clk_get(&pdev->dev, "sf");
+ if (IS_ERR(ctlr_clk))
+ return PTR_ERR(ctlr_clk);
+
+ axi_clk = devm_clk_get_optional(&pdev->dev, "axi");
+ if (IS_ERR(axi_clk))
+ return PTR_ERR(axi_clk);
+
+ axi_s_clk = devm_clk_get_optional(&pdev->dev, "axi_s");
+ if (IS_ERR(axi_s_clk))
+ return PTR_ERR(axi_s_clk);
+
+ caps = (struct mtk_nor_caps *)of_device_get_match_data(&pdev->dev);
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(caps->dma_bits));
+ if (ret) {
+ dev_err(&pdev->dev, "failed to set dma mask(%u)\n", caps->dma_bits);
+ return ret;
+ }
+
+ ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*sp));
+ if (!ctlr) {
+ dev_err(&pdev->dev, "failed to allocate spi controller\n");
+ return -ENOMEM;
+ }
+
+ ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
+ ctlr->dev.of_node = pdev->dev.of_node;
+ ctlr->max_message_size = mtk_max_msg_size;
+ ctlr->mem_ops = &mtk_nor_mem_ops;
+ ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD;
+ ctlr->num_chipselect = 1;
+ ctlr->setup = mtk_nor_setup;
+ ctlr->transfer_one_message = mtk_nor_transfer_one_message;
+ ctlr->auto_runtime_pm = true;
+
+ dev_set_drvdata(&pdev->dev, ctlr);
+
+ sp = spi_controller_get_devdata(ctlr);
+ sp->base = base;
+ sp->has_irq = false;
+ sp->wbuf_en = false;
+ sp->ctlr = ctlr;
+ sp->dev = &pdev->dev;
+ sp->spi_clk = spi_clk;
+ sp->ctlr_clk = ctlr_clk;
+ sp->axi_clk = axi_clk;
+ sp->axi_s_clk = axi_s_clk;
+ sp->caps = caps;
+ sp->high_dma = caps->dma_bits > 32;
+ sp->buffer = dmam_alloc_coherent(&pdev->dev,
+ MTK_NOR_BOUNCE_BUF_SIZE + MTK_NOR_DMA_ALIGN,
+ &sp->buffer_dma, GFP_KERNEL);
+ if (!sp->buffer)
+ return -ENOMEM;
+
+ if ((uintptr_t)sp->buffer & MTK_NOR_DMA_ALIGN_MASK) {
+ dev_err(sp->dev, "misaligned allocation of internal buffer.\n");
+ return -ENOMEM;
+ }
+
+ ret = mtk_nor_enable_clk(sp);
+ if (ret < 0)
+ return ret;
+
+ sp->spi_freq = clk_get_rate(sp->spi_clk);
+
+ mtk_nor_init(sp);
+
+ irq = platform_get_irq_optional(pdev, 0);
+
+ if (irq < 0) {
+ dev_warn(sp->dev, "IRQ not available.");
+ } else {
+ ret = devm_request_irq(sp->dev, irq, mtk_nor_irq_handler, 0,
+ pdev->name, sp);
+ if (ret < 0) {
+ dev_warn(sp->dev, "failed to request IRQ.");
+ } else {
+ init_completion(&sp->op_done);
+ sp->has_irq = true;
+ }
+ }
+
+ pm_runtime_set_autosuspend_delay(&pdev->dev, -1);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_noresume(&pdev->dev);
+
+ ret = devm_spi_register_controller(&pdev->dev, ctlr);
+ if (ret < 0)
+ goto err_probe;
+
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
+
+ dev_info(&pdev->dev, "spi frequency: %d Hz\n", sp->spi_freq);
+
+ return 0;
+
+err_probe:
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+
+ mtk_nor_disable_clk(sp);
+
+ return ret;
+}
+
+static int mtk_nor_remove(struct platform_device *pdev)
+{
+ struct spi_controller *ctlr = dev_get_drvdata(&pdev->dev);
+ struct mtk_nor *sp = spi_controller_get_devdata(ctlr);
+
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+
+ mtk_nor_disable_clk(sp);
+
+ return 0;
+}
+
+static int __maybe_unused mtk_nor_runtime_suspend(struct device *dev)
+{
+ struct spi_controller *ctlr = dev_get_drvdata(dev);
+ struct mtk_nor *sp = spi_controller_get_devdata(ctlr);
+
+ mtk_nor_disable_clk(sp);
+
+ return 0;
+}
+
+static int __maybe_unused mtk_nor_runtime_resume(struct device *dev)
+{
+ struct spi_controller *ctlr = dev_get_drvdata(dev);
+ struct mtk_nor *sp = spi_controller_get_devdata(ctlr);
+
+ return mtk_nor_enable_clk(sp);
+}
+
+static int __maybe_unused mtk_nor_suspend(struct device *dev)
+{
+ return pm_runtime_force_suspend(dev);
+}
+
+static int __maybe_unused mtk_nor_resume(struct device *dev)
+{
+ struct spi_controller *ctlr = dev_get_drvdata(dev);
+ struct mtk_nor *sp = spi_controller_get_devdata(ctlr);
+ int ret;
+
+ ret = pm_runtime_force_resume(dev);
+ if (ret)
+ return ret;
+
+ mtk_nor_init(sp);
+
+ return 0;
+}
+
+static const struct dev_pm_ops mtk_nor_pm_ops = {
+ SET_RUNTIME_PM_OPS(mtk_nor_runtime_suspend,
+ mtk_nor_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(mtk_nor_suspend, mtk_nor_resume)
+};
+
+static struct platform_driver mtk_nor_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = mtk_nor_match,
+ .pm = &mtk_nor_pm_ops,
+ },
+ .probe = mtk_nor_probe,
+ .remove = mtk_nor_remove,
+};
+
+module_platform_driver(mtk_nor_driver);
+
+MODULE_DESCRIPTION("Mediatek SPI NOR controller driver");
+MODULE_AUTHOR("Chuanhong Guo <gch981213@gmail.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/spi/spi-mtk-snfi.c b/drivers/spi/spi-mtk-snfi.c
new file mode 100644
index 000000000..d66bf9762
--- /dev/null
+++ b/drivers/spi/spi-mtk-snfi.c
@@ -0,0 +1,1472 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Driver for the SPI-NAND mode of Mediatek NAND Flash Interface
+//
+// Copyright (c) 2022 Chuanhong Guo <gch981213@gmail.com>
+//
+// This driver is based on the SPI-NAND mtd driver from Mediatek SDK:
+//
+// Copyright (C) 2020 MediaTek Inc.
+// Author: Weijie Gao <weijie.gao@mediatek.com>
+//
+// This controller organizes the page data as several interleaved sectors
+// like the following: (sizeof(FDM + ECC) = snf->nfi_cfg.spare_size)
+// +---------+------+------+---------+------+------+-----+
+// | Sector1 | FDM1 | ECC1 | Sector2 | FDM2 | ECC2 | ... |
+// +---------+------+------+---------+------+------+-----+
+// With auto-format turned on, DMA only returns this part:
+// +---------+---------+-----+
+// | Sector1 | Sector2 | ... |
+// +---------+---------+-----+
+// The FDM data will be filled to the registers, and ECC parity data isn't
+// accessible.
+// With auto-format off, all ((Sector+FDM+ECC)*nsectors) will be read over DMA
+// in its original order shown in the first table. ECC can't be turned on when
+// auto-format is off.
+//
+// However, Linux SPI-NAND driver expects the data returned as:
+// +------+-----+
+// | Page | OOB |
+// +------+-----+
+// where the page data is stored contiguously instead of interleaved.
+// So we assume all instructions matching the page_op template between ECC
+// prepare_io_req and finish_io_req are for page cache r/w.
+// Here's how this spi-mem driver operates when reading:
+// 1. Always set snf->autofmt = true in prepare_io_req (even when ECC is off).
+// 2. Perform page ops and let the controller fill the DMA bounce buffer with
+// de-interleaved sector data and set FDM registers.
+// 3. Return the data as:
+// +---------+---------+-----+------+------+-----+
+// | Sector1 | Sector2 | ... | FDM1 | FDM2 | ... |
+// +---------+---------+-----+------+------+-----+
+// 4. For other matching spi_mem ops outside a prepare/finish_io_req pair,
+// read the data with auto-format off into the bounce buffer and copy
+// needed data to the buffer specified in the request.
+//
+// Write requests operate in a similar manner.
+// As a limitation of this strategy, we won't be able to access any ECC parity
+// data at all in Linux.
+//
+// Here's the bad block mark situation on MTK chips:
+// In older chips like mt7622, MTK uses the first FDM byte in the first sector
+// as the bad block mark. After de-interleaving, this byte appears at [pagesize]
+// in the returned data, which is the BBM position expected by the kernel. However,
+// the conventional bad block mark is the first byte of the OOB, which is part
+// of the last sector data in the interleaved layout. Instead of fixing their
+// hardware, MTK decided to address this inconsistency in software. On these
+// later chips, the BootROM expects the following:
+// 1. The [pagesize] byte on a nand page is used as BBM, which will appear at
+// (page_size - (nsectors - 1) * spare_size) in the DMA buffer.
+// 2. The original byte stored at that position in the DMA buffer will be stored
+// as the first byte of the FDM section in the last sector.
+// We can't disagree with the BootROM, so after de-interleaving, we need to
+// perform the following swaps in read:
+// 1. Store the BBM at [page_size - (nsectors - 1) * spare_size] to [page_size],
+// which is the expected BBM position by kernel.
+// 2. Store the page data byte at [pagesize + (nsectors-1) * fdm] back to
+// [page_size - (nsectors - 1) * spare_size]
+// Similarly, when writing, we need to perform swaps in the other direction.
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/iopoll.h>
+#include <linux/of_platform.h>
+#include <linux/mtd/nand-ecc-mtk.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+#include <linux/mtd/nand.h>
+
+// NFI registers
+#define NFI_CNFG 0x000
+#define CNFG_OP_MODE_S 12
+#define CNFG_OP_MODE_CUST 6
+#define CNFG_OP_MODE_PROGRAM 3
+#define CNFG_AUTO_FMT_EN BIT(9)
+#define CNFG_HW_ECC_EN BIT(8)
+#define CNFG_DMA_BURST_EN BIT(2)
+#define CNFG_READ_MODE BIT(1)
+#define CNFG_DMA_MODE BIT(0)
+
+#define NFI_PAGEFMT 0x0004
+#define NFI_SPARE_SIZE_LS_S 16
+#define NFI_FDM_ECC_NUM_S 12
+#define NFI_FDM_NUM_S 8
+#define NFI_SPARE_SIZE_S 4
+#define NFI_SEC_SEL_512 BIT(2)
+#define NFI_PAGE_SIZE_S 0
+#define NFI_PAGE_SIZE_512_2K 0
+#define NFI_PAGE_SIZE_2K_4K 1
+#define NFI_PAGE_SIZE_4K_8K 2
+#define NFI_PAGE_SIZE_8K_16K 3
+
+#define NFI_CON 0x008
+#define CON_SEC_NUM_S 12
+#define CON_BWR BIT(9)
+#define CON_BRD BIT(8)
+#define CON_NFI_RST BIT(1)
+#define CON_FIFO_FLUSH BIT(0)
+
+#define NFI_INTR_EN 0x010
+#define NFI_INTR_STA 0x014
+#define NFI_IRQ_INTR_EN BIT(31)
+#define NFI_IRQ_CUS_READ BIT(8)
+#define NFI_IRQ_CUS_PG BIT(7)
+
+#define NFI_CMD 0x020
+#define NFI_CMD_DUMMY_READ 0x00
+#define NFI_CMD_DUMMY_WRITE 0x80
+
+#define NFI_STRDATA 0x040
+#define STR_DATA BIT(0)
+
+#define NFI_STA 0x060
+#define NFI_NAND_FSM GENMASK(28, 24)
+#define NFI_FSM GENMASK(19, 16)
+#define READ_EMPTY BIT(12)
+
+#define NFI_FIFOSTA 0x064
+#define FIFO_WR_REMAIN_S 8
+#define FIFO_RD_REMAIN_S 0
+
+#define NFI_ADDRCNTR 0x070
+#define SEC_CNTR GENMASK(16, 12)
+#define SEC_CNTR_S 12
+#define NFI_SEC_CNTR(val) (((val)&SEC_CNTR) >> SEC_CNTR_S)
+
+#define NFI_STRADDR 0x080
+
+#define NFI_BYTELEN 0x084
+#define BUS_SEC_CNTR(val) (((val)&SEC_CNTR) >> SEC_CNTR_S)
+
+#define NFI_FDM0L 0x0a0
+#define NFI_FDM0M 0x0a4
+#define NFI_FDML(n) (NFI_FDM0L + (n)*8)
+#define NFI_FDMM(n) (NFI_FDM0M + (n)*8)
+
+#define NFI_DEBUG_CON1 0x220
+#define WBUF_EN BIT(2)
+
+#define NFI_MASTERSTA 0x224
+#define MAS_ADDR GENMASK(11, 9)
+#define MAS_RD GENMASK(8, 6)
+#define MAS_WR GENMASK(5, 3)
+#define MAS_RDDLY GENMASK(2, 0)
+#define NFI_MASTERSTA_MASK_7622 (MAS_ADDR | MAS_RD | MAS_WR | MAS_RDDLY)
+
+// SNFI registers
+#define SNF_MAC_CTL 0x500
+#define MAC_XIO_SEL BIT(4)
+#define SF_MAC_EN BIT(3)
+#define SF_TRIG BIT(2)
+#define WIP_READY BIT(1)
+#define WIP BIT(0)
+
+#define SNF_MAC_OUTL 0x504
+#define SNF_MAC_INL 0x508
+
+#define SNF_RD_CTL2 0x510
+#define DATA_READ_DUMMY_S 8
+#define DATA_READ_MAX_DUMMY 0xf
+#define DATA_READ_CMD_S 0
+
+#define SNF_RD_CTL3 0x514
+
+#define SNF_PG_CTL1 0x524
+#define PG_LOAD_CMD_S 8
+
+#define SNF_PG_CTL2 0x528
+
+#define SNF_MISC_CTL 0x538
+#define SW_RST BIT(28)
+#define FIFO_RD_LTC_S 25
+#define PG_LOAD_X4_EN BIT(20)
+#define DATA_READ_MODE_S 16
+#define DATA_READ_MODE GENMASK(18, 16)
+#define DATA_READ_MODE_X1 0
+#define DATA_READ_MODE_X2 1
+#define DATA_READ_MODE_X4 2
+#define DATA_READ_MODE_DUAL 5
+#define DATA_READ_MODE_QUAD 6
+#define PG_LOAD_CUSTOM_EN BIT(7)
+#define DATARD_CUSTOM_EN BIT(6)
+#define CS_DESELECT_CYC_S 0
+
+#define SNF_MISC_CTL2 0x53c
+#define PROGRAM_LOAD_BYTE_NUM_S 16
+#define READ_DATA_BYTE_NUM_S 11
+
+#define SNF_DLY_CTL3 0x548
+#define SFCK_SAM_DLY_S 0
+
+#define SNF_STA_CTL1 0x550
+#define CUS_PG_DONE BIT(28)
+#define CUS_READ_DONE BIT(27)
+#define SPI_STATE_S 0
+#define SPI_STATE GENMASK(3, 0)
+
+#define SNF_CFG 0x55c
+#define SPI_MODE BIT(0)
+
+#define SNF_GPRAM 0x800
+#define SNF_GPRAM_SIZE 0xa0
+
+#define SNFI_POLL_INTERVAL 1000000
+
+static const u8 mt7622_spare_sizes[] = { 16, 26, 27, 28 };
+
+struct mtk_snand_caps {
+ u16 sector_size;
+ u16 max_sectors;
+ u16 fdm_size;
+ u16 fdm_ecc_size;
+ u16 fifo_size;
+
+ bool bbm_swap;
+ bool empty_page_check;
+ u32 mastersta_mask;
+
+ const u8 *spare_sizes;
+ u32 num_spare_size;
+};
+
+static const struct mtk_snand_caps mt7622_snand_caps = {
+ .sector_size = 512,
+ .max_sectors = 8,
+ .fdm_size = 8,
+ .fdm_ecc_size = 1,
+ .fifo_size = 32,
+ .bbm_swap = false,
+ .empty_page_check = false,
+ .mastersta_mask = NFI_MASTERSTA_MASK_7622,
+ .spare_sizes = mt7622_spare_sizes,
+ .num_spare_size = ARRAY_SIZE(mt7622_spare_sizes)
+};
+
+static const struct mtk_snand_caps mt7629_snand_caps = {
+ .sector_size = 512,
+ .max_sectors = 8,
+ .fdm_size = 8,
+ .fdm_ecc_size = 1,
+ .fifo_size = 32,
+ .bbm_swap = true,
+ .empty_page_check = false,
+ .mastersta_mask = NFI_MASTERSTA_MASK_7622,
+ .spare_sizes = mt7622_spare_sizes,
+ .num_spare_size = ARRAY_SIZE(mt7622_spare_sizes)
+};
+
+struct mtk_snand_conf {
+ size_t page_size;
+ size_t oob_size;
+ u8 nsectors;
+ u8 spare_size;
+};
+
+struct mtk_snand {
+ struct spi_controller *ctlr;
+ struct device *dev;
+ struct clk *nfi_clk;
+ struct clk *pad_clk;
+ void __iomem *nfi_base;
+ int irq;
+ struct completion op_done;
+ const struct mtk_snand_caps *caps;
+ struct mtk_ecc_config *ecc_cfg;
+ struct mtk_ecc *ecc;
+ struct mtk_snand_conf nfi_cfg;
+ struct mtk_ecc_stats ecc_stats;
+ struct nand_ecc_engine ecc_eng;
+ bool autofmt;
+ u8 *buf;
+ size_t buf_len;
+};
+
+static struct mtk_snand *nand_to_mtk_snand(struct nand_device *nand)
+{
+ struct nand_ecc_engine *eng = nand->ecc.engine;
+
+ return container_of(eng, struct mtk_snand, ecc_eng);
+}
+
+static inline int snand_prepare_bouncebuf(struct mtk_snand *snf, size_t size)
+{
+ if (snf->buf_len >= size)
+ return 0;
+ kfree(snf->buf);
+ snf->buf = kmalloc(size, GFP_KERNEL);
+ if (!snf->buf)
+ return -ENOMEM;
+ snf->buf_len = size;
+ memset(snf->buf, 0xff, snf->buf_len);
+ return 0;
+}
+
+static inline u32 nfi_read32(struct mtk_snand *snf, u32 reg)
+{
+ return readl(snf->nfi_base + reg);
+}
+
+static inline void nfi_write32(struct mtk_snand *snf, u32 reg, u32 val)
+{
+ writel(val, snf->nfi_base + reg);
+}
+
+static inline void nfi_write16(struct mtk_snand *snf, u32 reg, u16 val)
+{
+ writew(val, snf->nfi_base + reg);
+}
+
+static inline void nfi_rmw32(struct mtk_snand *snf, u32 reg, u32 clr, u32 set)
+{
+ u32 val;
+
+ val = readl(snf->nfi_base + reg);
+ val &= ~clr;
+ val |= set;
+ writel(val, snf->nfi_base + reg);
+}
+
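+// Read 'len' bytes starting at register offset 'reg' (which may not be
+// word-aligned, e.g. when picking up RX data from the SNFI GPRAM), using
+// aligned 32-bit reads and extracting the individual bytes.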
+static void nfi_read_data(struct mtk_snand *snf, u32 reg, u8 *data, u32 len)
+{
+ u32 i, val = 0, es = sizeof(u32);
+
+ for (i = reg; i < reg + len; i++) {
+ if (i == reg || i % es == 0)
+ val = nfi_read32(snf, i & ~(es - 1));
+
+ *data++ = (u8)(val >> (8 * (i % es)));
+ }
+}
+
+static int mtk_nfi_reset(struct mtk_snand *snf)
+{
+ u32 val, fifo_mask;
+ int ret;
+
+ nfi_write32(snf, NFI_CON, CON_FIFO_FLUSH | CON_NFI_RST);
+
+ ret = readw_poll_timeout(snf->nfi_base + NFI_MASTERSTA, val,
+ !(val & snf->caps->mastersta_mask), 0,
+ SNFI_POLL_INTERVAL);
+ if (ret) {
+ dev_err(snf->dev, "NFI master is still busy after reset\n");
+ return ret;
+ }
+
+ ret = readl_poll_timeout(snf->nfi_base + NFI_STA, val,
+ !(val & (NFI_FSM | NFI_NAND_FSM)), 0,
+ SNFI_POLL_INTERVAL);
+ if (ret) {
+ dev_err(snf->dev, "Failed to reset NFI\n");
+ return ret;
+ }
+
+ fifo_mask = ((snf->caps->fifo_size - 1) << FIFO_RD_REMAIN_S) |
+ ((snf->caps->fifo_size - 1) << FIFO_WR_REMAIN_S);
+ ret = readw_poll_timeout(snf->nfi_base + NFI_FIFOSTA, val,
+ !(val & fifo_mask), 0, SNFI_POLL_INTERVAL);
+ if (ret) {
+ dev_err(snf->dev, "NFI FIFOs are not empty\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mtk_snand_mac_reset(struct mtk_snand *snf)
+{
+ int ret;
+ u32 val;
+
+ nfi_rmw32(snf, SNF_MISC_CTL, 0, SW_RST);
+
+ ret = readl_poll_timeout(snf->nfi_base + SNF_STA_CTL1, val,
+ !(val & SPI_STATE), 0, SNFI_POLL_INTERVAL);
+ if (ret)
+ dev_err(snf->dev, "Failed to reset SNFI MAC\n");
+
+ nfi_write32(snf, SNF_MISC_CTL,
+ (2 << FIFO_RD_LTC_S) | (10 << CS_DESELECT_CYC_S));
+
+ return ret;
+}
+
+static int mtk_snand_mac_trigger(struct mtk_snand *snf, u32 outlen, u32 inlen)
+{
+ int ret;
+ u32 val;
+
+ nfi_write32(snf, SNF_MAC_CTL, SF_MAC_EN);
+ nfi_write32(snf, SNF_MAC_OUTL, outlen);
+ nfi_write32(snf, SNF_MAC_INL, inlen);
+
+ nfi_write32(snf, SNF_MAC_CTL, SF_MAC_EN | SF_TRIG);
+
+ ret = readl_poll_timeout(snf->nfi_base + SNF_MAC_CTL, val,
+ val & WIP_READY, 0, SNFI_POLL_INTERVAL);
+ if (ret) {
+ dev_err(snf->dev, "Timed out waiting for WIP_READY\n");
+ goto cleanup;
+ }
+
+ ret = readl_poll_timeout(snf->nfi_base + SNF_MAC_CTL, val, !(val & WIP),
+ 0, SNFI_POLL_INTERVAL);
+ if (ret)
+ dev_err(snf->dev, "Timed out waiting for WIP cleared\n");
+
+cleanup:
+ nfi_write32(snf, SNF_MAC_CTL, 0);
+
+ return ret;
+}
+
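+// Execute a generic (non page-cache) op: pack the command, address, dummy and
+// TX data bytes into the SNFI GPRAM, trigger the MAC, then read any RX data
+// back out of the GPRAM.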
+static int mtk_snand_mac_io(struct mtk_snand *snf, const struct spi_mem_op *op)
+{
+ u32 rx_len = 0;
+ u32 reg_offs = 0;
+ u32 val = 0;
+ const u8 *tx_buf = NULL;
+ u8 *rx_buf = NULL;
+ int i, ret;
+ u8 b;
+
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ rx_len = op->data.nbytes;
+ rx_buf = op->data.buf.in;
+ } else {
+ tx_buf = op->data.buf.out;
+ }
+
+ mtk_snand_mac_reset(snf);
+
+ for (i = 0; i < op->cmd.nbytes; i++, reg_offs++) {
+ b = (op->cmd.opcode >> ((op->cmd.nbytes - i - 1) * 8)) & 0xff;
+ val |= b << (8 * (reg_offs % 4));
+ if (reg_offs % 4 == 3) {
+ nfi_write32(snf, SNF_GPRAM + reg_offs - 3, val);
+ val = 0;
+ }
+ }
+
+ for (i = 0; i < op->addr.nbytes; i++, reg_offs++) {
+ b = (op->addr.val >> ((op->addr.nbytes - i - 1) * 8)) & 0xff;
+ val |= b << (8 * (reg_offs % 4));
+ if (reg_offs % 4 == 3) {
+ nfi_write32(snf, SNF_GPRAM + reg_offs - 3, val);
+ val = 0;
+ }
+ }
+
+ for (i = 0; i < op->dummy.nbytes; i++, reg_offs++) {
+ if (reg_offs % 4 == 3) {
+ nfi_write32(snf, SNF_GPRAM + reg_offs - 3, val);
+ val = 0;
+ }
+ }
+
+ if (op->data.dir == SPI_MEM_DATA_OUT) {
+ for (i = 0; i < op->data.nbytes; i++, reg_offs++) {
+ val |= tx_buf[i] << (8 * (reg_offs % 4));
+ if (reg_offs % 4 == 3) {
+ nfi_write32(snf, SNF_GPRAM + reg_offs - 3, val);
+ val = 0;
+ }
+ }
+ }
+
+ if (reg_offs % 4)
+ nfi_write32(snf, SNF_GPRAM + (reg_offs & ~3), val);
+
+ for (i = 0; i < reg_offs; i += 4)
+ dev_dbg(snf->dev, "%d: %08X", i,
+ nfi_read32(snf, SNF_GPRAM + i));
+
+ dev_dbg(snf->dev, "SNF TX: %u RX: %u", reg_offs, rx_len);
+
+ ret = mtk_snand_mac_trigger(snf, reg_offs, rx_len);
+ if (ret)
+ return ret;
+
+ if (!rx_len)
+ return 0;
+
+ nfi_read_data(snf, SNF_GPRAM + reg_offs, rx_buf, rx_len);
+ return 0;
+}
+
+static int mtk_snand_setup_pagefmt(struct mtk_snand *snf, u32 page_size,
+ u32 oob_size)
+{
+ int spare_idx = -1;
+ u32 spare_size, spare_size_shift, pagesize_idx;
+ u32 sector_size_512;
+ u8 nsectors;
+ int i;
+
+ // skip if it's already configured as required.
+ if (snf->nfi_cfg.page_size == page_size &&
+ snf->nfi_cfg.oob_size == oob_size)
+ return 0;
+
+ nsectors = page_size / snf->caps->sector_size;
+ if (nsectors > snf->caps->max_sectors) {
+ dev_err(snf->dev, "too many sectors required.\n");
+ goto err;
+ }
+
+ if (snf->caps->sector_size == 512) {
+ sector_size_512 = NFI_SEC_SEL_512;
+ spare_size_shift = NFI_SPARE_SIZE_S;
+ } else {
+ sector_size_512 = 0;
+ spare_size_shift = NFI_SPARE_SIZE_LS_S;
+ }
+
+ switch (page_size) {
+ case SZ_512:
+ pagesize_idx = NFI_PAGE_SIZE_512_2K;
+ break;
+ case SZ_2K:
+ if (snf->caps->sector_size == 512)
+ pagesize_idx = NFI_PAGE_SIZE_2K_4K;
+ else
+ pagesize_idx = NFI_PAGE_SIZE_512_2K;
+ break;
+ case SZ_4K:
+ if (snf->caps->sector_size == 512)
+ pagesize_idx = NFI_PAGE_SIZE_4K_8K;
+ else
+ pagesize_idx = NFI_PAGE_SIZE_2K_4K;
+ break;
+ case SZ_8K:
+ if (snf->caps->sector_size == 512)
+ pagesize_idx = NFI_PAGE_SIZE_8K_16K;
+ else
+ pagesize_idx = NFI_PAGE_SIZE_4K_8K;
+ break;
+ case SZ_16K:
+ pagesize_idx = NFI_PAGE_SIZE_8K_16K;
+ break;
+ default:
+ dev_err(snf->dev, "unsupported page size.\n");
+ goto err;
+ }
+
+ spare_size = oob_size / nsectors;
+ // If we're using the 1KB sector size, HW will automatically double the
+ // spare size. We should only use half of the value in this case.
+ if (snf->caps->sector_size == 1024)
+ spare_size /= 2;
+
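+	// Pick the largest supported spare size that still fits in the spare
+	// area available per sector.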
+ for (i = snf->caps->num_spare_size - 1; i >= 0; i--) {
+ if (snf->caps->spare_sizes[i] <= spare_size) {
+ spare_size = snf->caps->spare_sizes[i];
+ if (snf->caps->sector_size == 1024)
+ spare_size *= 2;
+ spare_idx = i;
+ break;
+ }
+ }
+
+ if (spare_idx < 0) {
+ dev_err(snf->dev, "unsupported spare size: %u\n", spare_size);
+ goto err;
+ }
+
+ nfi_write32(snf, NFI_PAGEFMT,
+ (snf->caps->fdm_ecc_size << NFI_FDM_ECC_NUM_S) |
+ (snf->caps->fdm_size << NFI_FDM_NUM_S) |
+ (spare_idx << spare_size_shift) |
+ (pagesize_idx << NFI_PAGE_SIZE_S) |
+ sector_size_512);
+
+ snf->nfi_cfg.page_size = page_size;
+ snf->nfi_cfg.oob_size = oob_size;
+ snf->nfi_cfg.nsectors = nsectors;
+ snf->nfi_cfg.spare_size = spare_size;
+
+ dev_dbg(snf->dev, "page format: (%u + %u) * %u\n",
+ snf->caps->sector_size, spare_size, nsectors);
+ return snand_prepare_bouncebuf(snf, page_size + oob_size);
+err:
+ dev_err(snf->dev, "page size %u + %u is not supported\n", page_size,
+ oob_size);
+ return -EOPNOTSUPP;
+}
+
+static int mtk_snand_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobecc)
+{
+ // ECC area is not accessible
+ return -ERANGE;
+}
+
+static int mtk_snand_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobfree)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ struct mtk_snand *ms = nand_to_mtk_snand(nand);
+
+ if (section >= ms->nfi_cfg.nsectors)
+ return -ERANGE;
+
+ oobfree->length = ms->caps->fdm_size - 1;
+ oobfree->offset = section * ms->caps->fdm_size + 1;
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops mtk_snand_ooblayout = {
+ .ecc = mtk_snand_ooblayout_ecc,
+ .free = mtk_snand_ooblayout_free,
+};
+
+static int mtk_snand_ecc_init_ctx(struct nand_device *nand)
+{
+ struct mtk_snand *snf = nand_to_mtk_snand(nand);
+ struct nand_ecc_props *conf = &nand->ecc.ctx.conf;
+ struct nand_ecc_props *reqs = &nand->ecc.requirements;
+ struct nand_ecc_props *user = &nand->ecc.user_conf;
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+ int step_size = 0, strength = 0, desired_correction = 0, steps;
+ bool ecc_user = false;
+ int ret;
+ u32 parity_bits, max_ecc_bytes;
+ struct mtk_ecc_config *ecc_cfg;
+
+ ret = mtk_snand_setup_pagefmt(snf, nand->memorg.pagesize,
+ nand->memorg.oobsize);
+ if (ret)
+ return ret;
+
+ ecc_cfg = kzalloc(sizeof(*ecc_cfg), GFP_KERNEL);
+ if (!ecc_cfg)
+ return -ENOMEM;
+
+ nand->ecc.ctx.priv = ecc_cfg;
+
+ if (user->step_size && user->strength) {
+ step_size = user->step_size;
+ strength = user->strength;
+ ecc_user = true;
+ } else if (reqs->step_size && reqs->strength) {
+ step_size = reqs->step_size;
+ strength = reqs->strength;
+ }
+
+ if (step_size && strength) {
+ steps = mtd->writesize / step_size;
+ desired_correction = steps * strength;
+ strength = desired_correction / snf->nfi_cfg.nsectors;
+ }
+
+ ecc_cfg->mode = ECC_NFI_MODE;
+ ecc_cfg->sectors = snf->nfi_cfg.nsectors;
+ ecc_cfg->len = snf->caps->sector_size + snf->caps->fdm_ecc_size;
+
+ // calculate the max possible strength under current page format
+ parity_bits = mtk_ecc_get_parity_bits(snf->ecc);
+ max_ecc_bytes = snf->nfi_cfg.spare_size - snf->caps->fdm_size;
+ ecc_cfg->strength = max_ecc_bytes * 8 / parity_bits;
+ mtk_ecc_adjust_strength(snf->ecc, &ecc_cfg->strength);
+
+ // if there's a user requested strength, find the minimum strength that
+ // meets the requirement. Otherwise use the maximum strength which is
+ // expected by BootROM.
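+	// The search below lowers the candidate strength step by step, letting
+	// mtk_ecc_adjust_strength() round it to a supported value, and stops
+	// once no supported value below the current strength still satisfies
+	// the request.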
+ if (ecc_user && strength) {
+ u32 s_next = ecc_cfg->strength - 1;
+
+ while (1) {
+ mtk_ecc_adjust_strength(snf->ecc, &s_next);
+ if (s_next >= ecc_cfg->strength)
+ break;
+ if (s_next < strength)
+ break;
+			ecc_cfg->strength = s_next;
+			s_next--;
+ }
+ }
+
+ mtd_set_ooblayout(mtd, &mtk_snand_ooblayout);
+
+ conf->step_size = snf->caps->sector_size;
+ conf->strength = ecc_cfg->strength;
+
+ if (ecc_cfg->strength < strength)
+ dev_warn(snf->dev, "unable to fulfill ECC of %u bits.\n",
+ strength);
+ dev_info(snf->dev, "ECC strength: %u bits per %u bytes\n",
+ ecc_cfg->strength, snf->caps->sector_size);
+
+ return 0;
+}
+
+static void mtk_snand_ecc_cleanup_ctx(struct nand_device *nand)
+{
+ struct mtk_ecc_config *ecc_cfg = nand_to_ecc_ctx(nand);
+
+ kfree(ecc_cfg);
+}
+
+static int mtk_snand_ecc_prepare_io_req(struct nand_device *nand,
+ struct nand_page_io_req *req)
+{
+ struct mtk_snand *snf = nand_to_mtk_snand(nand);
+ struct mtk_ecc_config *ecc_cfg = nand_to_ecc_ctx(nand);
+ int ret;
+
+ ret = mtk_snand_setup_pagefmt(snf, nand->memorg.pagesize,
+ nand->memorg.oobsize);
+ if (ret)
+ return ret;
+ snf->autofmt = true;
+ snf->ecc_cfg = ecc_cfg;
+ return 0;
+}
+
+static int mtk_snand_ecc_finish_io_req(struct nand_device *nand,
+ struct nand_page_io_req *req)
+{
+ struct mtk_snand *snf = nand_to_mtk_snand(nand);
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+
+ snf->ecc_cfg = NULL;
+ snf->autofmt = false;
+ if ((req->mode == MTD_OPS_RAW) || (req->type != NAND_PAGE_READ))
+ return 0;
+
+ if (snf->ecc_stats.failed)
+ mtd->ecc_stats.failed += snf->ecc_stats.failed;
+ mtd->ecc_stats.corrected += snf->ecc_stats.corrected;
+ return snf->ecc_stats.failed ? -EBADMSG : snf->ecc_stats.bitflips;
+}
+
+static struct nand_ecc_engine_ops mtk_snfi_ecc_engine_ops = {
+ .init_ctx = mtk_snand_ecc_init_ctx,
+ .cleanup_ctx = mtk_snand_ecc_cleanup_ctx,
+ .prepare_io_req = mtk_snand_ecc_prepare_io_req,
+ .finish_io_req = mtk_snand_ecc_finish_io_req,
+};
+
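+// Copy the per-sector FDM (OOB free) bytes out of the NFI_FDMxL/M registers
+// into the OOB portion of the buffer.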
+static void mtk_snand_read_fdm(struct mtk_snand *snf, u8 *buf)
+{
+ u32 vall, valm;
+ u8 *oobptr = buf;
+ int i, j;
+
+ for (i = 0; i < snf->nfi_cfg.nsectors; i++) {
+ vall = nfi_read32(snf, NFI_FDML(i));
+ valm = nfi_read32(snf, NFI_FDMM(i));
+
+ for (j = 0; j < snf->caps->fdm_size; j++)
+ oobptr[j] = (j >= 4 ? valm : vall) >> ((j % 4) * 8);
+
+ oobptr += snf->caps->fdm_size;
+ }
+}
+
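+// Load the per-sector FDM (OOB free) bytes from the buffer into the
+// NFI_FDMxL/M registers, padding with 0xff beyond fdm_size.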
+static void mtk_snand_write_fdm(struct mtk_snand *snf, const u8 *buf)
+{
+ u32 fdm_size = snf->caps->fdm_size;
+ const u8 *oobptr = buf;
+ u32 vall, valm;
+ int i, j;
+
+ for (i = 0; i < snf->nfi_cfg.nsectors; i++) {
+ vall = 0;
+ valm = 0;
+
+ for (j = 0; j < 8; j++) {
+ if (j < 4)
+ vall |= (j < fdm_size ? oobptr[j] : 0xff)
+ << (j * 8);
+ else
+ valm |= (j < fdm_size ? oobptr[j] : 0xff)
+ << ((j - 4) * 8);
+ }
+
+ nfi_write32(snf, NFI_FDML(i), vall);
+ nfi_write32(snf, NFI_FDMM(i), valm);
+
+ oobptr += fdm_size;
+ }
+}
+
+static void mtk_snand_bm_swap(struct mtk_snand *snf, u8 *buf)
+{
+ u32 buf_bbm_pos, fdm_bbm_pos;
+
+ if (!snf->caps->bbm_swap || snf->nfi_cfg.nsectors == 1)
+ return;
+
+ // swap [pagesize] byte on nand with the first fdm byte
+ // in the last sector.
+ buf_bbm_pos = snf->nfi_cfg.page_size -
+ (snf->nfi_cfg.nsectors - 1) * snf->nfi_cfg.spare_size;
+ fdm_bbm_pos = snf->nfi_cfg.page_size +
+ (snf->nfi_cfg.nsectors - 1) * snf->caps->fdm_size;
+
+ swap(snf->buf[fdm_bbm_pos], buf[buf_bbm_pos]);
+}
+
+static void mtk_snand_fdm_bm_swap(struct mtk_snand *snf)
+{
+ u32 fdm_bbm_pos1, fdm_bbm_pos2;
+
+ if (!snf->caps->bbm_swap || snf->nfi_cfg.nsectors == 1)
+ return;
+
+ // swap the first fdm byte in the first and the last sector.
+ fdm_bbm_pos1 = snf->nfi_cfg.page_size;
+ fdm_bbm_pos2 = snf->nfi_cfg.page_size +
+ (snf->nfi_cfg.nsectors - 1) * snf->caps->fdm_size;
+ swap(snf->buf[fdm_bbm_pos1], snf->buf[fdm_bbm_pos2]);
+}
+
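+// Handle a Read From Cache page op: set up the SNFI read parameters, run a
+// custom-mode NFI DMA read into the bounce buffer (or directly into the
+// caller's buffer when possible), with hardware ECC when requested, collect
+// the FDM bytes, apply the BBM swaps and copy the requested range back to
+// the caller.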
+static int mtk_snand_read_page_cache(struct mtk_snand *snf,
+ const struct spi_mem_op *op)
+{
+ u8 *buf = snf->buf;
+ u8 *buf_fdm = buf + snf->nfi_cfg.page_size;
+ // the address part to be sent by the controller
+ u32 op_addr = op->addr.val;
+ // where to start copying data from bounce buffer
+ u32 rd_offset = 0;
+ u32 dummy_clk = (op->dummy.nbytes * BITS_PER_BYTE / op->dummy.buswidth);
+ u32 op_mode = 0;
+ u32 dma_len = snf->buf_len;
+ int ret = 0;
+ u32 rd_mode, rd_bytes, val;
+ dma_addr_t buf_dma;
+
+ if (snf->autofmt) {
+ u32 last_bit;
+ u32 mask;
+
+ dma_len = snf->nfi_cfg.page_size;
+ op_mode = CNFG_AUTO_FMT_EN;
+ if (op->data.ecc)
+ op_mode |= CNFG_HW_ECC_EN;
+ // extract the plane bit:
+ // Find the highest bit set in (pagesize+oobsize).
+ // Bits higher than that in op->addr are kept and sent over SPI
+ // Lower bits are used as an offset for copying data from DMA
+ // bounce buffer.
+ last_bit = fls(snf->nfi_cfg.page_size + snf->nfi_cfg.oob_size);
+ mask = (1 << last_bit) - 1;
+ rd_offset = op_addr & mask;
+ op_addr &= ~mask;
+
+ // check if we can dma to the caller memory
+ if (rd_offset == 0 && op->data.nbytes >= snf->nfi_cfg.page_size)
+ buf = op->data.buf.in;
+ }
+ mtk_snand_mac_reset(snf);
+ mtk_nfi_reset(snf);
+
+ // command and dummy cycles
+ nfi_write32(snf, SNF_RD_CTL2,
+ (dummy_clk << DATA_READ_DUMMY_S) |
+ (op->cmd.opcode << DATA_READ_CMD_S));
+
+ // read address
+ nfi_write32(snf, SNF_RD_CTL3, op_addr);
+
+ // Set read op_mode
+ if (op->data.buswidth == 4)
+ rd_mode = op->addr.buswidth == 4 ? DATA_READ_MODE_QUAD :
+ DATA_READ_MODE_X4;
+ else if (op->data.buswidth == 2)
+ rd_mode = op->addr.buswidth == 2 ? DATA_READ_MODE_DUAL :
+ DATA_READ_MODE_X2;
+ else
+ rd_mode = DATA_READ_MODE_X1;
+ rd_mode <<= DATA_READ_MODE_S;
+ nfi_rmw32(snf, SNF_MISC_CTL, DATA_READ_MODE,
+ rd_mode | DATARD_CUSTOM_EN);
+
+ // Set bytes to read
+ rd_bytes = (snf->nfi_cfg.spare_size + snf->caps->sector_size) *
+ snf->nfi_cfg.nsectors;
+ nfi_write32(snf, SNF_MISC_CTL2,
+ (rd_bytes << PROGRAM_LOAD_BYTE_NUM_S) | rd_bytes);
+
+ // NFI read prepare
+ nfi_write16(snf, NFI_CNFG,
+ (CNFG_OP_MODE_CUST << CNFG_OP_MODE_S) | CNFG_DMA_BURST_EN |
+ CNFG_READ_MODE | CNFG_DMA_MODE | op_mode);
+
+ nfi_write32(snf, NFI_CON, (snf->nfi_cfg.nsectors << CON_SEC_NUM_S));
+
+ buf_dma = dma_map_single(snf->dev, buf, dma_len, DMA_FROM_DEVICE);
+ ret = dma_mapping_error(snf->dev, buf_dma);
+ if (ret) {
+ dev_err(snf->dev, "DMA mapping failed.\n");
+ goto cleanup;
+ }
+ nfi_write32(snf, NFI_STRADDR, buf_dma);
+ if (op->data.ecc) {
+ snf->ecc_cfg->op = ECC_DECODE;
+ ret = mtk_ecc_enable(snf->ecc, snf->ecc_cfg);
+ if (ret)
+ goto cleanup_dma;
+ }
+ // Prepare for custom read interrupt
+ nfi_write32(snf, NFI_INTR_EN, NFI_IRQ_INTR_EN | NFI_IRQ_CUS_READ);
+ reinit_completion(&snf->op_done);
+
+ // Trigger NFI into custom mode
+ nfi_write16(snf, NFI_CMD, NFI_CMD_DUMMY_READ);
+
+ // Start DMA read
+ nfi_rmw32(snf, NFI_CON, 0, CON_BRD);
+ nfi_write16(snf, NFI_STRDATA, STR_DATA);
+
+ if (!wait_for_completion_timeout(
+ &snf->op_done, usecs_to_jiffies(SNFI_POLL_INTERVAL))) {
+ dev_err(snf->dev, "DMA timed out for reading from cache.\n");
+ ret = -ETIMEDOUT;
+ goto cleanup;
+ }
+
+ // Wait for BUS_SEC_CNTR returning expected value
+ ret = readl_poll_timeout(snf->nfi_base + NFI_BYTELEN, val,
+ BUS_SEC_CNTR(val) >= snf->nfi_cfg.nsectors, 0,
+ SNFI_POLL_INTERVAL);
+ if (ret) {
+ dev_err(snf->dev, "Timed out waiting for BUS_SEC_CNTR\n");
+ goto cleanup2;
+ }
+
+ // Wait for bus becoming idle
+ ret = readl_poll_timeout(snf->nfi_base + NFI_MASTERSTA, val,
+ !(val & snf->caps->mastersta_mask), 0,
+ SNFI_POLL_INTERVAL);
+ if (ret) {
+ dev_err(snf->dev, "Timed out waiting for bus becoming idle\n");
+ goto cleanup2;
+ }
+
+ if (op->data.ecc) {
+ ret = mtk_ecc_wait_done(snf->ecc, ECC_DECODE);
+ if (ret) {
+ dev_err(snf->dev, "wait ecc done timeout\n");
+ goto cleanup2;
+ }
+ // save status before disabling ecc
+ mtk_ecc_get_stats(snf->ecc, &snf->ecc_stats,
+ snf->nfi_cfg.nsectors);
+ }
+
+ dma_unmap_single(snf->dev, buf_dma, dma_len, DMA_FROM_DEVICE);
+
+ if (snf->autofmt) {
+ mtk_snand_read_fdm(snf, buf_fdm);
+ if (snf->caps->bbm_swap) {
+ mtk_snand_bm_swap(snf, buf);
+ mtk_snand_fdm_bm_swap(snf);
+ }
+ }
+
+ // copy data back
+ if (nfi_read32(snf, NFI_STA) & READ_EMPTY) {
+ memset(op->data.buf.in, 0xff, op->data.nbytes);
+ snf->ecc_stats.bitflips = 0;
+ snf->ecc_stats.failed = 0;
+ snf->ecc_stats.corrected = 0;
+ } else {
+ if (buf == op->data.buf.in) {
+ u32 cap_len = snf->buf_len - snf->nfi_cfg.page_size;
+ u32 req_left = op->data.nbytes - snf->nfi_cfg.page_size;
+
+ if (req_left)
+ memcpy(op->data.buf.in + snf->nfi_cfg.page_size,
+ buf_fdm,
+ cap_len < req_left ? cap_len : req_left);
+ } else if (rd_offset < snf->buf_len) {
+ u32 cap_len = snf->buf_len - rd_offset;
+
+ if (op->data.nbytes < cap_len)
+ cap_len = op->data.nbytes;
+ memcpy(op->data.buf.in, snf->buf + rd_offset, cap_len);
+ }
+ }
+cleanup2:
+ if (op->data.ecc)
+ mtk_ecc_disable(snf->ecc);
+cleanup_dma:
+ // unmap dma only if any error happens. (otherwise it's done before
+ // data copying)
+ if (ret)
+ dma_unmap_single(snf->dev, buf_dma, dma_len, DMA_FROM_DEVICE);
+cleanup:
+ // Stop read
+ nfi_write32(snf, NFI_CON, 0);
+ nfi_write16(snf, NFI_CNFG, 0);
+
+ // Clear SNF done flag
+ nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_READ_DONE);
+ nfi_write32(snf, SNF_STA_CTL1, 0);
+
+ // Disable interrupt
+ nfi_read32(snf, NFI_INTR_STA);
+ nfi_write32(snf, NFI_INTR_EN, 0);
+
+ nfi_rmw32(snf, SNF_MISC_CTL, DATARD_CUSTOM_EN, 0);
+ return ret;
+}
+
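+// Handle a Program Load page op: stage the caller's data and FDM bytes in the
+// bounce buffer, apply the BBM swaps, then run a custom-mode NFI DMA write to
+// the device cache, with hardware ECC encoding when requested.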
+static int mtk_snand_write_page_cache(struct mtk_snand *snf,
+ const struct spi_mem_op *op)
+{
+ // the address part to be sent by the controller
+ u32 op_addr = op->addr.val;
+ // where to start copying data from bounce buffer
+ u32 wr_offset = 0;
+ u32 op_mode = 0;
+ int ret = 0;
+ u32 wr_mode = 0;
+ u32 dma_len = snf->buf_len;
+ u32 wr_bytes, val;
+ size_t cap_len;
+ dma_addr_t buf_dma;
+
+ if (snf->autofmt) {
+ u32 last_bit;
+ u32 mask;
+
+ dma_len = snf->nfi_cfg.page_size;
+ op_mode = CNFG_AUTO_FMT_EN;
+ if (op->data.ecc)
+ op_mode |= CNFG_HW_ECC_EN;
+
+ last_bit = fls(snf->nfi_cfg.page_size + snf->nfi_cfg.oob_size);
+ mask = (1 << last_bit) - 1;
+ wr_offset = op_addr & mask;
+ op_addr &= ~mask;
+ }
+ mtk_snand_mac_reset(snf);
+ mtk_nfi_reset(snf);
+
+ if (wr_offset)
+ memset(snf->buf, 0xff, wr_offset);
+
+ cap_len = snf->buf_len - wr_offset;
+ if (op->data.nbytes < cap_len)
+ cap_len = op->data.nbytes;
+ memcpy(snf->buf + wr_offset, op->data.buf.out, cap_len);
+ if (snf->autofmt) {
+ if (snf->caps->bbm_swap) {
+ mtk_snand_fdm_bm_swap(snf);
+ mtk_snand_bm_swap(snf, snf->buf);
+ }
+ mtk_snand_write_fdm(snf, snf->buf + snf->nfi_cfg.page_size);
+ }
+
+ // Command
+ nfi_write32(snf, SNF_PG_CTL1, (op->cmd.opcode << PG_LOAD_CMD_S));
+
+ // write address
+ nfi_write32(snf, SNF_PG_CTL2, op_addr);
+
+ // Set read op_mode
+ if (op->data.buswidth == 4)
+ wr_mode = PG_LOAD_X4_EN;
+
+ nfi_rmw32(snf, SNF_MISC_CTL, PG_LOAD_X4_EN,
+ wr_mode | PG_LOAD_CUSTOM_EN);
+
+ // Set bytes to write
+ wr_bytes = (snf->nfi_cfg.spare_size + snf->caps->sector_size) *
+ snf->nfi_cfg.nsectors;
+ nfi_write32(snf, SNF_MISC_CTL2,
+ (wr_bytes << PROGRAM_LOAD_BYTE_NUM_S) | wr_bytes);
+
+ // NFI write prepare
+ nfi_write16(snf, NFI_CNFG,
+ (CNFG_OP_MODE_PROGRAM << CNFG_OP_MODE_S) |
+ CNFG_DMA_BURST_EN | CNFG_DMA_MODE | op_mode);
+
+ nfi_write32(snf, NFI_CON, (snf->nfi_cfg.nsectors << CON_SEC_NUM_S));
+ buf_dma = dma_map_single(snf->dev, snf->buf, dma_len, DMA_TO_DEVICE);
+ ret = dma_mapping_error(snf->dev, buf_dma);
+ if (ret) {
+ dev_err(snf->dev, "DMA mapping failed.\n");
+ goto cleanup;
+ }
+ nfi_write32(snf, NFI_STRADDR, buf_dma);
+ if (op->data.ecc) {
+ snf->ecc_cfg->op = ECC_ENCODE;
+ ret = mtk_ecc_enable(snf->ecc, snf->ecc_cfg);
+ if (ret)
+ goto cleanup_dma;
+ }
+ // Prepare for custom write interrupt
+ nfi_write32(snf, NFI_INTR_EN, NFI_IRQ_INTR_EN | NFI_IRQ_CUS_PG);
+ reinit_completion(&snf->op_done);
+
+ // Trigger NFI into custom mode
+ nfi_write16(snf, NFI_CMD, NFI_CMD_DUMMY_WRITE);
+
+ // Start DMA write
+ nfi_rmw32(snf, NFI_CON, 0, CON_BWR);
+ nfi_write16(snf, NFI_STRDATA, STR_DATA);
+
+ if (!wait_for_completion_timeout(
+ &snf->op_done, usecs_to_jiffies(SNFI_POLL_INTERVAL))) {
+ dev_err(snf->dev, "DMA timed out for program load.\n");
+ ret = -ETIMEDOUT;
+ goto cleanup_ecc;
+ }
+
+ // Wait for NFI_SEC_CNTR returning expected value
+ ret = readl_poll_timeout(snf->nfi_base + NFI_ADDRCNTR, val,
+ NFI_SEC_CNTR(val) >= snf->nfi_cfg.nsectors, 0,
+ SNFI_POLL_INTERVAL);
+ if (ret)
+ dev_err(snf->dev, "Timed out waiting for NFI_SEC_CNTR\n");
+
+cleanup_ecc:
+ if (op->data.ecc)
+ mtk_ecc_disable(snf->ecc);
+cleanup_dma:
+ dma_unmap_single(snf->dev, buf_dma, dma_len, DMA_TO_DEVICE);
+cleanup:
+ // Stop write
+ nfi_write32(snf, NFI_CON, 0);
+ nfi_write16(snf, NFI_CNFG, 0);
+
+ // Clear SNF done flag
+ nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_PG_DONE);
+ nfi_write32(snf, SNF_STA_CTL1, 0);
+
+ // Disable interrupt
+ nfi_read32(snf, NFI_INTR_STA);
+ nfi_write32(snf, NFI_INTR_EN, 0);
+
+ nfi_rmw32(snf, SNF_MISC_CTL, PG_LOAD_CUSTOM_EN, 0);
+
+ return ret;
+}
+
+/**
+ * mtk_snand_is_page_ops() - check if the op is a controller supported page op.
+ * @op: spi-mem op to check
+ *
+ * Check whether op can be executed with read_from_cache or program_load
+ * mode in the controller.
+ * This controller can execute typical Read From Cache and Program Load
+ * instructions found on SPI-NAND with 2-byte address.
+ * DTR and cmd buswidth & nbytes should be checked before calling this.
+ *
+ * Return: true if the op matches the instruction template
+ */
+static bool mtk_snand_is_page_ops(const struct spi_mem_op *op)
+{
+ if (op->addr.nbytes != 2)
+ return false;
+
+ if (op->addr.buswidth != 1 && op->addr.buswidth != 2 &&
+ op->addr.buswidth != 4)
+ return false;
+
+ // match read from page instructions
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ // check dummy cycle first
+ if (op->dummy.nbytes * BITS_PER_BYTE / op->dummy.buswidth >
+ DATA_READ_MAX_DUMMY)
+ return false;
+ // quad io / quad out
+ if ((op->addr.buswidth == 4 || op->addr.buswidth == 1) &&
+ op->data.buswidth == 4)
+ return true;
+
+ // dual io / dual out
+ if ((op->addr.buswidth == 2 || op->addr.buswidth == 1) &&
+ op->data.buswidth == 2)
+ return true;
+
+ // standard spi
+ if (op->addr.buswidth == 1 && op->data.buswidth == 1)
+ return true;
+ } else if (op->data.dir == SPI_MEM_DATA_OUT) {
+ // check dummy cycle first
+ if (op->dummy.nbytes)
+ return false;
+ // program load quad out
+ if (op->addr.buswidth == 1 && op->data.buswidth == 4)
+ return true;
+ // standard spi
+ if (op->addr.buswidth == 1 && op->data.buswidth == 1)
+ return true;
+ }
+ return false;
+}
+
+static bool mtk_snand_supports_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ if (!spi_mem_default_supports_op(mem, op))
+ return false;
+ if (op->cmd.nbytes != 1 || op->cmd.buswidth != 1)
+ return false;
+ if (mtk_snand_is_page_ops(op))
+ return true;
+ return ((op->addr.nbytes == 0 || op->addr.buswidth == 1) &&
+ (op->dummy.nbytes == 0 || op->dummy.buswidth == 1) &&
+ (op->data.nbytes == 0 || op->data.buswidth == 1));
+}
+
+static int mtk_snand_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
+{
+ struct mtk_snand *ms = spi_controller_get_devdata(mem->spi->master);
+ // page ops transfer size must be exactly ((sector_size + spare_size) *
+ // nsectors). Limit the op size if the caller requests more than that.
+ // exec_op will read more than needed and discard the leftover if the
+ // caller requests less data.
+ if (mtk_snand_is_page_ops(op)) {
+ size_t l;
+ // skip adjust_op_size for page ops
+ if (ms->autofmt)
+ return 0;
+ l = ms->caps->sector_size + ms->nfi_cfg.spare_size;
+ l *= ms->nfi_cfg.nsectors;
+ if (op->data.nbytes > l)
+ op->data.nbytes = l;
+ } else {
+ size_t hl = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;
+
+ if (hl >= SNF_GPRAM_SIZE)
+ return -EOPNOTSUPP;
+ if (op->data.nbytes > SNF_GPRAM_SIZE - hl)
+ op->data.nbytes = SNF_GPRAM_SIZE - hl;
+ }
+ return 0;
+}
+
+static int mtk_snand_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ struct mtk_snand *ms = spi_controller_get_devdata(mem->spi->master);
+
+ dev_dbg(ms->dev, "OP %02x ADDR %08llX@%d:%u DATA %d:%u", op->cmd.opcode,
+ op->addr.val, op->addr.buswidth, op->addr.nbytes,
+ op->data.buswidth, op->data.nbytes);
+ if (mtk_snand_is_page_ops(op)) {
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ return mtk_snand_read_page_cache(ms, op);
+ else
+ return mtk_snand_write_page_cache(ms, op);
+ } else {
+ return mtk_snand_mac_io(ms, op);
+ }
+}
+
+static const struct spi_controller_mem_ops mtk_snand_mem_ops = {
+ .adjust_op_size = mtk_snand_adjust_op_size,
+ .supports_op = mtk_snand_supports_op,
+ .exec_op = mtk_snand_exec_op,
+};
+
+static const struct spi_controller_mem_caps mtk_snand_mem_caps = {
+ .ecc = true,
+};
+
+static irqreturn_t mtk_snand_irq(int irq, void *id)
+{
+ struct mtk_snand *snf = id;
+ u32 sta, ien;
+
+ sta = nfi_read32(snf, NFI_INTR_STA);
+ ien = nfi_read32(snf, NFI_INTR_EN);
+
+ if (!(sta & ien))
+ return IRQ_NONE;
+
+ nfi_write32(snf, NFI_INTR_EN, 0);
+ complete(&snf->op_done);
+ return IRQ_HANDLED;
+}
+
+static const struct of_device_id mtk_snand_ids[] = {
+ { .compatible = "mediatek,mt7622-snand", .data = &mt7622_snand_caps },
+ { .compatible = "mediatek,mt7629-snand", .data = &mt7629_snand_caps },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, mtk_snand_ids);
+
+static int mtk_snand_enable_clk(struct mtk_snand *ms)
+{
+ int ret;
+
+ ret = clk_prepare_enable(ms->nfi_clk);
+ if (ret) {
+ dev_err(ms->dev, "unable to enable nfi clk\n");
+ return ret;
+ }
+ ret = clk_prepare_enable(ms->pad_clk);
+ if (ret) {
+ dev_err(ms->dev, "unable to enable pad clk\n");
+ goto err1;
+ }
+ return 0;
+err1:
+ clk_disable_unprepare(ms->nfi_clk);
+ return ret;
+}
+
+static void mtk_snand_disable_clk(struct mtk_snand *ms)
+{
+ clk_disable_unprepare(ms->pad_clk);
+ clk_disable_unprepare(ms->nfi_clk);
+}
+
+static int mtk_snand_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ const struct of_device_id *dev_id;
+ struct spi_controller *ctlr;
+ struct mtk_snand *ms;
+ int ret;
+
+ dev_id = of_match_node(mtk_snand_ids, np);
+ if (!dev_id)
+ return -EINVAL;
+
+ ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*ms));
+ if (!ctlr)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, ctlr);
+
+ ms = spi_controller_get_devdata(ctlr);
+
+ ms->ctlr = ctlr;
+ ms->caps = dev_id->data;
+
+ ms->ecc = of_mtk_ecc_get(np);
+ if (IS_ERR(ms->ecc))
+ return PTR_ERR(ms->ecc);
+ else if (!ms->ecc)
+ return -ENODEV;
+
+ ms->nfi_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ms->nfi_base)) {
+ ret = PTR_ERR(ms->nfi_base);
+ goto release_ecc;
+ }
+
+ ms->dev = &pdev->dev;
+
+ ms->nfi_clk = devm_clk_get(&pdev->dev, "nfi_clk");
+ if (IS_ERR(ms->nfi_clk)) {
+ ret = PTR_ERR(ms->nfi_clk);
+ dev_err(&pdev->dev, "unable to get nfi_clk, err = %d\n", ret);
+ goto release_ecc;
+ }
+
+ ms->pad_clk = devm_clk_get(&pdev->dev, "pad_clk");
+ if (IS_ERR(ms->pad_clk)) {
+ ret = PTR_ERR(ms->pad_clk);
+ dev_err(&pdev->dev, "unable to get pad_clk, err = %d\n", ret);
+ goto release_ecc;
+ }
+
+ ret = mtk_snand_enable_clk(ms);
+ if (ret)
+ goto release_ecc;
+
+ init_completion(&ms->op_done);
+
+ ms->irq = platform_get_irq(pdev, 0);
+ if (ms->irq < 0) {
+ ret = ms->irq;
+ goto disable_clk;
+ }
+ ret = devm_request_irq(ms->dev, ms->irq, mtk_snand_irq, 0x0,
+ "mtk-snand", ms);
+ if (ret) {
+ dev_err(ms->dev, "failed to request snfi irq\n");
+ goto disable_clk;
+ }
+
+ ret = dma_set_mask(ms->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(ms->dev, "failed to set dma mask\n");
+ goto disable_clk;
+ }
+
+ // switch to SNFI mode
+ nfi_write32(ms, SNF_CFG, SPI_MODE);
+
+ // setup an initial page format for ops matching page_cache_op template
+ // before ECC is called.
+ ret = mtk_snand_setup_pagefmt(ms, ms->caps->sector_size,
+ ms->caps->spare_sizes[0]);
+ if (ret) {
+ dev_err(ms->dev, "failed to set initial page format\n");
+ goto disable_clk;
+ }
+
+ // setup ECC engine
+ ms->ecc_eng.dev = &pdev->dev;
+ ms->ecc_eng.integration = NAND_ECC_ENGINE_INTEGRATION_PIPELINED;
+ ms->ecc_eng.ops = &mtk_snfi_ecc_engine_ops;
+ ms->ecc_eng.priv = ms;
+
+ ret = nand_ecc_register_on_host_hw_engine(&ms->ecc_eng);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register ecc engine.\n");
+ goto disable_clk;
+ }
+
+ ctlr->num_chipselect = 1;
+ ctlr->mem_ops = &mtk_snand_mem_ops;
+ ctlr->mem_caps = &mtk_snand_mem_caps;
+ ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
+ ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD;
+ ctlr->dev.of_node = pdev->dev.of_node;
+ ret = spi_register_controller(ctlr);
+ if (ret) {
+ dev_err(&pdev->dev, "spi_register_controller failed.\n");
+ goto disable_clk;
+ }
+
+ return 0;
+disable_clk:
+ mtk_snand_disable_clk(ms);
+release_ecc:
+ mtk_ecc_release(ms->ecc);
+ return ret;
+}
+
+static int mtk_snand_remove(struct platform_device *pdev)
+{
+ struct spi_controller *ctlr = platform_get_drvdata(pdev);
+ struct mtk_snand *ms = spi_controller_get_devdata(ctlr);
+
+ spi_unregister_controller(ctlr);
+ mtk_snand_disable_clk(ms);
+ mtk_ecc_release(ms->ecc);
+ kfree(ms->buf);
+ return 0;
+}
+
+static struct platform_driver mtk_snand_driver = {
+ .probe = mtk_snand_probe,
+ .remove = mtk_snand_remove,
+ .driver = {
+ .name = "mtk-snand",
+ .of_match_table = mtk_snand_ids,
+ },
+};
+
+module_platform_driver(mtk_snand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Chuanhong Guo <gch981213@gmail.com>");
+MODULE_DESCRIPTION("MediaTek SPI-NAND Flash Controller Driver");
diff --git a/drivers/spi/spi-mux.c b/drivers/spi/spi-mux.c
new file mode 100644
index 000000000..0709e987b
--- /dev/null
+++ b/drivers/spi/spi-mux.c
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// General Purpose SPI multiplexer
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mux/consumer.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+
+#define SPI_MUX_NO_CS ((unsigned int)-1)
+
+/**
+ * DOC: Driver description
+ *
+ * This driver supports a MUX on an SPI bus. This can be useful when you need
+ * more chip selects than the hardware peripherals support, or than are
+ * available in a particular board setup.
+ *
+ * The driver will create an additional SPI controller. Devices added under the
+ * mux will be handled as 'chip selects' on this controller.
+ */
+
+/**
+ * struct spi_mux_priv - the basic spi_mux structure
+ * @spi: pointer to the device struct attached to the parent
+ * spi controller
+ * @current_cs: The current chip select set in the mux
+ * @child_msg_complete: The mux replaces the complete callback in the child's
+ * message to its own callback; this field is used by the
+ * driver to store the child's callback during a transfer
+ * @child_msg_context: Used to store the child's context to the callback
+ * @child_msg_dev: Used to store the spi_device pointer to the child
+ * @mux: mux_control structure used to provide chip selects for
+ * downstream spi devices
+ */
+struct spi_mux_priv {
+ struct spi_device *spi;
+ unsigned int current_cs;
+
+ void (*child_msg_complete)(void *context);
+ void *child_msg_context;
+ struct spi_device *child_msg_dev;
+ struct mux_control *mux;
+};
+
+/* should not get called when the parent controller is doing a transfer */
+static int spi_mux_select(struct spi_device *spi)
+{
+ struct spi_mux_priv *priv = spi_controller_get_devdata(spi->controller);
+ int ret;
+
+ ret = mux_control_select(priv->mux, spi->chip_select);
+ if (ret)
+ return ret;
+
+ if (priv->current_cs == spi->chip_select)
+ return 0;
+
+ dev_dbg(&priv->spi->dev, "setting up the mux for cs %d\n",
+ spi->chip_select);
+
+ /* copy the child device's settings except for the cs */
+ priv->spi->max_speed_hz = spi->max_speed_hz;
+ priv->spi->mode = spi->mode;
+ priv->spi->bits_per_word = spi->bits_per_word;
+
+ priv->current_cs = spi->chip_select;
+
+ return 0;
+}
+
+static int spi_mux_setup(struct spi_device *spi)
+{
+ struct spi_mux_priv *priv = spi_controller_get_devdata(spi->controller);
+
+	/*
+	 * This may be called multiple times. We don't apply the child's
+	 * settings here; they are copied over right before each transfer,
+	 * since we can't predict which child device the next transfer will
+	 * come from anyway.
+	 */
+ return spi_setup(priv->spi);
+}
+
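+/*
+ * Completion callback run when the parent controller finishes the forwarded
+ * message: restore the child's complete/context/spi pointers, finalize the
+ * message on the mux controller and release the mux control.
+ */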
+static void spi_mux_complete_cb(void *context)
+{
+ struct spi_mux_priv *priv = (struct spi_mux_priv *)context;
+ struct spi_controller *ctlr = spi_get_drvdata(priv->spi);
+ struct spi_message *m = ctlr->cur_msg;
+
+ m->complete = priv->child_msg_complete;
+ m->context = priv->child_msg_context;
+ m->spi = priv->child_msg_dev;
+ spi_finalize_current_message(ctlr);
+ mux_control_deselect(priv->mux);
+}
+
+static int spi_mux_transfer_one_message(struct spi_controller *ctlr,
+ struct spi_message *m)
+{
+ struct spi_mux_priv *priv = spi_controller_get_devdata(ctlr);
+ struct spi_device *spi = m->spi;
+ int ret;
+
+ ret = spi_mux_select(spi);
+ if (ret)
+ return ret;
+
+	/*
+	 * Replace the complete callback, context and spi_device with our own
+	 * pointers, saving the originals so they can be restored when the
+	 * transfer finishes.
+	 */
+ priv->child_msg_complete = m->complete;
+ priv->child_msg_context = m->context;
+ priv->child_msg_dev = m->spi;
+
+ m->complete = spi_mux_complete_cb;
+ m->context = priv;
+ m->spi = priv->spi;
+
+ /* do the transfer */
+ return spi_async(priv->spi, m);
+}
+
+static int spi_mux_probe(struct spi_device *spi)
+{
+ struct spi_controller *ctlr;
+ struct spi_mux_priv *priv;
+ int ret;
+
+ ctlr = spi_alloc_master(&spi->dev, sizeof(*priv));
+ if (!ctlr)
+ return -ENOMEM;
+
+ spi_set_drvdata(spi, ctlr);
+ priv = spi_controller_get_devdata(ctlr);
+ priv->spi = spi;
+
+	/*
+	 * Increase the lockdep class, as these locks are taken while the
+	 * parent bus already holds its own instance of the lock.
+	 */
+ lockdep_set_subclass(&ctlr->io_mutex, 1);
+ lockdep_set_subclass(&ctlr->add_lock, 1);
+
+ priv->mux = devm_mux_control_get(&spi->dev, NULL);
+ if (IS_ERR(priv->mux)) {
+ ret = dev_err_probe(&spi->dev, PTR_ERR(priv->mux),
+ "failed to get control-mux\n");
+ goto err_put_ctlr;
+ }
+
+ priv->current_cs = SPI_MUX_NO_CS;
+
+ /* supported modes are the same as our parent's */
+ ctlr->mode_bits = spi->controller->mode_bits;
+ ctlr->flags = spi->controller->flags;
+ ctlr->transfer_one_message = spi_mux_transfer_one_message;
+ ctlr->setup = spi_mux_setup;
+ ctlr->num_chipselect = mux_control_states(priv->mux);
+ ctlr->bus_num = -1;
+ ctlr->dev.of_node = spi->dev.of_node;
+ ctlr->must_async = true;
+
+ ret = devm_spi_register_controller(&spi->dev, ctlr);
+ if (ret)
+ goto err_put_ctlr;
+
+ return 0;
+
+err_put_ctlr:
+ spi_controller_put(ctlr);
+
+ return ret;
+}
+
+static const struct spi_device_id spi_mux_id[] = {
+ { "spi-mux" },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, spi_mux_id);
+
+static const struct of_device_id spi_mux_of_match[] = {
+ { .compatible = "spi-mux" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, spi_mux_of_match);
+
+static struct spi_driver spi_mux_driver = {
+ .probe = spi_mux_probe,
+ .driver = {
+ .name = "spi-mux",
+ .of_match_table = spi_mux_of_match,
+ },
+ .id_table = spi_mux_id,
+};
+
+module_spi_driver(spi_mux_driver);
+
+MODULE_DESCRIPTION("SPI multiplexer");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-mxic.c b/drivers/spi/spi-mxic.c
new file mode 100644
index 000000000..65be8e085
--- /dev/null
+++ b/drivers/spi/spi-mxic.c
@@ -0,0 +1,853 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2018 Macronix International Co., Ltd.
+//
+// Authors:
+// Mason Yang <masonccyang@mxic.com.tw>
+// zhengxunli <zhengxunli@mxic.com.tw>
+// Boris Brezillon <boris.brezillon@bootlin.com>
+//
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand-ecc-mxic.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+
+#define HC_CFG 0x0
+#define HC_CFG_IF_CFG(x) ((x) << 27)
+#define HC_CFG_DUAL_SLAVE BIT(31)
+#define HC_CFG_INDIVIDUAL BIT(30)
+#define HC_CFG_NIO(x) (((x) / 4) << 27)
+#define HC_CFG_TYPE(s, t) ((t) << (23 + ((s) * 2)))
+#define HC_CFG_TYPE_SPI_NOR 0
+#define HC_CFG_TYPE_SPI_NAND 1
+#define HC_CFG_TYPE_SPI_RAM 2
+#define HC_CFG_TYPE_RAW_NAND 3
+#define HC_CFG_SLV_ACT(x) ((x) << 21)
+#define HC_CFG_CLK_PH_EN BIT(20)
+#define HC_CFG_CLK_POL_INV BIT(19)
+#define HC_CFG_BIG_ENDIAN BIT(18)
+#define HC_CFG_DATA_PASS BIT(17)
+#define HC_CFG_IDLE_SIO_LVL(x) ((x) << 16)
+#define HC_CFG_MAN_START_EN BIT(3)
+#define HC_CFG_MAN_START BIT(2)
+#define HC_CFG_MAN_CS_EN BIT(1)
+#define HC_CFG_MAN_CS_ASSERT BIT(0)
+
+#define INT_STS 0x4
+#define INT_STS_EN 0x8
+#define INT_SIG_EN 0xc
+#define INT_STS_ALL GENMASK(31, 0)
+#define INT_RDY_PIN BIT(26)
+#define INT_RDY_SR BIT(25)
+#define INT_LNR_SUSP BIT(24)
+#define INT_ECC_ERR BIT(17)
+#define INT_CRC_ERR BIT(16)
+#define INT_LWR_DIS BIT(12)
+#define INT_LRD_DIS BIT(11)
+#define INT_SDMA_INT BIT(10)
+#define INT_DMA_FINISH BIT(9)
+#define INT_RX_NOT_FULL BIT(3)
+#define INT_RX_NOT_EMPTY BIT(2)
+#define INT_TX_NOT_FULL BIT(1)
+#define INT_TX_EMPTY BIT(0)
+
+#define HC_EN 0x10
+#define HC_EN_BIT BIT(0)
+
+#define TXD(x) (0x14 + ((x) * 4))
+#define RXD 0x24
+
+#define SS_CTRL(s) (0x30 + ((s) * 4))
+#define LRD_CFG 0x44
+#define LWR_CFG 0x80
+#define RWW_CFG 0x70
+#define OP_READ BIT(23)
+#define OP_DUMMY_CYC(x) ((x) << 17)
+#define OP_ADDR_BYTES(x) ((x) << 14)
+#define OP_CMD_BYTES(x) (((x) - 1) << 13)
+#define OP_OCTA_CRC_EN BIT(12)
+#define OP_DQS_EN BIT(11)
+#define OP_ENHC_EN BIT(10)
+#define OP_PREAMBLE_EN BIT(9)
+#define OP_DATA_DDR BIT(8)
+#define OP_DATA_BUSW(x) ((x) << 6)
+#define OP_ADDR_DDR BIT(5)
+#define OP_ADDR_BUSW(x) ((x) << 3)
+#define OP_CMD_DDR BIT(2)
+#define OP_CMD_BUSW(x) (x)
+#define OP_BUSW_1 0
+#define OP_BUSW_2 1
+#define OP_BUSW_4 2
+#define OP_BUSW_8 3
+
+#define OCTA_CRC 0x38
+#define OCTA_CRC_IN_EN(s) BIT(3 + ((s) * 16))
+#define OCTA_CRC_CHUNK(s, x) ((fls((x) / 32)) << (1 + ((s) * 16)))
+#define OCTA_CRC_OUT_EN(s) BIT(0 + ((s) * 16))
+
+#define ONFI_DIN_CNT(s) (0x3c + (s))
+
+#define LRD_CTRL 0x48
+#define RWW_CTRL 0x74
+#define LWR_CTRL 0x84
+#define LMODE_EN BIT(31)
+#define LMODE_SLV_ACT(x) ((x) << 21)
+#define LMODE_CMD1(x) ((x) << 8)
+#define LMODE_CMD0(x) (x)
+
+#define LRD_ADDR 0x4c
+#define LWR_ADDR 0x88
+#define LRD_RANGE 0x50
+#define LWR_RANGE 0x8c
+
+#define AXI_SLV_ADDR 0x54
+
+#define DMAC_RD_CFG 0x58
+#define DMAC_WR_CFG 0x94
+#define DMAC_CFG_PERIPH_EN BIT(31)
+#define DMAC_CFG_ALLFLUSH_EN BIT(30)
+#define DMAC_CFG_LASTFLUSH_EN BIT(29)
+#define DMAC_CFG_QE(x) (((x) + 1) << 16)
+#define DMAC_CFG_BURST_LEN(x) (((x) + 1) << 12)
+#define DMAC_CFG_BURST_SZ(x) ((x) << 8)
+#define DMAC_CFG_DIR_READ BIT(1)
+#define DMAC_CFG_START BIT(0)
+
+#define DMAC_RD_CNT 0x5c
+#define DMAC_WR_CNT 0x98
+
+#define SDMA_ADDR 0x60
+
+#define DMAM_CFG 0x64
+#define DMAM_CFG_START BIT(31)
+#define DMAM_CFG_CONT BIT(30)
+#define DMAM_CFG_SDMA_GAP(x) (fls((x) / 8192) << 2)
+#define DMAM_CFG_DIR_READ BIT(1)
+#define DMAM_CFG_EN BIT(0)
+
+#define DMAM_CNT 0x68
+
+#define LNR_TIMER_TH 0x6c
+
+#define RDM_CFG0 0x78
+#define RDM_CFG0_POLY(x) (x)
+
+#define RDM_CFG1 0x7c
+#define RDM_CFG1_RDM_EN BIT(31)
+#define RDM_CFG1_SEED(x) (x)
+
+#define LWR_SUSP_CTRL 0x90
+#define LWR_SUSP_CTRL_EN BIT(31)
+
+#define DMAS_CTRL 0x9c
+#define DMAS_CTRL_EN BIT(31)
+#define DMAS_CTRL_DIR_READ BIT(30)
+
+#define DATA_STROB 0xa0
+#define DATA_STROB_EDO_EN BIT(2)
+#define DATA_STROB_INV_POL BIT(1)
+#define DATA_STROB_DELAY_2CYC BIT(0)
+
+#define IDLY_CODE(x) (0xa4 + ((x) * 4))
+#define IDLY_CODE_VAL(x, v) ((v) << (((x) % 4) * 8))
+
+#define GPIO 0xc4
+#define GPIO_PT(x) BIT(3 + ((x) * 16))
+#define GPIO_RESET(x) BIT(2 + ((x) * 16))
+#define GPIO_HOLDB(x) BIT(1 + ((x) * 16))
+#define GPIO_WPB(x) BIT((x) * 16)
+
+#define HC_VER 0xd0
+
+#define HW_TEST(x) (0xe0 + ((x) * 4))
+
+struct mxic_spi {
+ struct device *dev;
+ struct clk *ps_clk;
+ struct clk *send_clk;
+ struct clk *send_dly_clk;
+ void __iomem *regs;
+ u32 cur_speed_hz;
+ struct {
+ void __iomem *map;
+ dma_addr_t dma;
+ size_t size;
+ } linear;
+
+ struct {
+ bool use_pipelined_conf;
+ struct nand_ecc_engine *pipelined_engine;
+ void *ctx;
+ } ecc;
+};
+
+static int mxic_spi_clk_enable(struct mxic_spi *mxic)
+{
+ int ret;
+
+ ret = clk_prepare_enable(mxic->send_clk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(mxic->send_dly_clk);
+ if (ret)
+ goto err_send_dly_clk;
+
+ return ret;
+
+err_send_dly_clk:
+ clk_disable_unprepare(mxic->send_clk);
+
+ return ret;
+}
+
+static void mxic_spi_clk_disable(struct mxic_spi *mxic)
+{
+ clk_disable_unprepare(mxic->send_clk);
+ clk_disable_unprepare(mxic->send_dly_clk);
+}
+
+static void mxic_spi_set_input_delay_dqs(struct mxic_spi *mxic, u8 idly_code)
+{
+ writel(IDLY_CODE_VAL(0, idly_code) |
+ IDLY_CODE_VAL(1, idly_code) |
+ IDLY_CODE_VAL(2, idly_code) |
+ IDLY_CODE_VAL(3, idly_code),
+ mxic->regs + IDLY_CODE(0));
+ writel(IDLY_CODE_VAL(4, idly_code) |
+ IDLY_CODE_VAL(5, idly_code) |
+ IDLY_CODE_VAL(6, idly_code) |
+ IDLY_CODE_VAL(7, idly_code),
+ mxic->regs + IDLY_CODE(1));
+}
+
+static int mxic_spi_clk_setup(struct mxic_spi *mxic, unsigned long freq)
+{
+ int ret;
+
+ ret = clk_set_rate(mxic->send_clk, freq);
+ if (ret)
+ return ret;
+
+ ret = clk_set_rate(mxic->send_dly_clk, freq);
+ if (ret)
+ return ret;
+
+	/*
+	 * The input delay code ranges from 0x0 to 0x1F in units of 78 ps,
+	 * so the maximum input delay is 2.418 ns.
+	 */
+ mxic_spi_set_input_delay_dqs(mxic, 0xf);
+
+ /*
+ * Phase degree = 360 * freq * output-delay
+ * where output-delay is a constant value 1 ns in FPGA.
+ *
+ * Get Phase degree = 360 * freq * 1 ns
+ * = 360 * freq * 1 sec / 1000000000
+ * = 9 * freq / 25000000
+ */
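+	/* e.g. 9 * 100000000 / 25000000 = 36 degrees at a 100 MHz SPI clock */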
+ ret = clk_set_phase(mxic->send_dly_clk, 9 * freq / 25000000);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int mxic_spi_set_freq(struct mxic_spi *mxic, unsigned long freq)
+{
+ int ret;
+
+ if (mxic->cur_speed_hz == freq)
+ return 0;
+
+ mxic_spi_clk_disable(mxic);
+ ret = mxic_spi_clk_setup(mxic, freq);
+ if (ret)
+ return ret;
+
+ ret = mxic_spi_clk_enable(mxic);
+ if (ret)
+ return ret;
+
+ mxic->cur_speed_hz = freq;
+
+ return 0;
+}
+
+static void mxic_spi_hw_init(struct mxic_spi *mxic)
+{
+ writel(0, mxic->regs + DATA_STROB);
+ writel(INT_STS_ALL, mxic->regs + INT_STS_EN);
+ writel(0, mxic->regs + HC_EN);
+ writel(0, mxic->regs + LRD_CFG);
+ writel(0, mxic->regs + LRD_CTRL);
+ writel(HC_CFG_NIO(1) | HC_CFG_TYPE(0, HC_CFG_TYPE_SPI_NOR) |
+ HC_CFG_SLV_ACT(0) | HC_CFG_MAN_CS_EN | HC_CFG_IDLE_SIO_LVL(1),
+ mxic->regs + HC_CFG);
+}
+
+static u32 mxic_spi_prep_hc_cfg(struct spi_device *spi, u32 flags)
+{
+ int nio = 1;
+
+ if (spi->mode & (SPI_TX_OCTAL | SPI_RX_OCTAL))
+ nio = 8;
+ else if (spi->mode & (SPI_TX_QUAD | SPI_RX_QUAD))
+ nio = 4;
+ else if (spi->mode & (SPI_TX_DUAL | SPI_RX_DUAL))
+ nio = 2;
+
+ return flags | HC_CFG_NIO(nio) |
+ HC_CFG_TYPE(spi->chip_select, HC_CFG_TYPE_SPI_NOR) |
+ HC_CFG_SLV_ACT(spi->chip_select) | HC_CFG_IDLE_SIO_LVL(1);
+}
+
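+/*
+ * Translate a spi_mem_op into the OP_* configuration word: command/address
+ * byte counts, bus widths, DTR flags, dummy cycles and, when data_len is
+ * non-zero, the data phase direction and width.
+ */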
+static u32 mxic_spi_mem_prep_op_cfg(const struct spi_mem_op *op,
+ unsigned int data_len)
+{
+ u32 cfg = OP_CMD_BYTES(op->cmd.nbytes) |
+ OP_CMD_BUSW(fls(op->cmd.buswidth) - 1) |
+ (op->cmd.dtr ? OP_CMD_DDR : 0);
+
+ if (op->addr.nbytes)
+ cfg |= OP_ADDR_BYTES(op->addr.nbytes) |
+ OP_ADDR_BUSW(fls(op->addr.buswidth) - 1) |
+ (op->addr.dtr ? OP_ADDR_DDR : 0);
+
+ if (op->dummy.nbytes)
+ cfg |= OP_DUMMY_CYC(op->dummy.nbytes);
+
+	/* For direct mappings, op->data.nbytes is not populated; use data_len */
+ if (data_len) {
+ cfg |= OP_DATA_BUSW(fls(op->data.buswidth) - 1) |
+ (op->data.dtr ? OP_DATA_DDR : 0);
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ cfg |= OP_READ;
+ if (op->data.dtr)
+ cfg |= OP_DQS_EN;
+ }
+ }
+
+ return cfg;
+}
+
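+/*
+ * PIO data transfer: push up to four bytes at a time through the TX FIFO and
+ * always drain the matching word from the RX FIFO, copying it to rxbuf when
+ * the caller asked for read data.
+ */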
+static int mxic_spi_data_xfer(struct mxic_spi *mxic, const void *txbuf,
+ void *rxbuf, unsigned int len)
+{
+ unsigned int pos = 0;
+
+ while (pos < len) {
+ unsigned int nbytes = len - pos;
+ u32 data = 0xffffffff;
+ u32 sts;
+ int ret;
+
+ if (nbytes > 4)
+ nbytes = 4;
+
+ if (txbuf)
+ memcpy(&data, txbuf + pos, nbytes);
+
+ ret = readl_poll_timeout(mxic->regs + INT_STS, sts,
+ sts & INT_TX_EMPTY, 0, USEC_PER_SEC);
+ if (ret)
+ return ret;
+
+ writel(data, mxic->regs + TXD(nbytes % 4));
+
+ ret = readl_poll_timeout(mxic->regs + INT_STS, sts,
+ sts & INT_TX_EMPTY, 0, USEC_PER_SEC);
+ if (ret)
+ return ret;
+
+ ret = readl_poll_timeout(mxic->regs + INT_STS, sts,
+ sts & INT_RX_NOT_EMPTY, 0,
+ USEC_PER_SEC);
+ if (ret)
+ return ret;
+
+ data = readl(mxic->regs + RXD);
+ if (rxbuf) {
+ data >>= (8 * (4 - nbytes));
+ memcpy(rxbuf + pos, &data, nbytes);
+ }
+ WARN_ON(readl(mxic->regs + INT_STS) & INT_RX_NOT_EMPTY);
+
+ pos += nbytes;
+ }
+
+ return 0;
+}
+
+static ssize_t mxic_spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc,
+ u64 offs, size_t len, void *buf)
+{
+ struct mxic_spi *mxic = spi_master_get_devdata(desc->mem->spi->master);
+ int ret;
+ u32 sts;
+
+ if (WARN_ON(offs + desc->info.offset + len > U32_MAX))
+ return -EINVAL;
+
+ writel(mxic_spi_prep_hc_cfg(desc->mem->spi, 0), mxic->regs + HC_CFG);
+
+ writel(mxic_spi_mem_prep_op_cfg(&desc->info.op_tmpl, len),
+ mxic->regs + LRD_CFG);
+ writel(desc->info.offset + offs, mxic->regs + LRD_ADDR);
+ len = min_t(size_t, len, mxic->linear.size);
+ writel(len, mxic->regs + LRD_RANGE);
+ writel(LMODE_CMD0(desc->info.op_tmpl.cmd.opcode) |
+ LMODE_SLV_ACT(desc->mem->spi->chip_select) |
+ LMODE_EN,
+ mxic->regs + LRD_CTRL);
+
+ if (mxic->ecc.use_pipelined_conf && desc->info.op_tmpl.data.ecc) {
+ ret = mxic_ecc_process_data_pipelined(mxic->ecc.pipelined_engine,
+ NAND_PAGE_READ,
+ mxic->linear.dma + offs);
+ if (ret)
+ return ret;
+ } else {
+ memcpy_fromio(buf, mxic->linear.map, len);
+ }
+
+ writel(INT_LRD_DIS, mxic->regs + INT_STS);
+ writel(0, mxic->regs + LRD_CTRL);
+
+ ret = readl_poll_timeout(mxic->regs + INT_STS, sts,
+ sts & INT_LRD_DIS, 0, USEC_PER_SEC);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static ssize_t mxic_spi_mem_dirmap_write(struct spi_mem_dirmap_desc *desc,
+ u64 offs, size_t len,
+ const void *buf)
+{
+ struct mxic_spi *mxic = spi_master_get_devdata(desc->mem->spi->master);
+ u32 sts;
+ int ret;
+
+ if (WARN_ON(offs + desc->info.offset + len > U32_MAX))
+ return -EINVAL;
+
+ writel(mxic_spi_prep_hc_cfg(desc->mem->spi, 0), mxic->regs + HC_CFG);
+
+ writel(mxic_spi_mem_prep_op_cfg(&desc->info.op_tmpl, len),
+ mxic->regs + LWR_CFG);
+ writel(desc->info.offset + offs, mxic->regs + LWR_ADDR);
+ len = min_t(size_t, len, mxic->linear.size);
+ writel(len, mxic->regs + LWR_RANGE);
+ writel(LMODE_CMD0(desc->info.op_tmpl.cmd.opcode) |
+ LMODE_SLV_ACT(desc->mem->spi->chip_select) |
+ LMODE_EN,
+ mxic->regs + LWR_CTRL);
+
+ if (mxic->ecc.use_pipelined_conf && desc->info.op_tmpl.data.ecc) {
+ ret = mxic_ecc_process_data_pipelined(mxic->ecc.pipelined_engine,
+ NAND_PAGE_WRITE,
+ mxic->linear.dma + offs);
+ if (ret)
+ return ret;
+ } else {
+ memcpy_toio(mxic->linear.map, buf, len);
+ }
+
+ writel(INT_LWR_DIS, mxic->regs + INT_STS);
+ writel(0, mxic->regs + LWR_CTRL);
+
+ ret = readl_poll_timeout(mxic->regs + INT_STS, sts,
+ sts & INT_LWR_DIS, 0, USEC_PER_SEC);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static bool mxic_spi_mem_supports_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ if (op->data.buswidth > 8 || op->addr.buswidth > 8 ||
+ op->dummy.buswidth > 8 || op->cmd.buswidth > 8)
+ return false;
+
+ if (op->data.nbytes && op->dummy.nbytes &&
+ op->data.buswidth != op->dummy.buswidth)
+ return false;
+
+ if (op->addr.nbytes > 7)
+ return false;
+
+ return spi_mem_default_supports_op(mem, op);
+}
+
+static int mxic_spi_mem_dirmap_create(struct spi_mem_dirmap_desc *desc)
+{
+ struct mxic_spi *mxic = spi_master_get_devdata(desc->mem->spi->master);
+
+ if (!mxic->linear.map)
+ return -EINVAL;
+
+ if (desc->info.offset + desc->info.length > U32_MAX)
+ return -EINVAL;
+
+ if (!mxic_spi_mem_supports_op(desc->mem, &desc->info.op_tmpl))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int mxic_spi_mem_exec_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ struct mxic_spi *mxic = spi_master_get_devdata(mem->spi->master);
+ int i, ret;
+ u8 addr[8], cmd[2];
+
+ ret = mxic_spi_set_freq(mxic, mem->spi->max_speed_hz);
+ if (ret)
+ return ret;
+
+ writel(mxic_spi_prep_hc_cfg(mem->spi, HC_CFG_MAN_CS_EN),
+ mxic->regs + HC_CFG);
+
+ writel(HC_EN_BIT, mxic->regs + HC_EN);
+
+ writel(mxic_spi_mem_prep_op_cfg(op, op->data.nbytes),
+ mxic->regs + SS_CTRL(mem->spi->chip_select));
+
+ writel(readl(mxic->regs + HC_CFG) | HC_CFG_MAN_CS_ASSERT,
+ mxic->regs + HC_CFG);
+
+ for (i = 0; i < op->cmd.nbytes; i++)
+ cmd[i] = op->cmd.opcode >> (8 * (op->cmd.nbytes - i - 1));
+
+ ret = mxic_spi_data_xfer(mxic, cmd, NULL, op->cmd.nbytes);
+ if (ret)
+ goto out;
+
+ for (i = 0; i < op->addr.nbytes; i++)
+ addr[i] = op->addr.val >> (8 * (op->addr.nbytes - i - 1));
+
+ ret = mxic_spi_data_xfer(mxic, addr, NULL, op->addr.nbytes);
+ if (ret)
+ goto out;
+
+ ret = mxic_spi_data_xfer(mxic, NULL, NULL, op->dummy.nbytes);
+ if (ret)
+ goto out;
+
+ ret = mxic_spi_data_xfer(mxic,
+ op->data.dir == SPI_MEM_DATA_OUT ?
+ op->data.buf.out : NULL,
+ op->data.dir == SPI_MEM_DATA_IN ?
+ op->data.buf.in : NULL,
+ op->data.nbytes);
+
+out:
+ writel(readl(mxic->regs + HC_CFG) & ~HC_CFG_MAN_CS_ASSERT,
+ mxic->regs + HC_CFG);
+ writel(0, mxic->regs + HC_EN);
+
+ return ret;
+}
+
+static const struct spi_controller_mem_ops mxic_spi_mem_ops = {
+ .supports_op = mxic_spi_mem_supports_op,
+ .exec_op = mxic_spi_mem_exec_op,
+ .dirmap_create = mxic_spi_mem_dirmap_create,
+ .dirmap_read = mxic_spi_mem_dirmap_read,
+ .dirmap_write = mxic_spi_mem_dirmap_write,
+};
+
+static const struct spi_controller_mem_caps mxic_spi_mem_caps = {
+ .dtr = true,
+ .ecc = true,
+};
+
+static void mxic_spi_set_cs(struct spi_device *spi, bool lvl)
+{
+ struct mxic_spi *mxic = spi_master_get_devdata(spi->master);
+
+ if (!lvl) {
+ writel(readl(mxic->regs + HC_CFG) | HC_CFG_MAN_CS_EN,
+ mxic->regs + HC_CFG);
+ writel(HC_EN_BIT, mxic->regs + HC_EN);
+ writel(readl(mxic->regs + HC_CFG) | HC_CFG_MAN_CS_ASSERT,
+ mxic->regs + HC_CFG);
+ } else {
+ writel(readl(mxic->regs + HC_CFG) & ~HC_CFG_MAN_CS_ASSERT,
+ mxic->regs + HC_CFG);
+ writel(0, mxic->regs + HC_EN);
+ }
+}
+
+static int mxic_spi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct mxic_spi *mxic = spi_master_get_devdata(master);
+ unsigned int busw = OP_BUSW_1;
+ int ret;
+
+ if (t->rx_buf && t->tx_buf) {
+ if (((spi->mode & SPI_TX_QUAD) &&
+ !(spi->mode & SPI_RX_QUAD)) ||
+ ((spi->mode & SPI_TX_DUAL) &&
+ !(spi->mode & SPI_RX_DUAL)))
+ return -ENOTSUPP;
+ }
+
+ ret = mxic_spi_set_freq(mxic, t->speed_hz);
+ if (ret)
+ return ret;
+
+ if (t->tx_buf) {
+ if (spi->mode & SPI_TX_QUAD)
+ busw = OP_BUSW_4;
+ else if (spi->mode & SPI_TX_DUAL)
+ busw = OP_BUSW_2;
+ } else if (t->rx_buf) {
+ if (spi->mode & SPI_RX_QUAD)
+ busw = OP_BUSW_4;
+ else if (spi->mode & SPI_RX_DUAL)
+ busw = OP_BUSW_2;
+ }
+
+ writel(OP_CMD_BYTES(1) | OP_CMD_BUSW(busw) |
+ OP_DATA_BUSW(busw) | (t->rx_buf ? OP_READ : 0),
+ mxic->regs + SS_CTRL(0));
+
+ ret = mxic_spi_data_xfer(mxic, t->tx_buf, t->rx_buf, t->len);
+ if (ret)
+ return ret;
+
+ spi_finalize_current_transfer(master);
+
+ return 0;
+}
+
+/* ECC wrapper */
+static int mxic_spi_mem_ecc_init_ctx(struct nand_device *nand)
+{
+ struct nand_ecc_engine_ops *ops = mxic_ecc_get_pipelined_ops();
+ struct mxic_spi *mxic = nand->ecc.engine->priv;
+
+ mxic->ecc.use_pipelined_conf = true;
+
+ return ops->init_ctx(nand);
+}
+
+static void mxic_spi_mem_ecc_cleanup_ctx(struct nand_device *nand)
+{
+ struct nand_ecc_engine_ops *ops = mxic_ecc_get_pipelined_ops();
+ struct mxic_spi *mxic = nand->ecc.engine->priv;
+
+ mxic->ecc.use_pipelined_conf = false;
+
+ ops->cleanup_ctx(nand);
+}
+
+static int mxic_spi_mem_ecc_prepare_io_req(struct nand_device *nand,
+ struct nand_page_io_req *req)
+{
+ struct nand_ecc_engine_ops *ops = mxic_ecc_get_pipelined_ops();
+
+ return ops->prepare_io_req(nand, req);
+}
+
+static int mxic_spi_mem_ecc_finish_io_req(struct nand_device *nand,
+ struct nand_page_io_req *req)
+{
+ struct nand_ecc_engine_ops *ops = mxic_ecc_get_pipelined_ops();
+
+ return ops->finish_io_req(nand, req);
+}
+
+static struct nand_ecc_engine_ops mxic_spi_mem_ecc_engine_pipelined_ops = {
+ .init_ctx = mxic_spi_mem_ecc_init_ctx,
+ .cleanup_ctx = mxic_spi_mem_ecc_cleanup_ctx,
+ .prepare_io_req = mxic_spi_mem_ecc_prepare_io_req,
+ .finish_io_req = mxic_spi_mem_ecc_finish_io_req,
+};
+
+static void mxic_spi_mem_ecc_remove(struct mxic_spi *mxic)
+{
+ if (mxic->ecc.pipelined_engine) {
+ mxic_ecc_put_pipelined_engine(mxic->ecc.pipelined_engine);
+ nand_ecc_unregister_on_host_hw_engine(mxic->ecc.pipelined_engine);
+ }
+}
+
+static int mxic_spi_mem_ecc_probe(struct platform_device *pdev,
+ struct mxic_spi *mxic)
+{
+ struct nand_ecc_engine *eng;
+
+ if (!mxic_ecc_get_pipelined_ops())
+ return -EOPNOTSUPP;
+
+ eng = mxic_ecc_get_pipelined_engine(pdev);
+ if (IS_ERR(eng))
+ return PTR_ERR(eng);
+
+ eng->dev = &pdev->dev;
+ eng->integration = NAND_ECC_ENGINE_INTEGRATION_PIPELINED;
+ eng->ops = &mxic_spi_mem_ecc_engine_pipelined_ops;
+ eng->priv = mxic;
+ mxic->ecc.pipelined_engine = eng;
+ nand_ecc_register_on_host_hw_engine(eng);
+
+ return 0;
+}
+
+static int __maybe_unused mxic_spi_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct mxic_spi *mxic = spi_master_get_devdata(master);
+
+ mxic_spi_clk_disable(mxic);
+ clk_disable_unprepare(mxic->ps_clk);
+
+ return 0;
+}
+
+static int __maybe_unused mxic_spi_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct mxic_spi *mxic = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_prepare_enable(mxic->ps_clk);
+ if (ret) {
+ dev_err(dev, "Cannot enable ps_clock.\n");
+ return ret;
+ }
+
+ return mxic_spi_clk_enable(mxic);
+}
+
+static const struct dev_pm_ops mxic_spi_dev_pm_ops = {
+ SET_RUNTIME_PM_OPS(mxic_spi_runtime_suspend,
+ mxic_spi_runtime_resume, NULL)
+};
+
+static int mxic_spi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct resource *res;
+ struct mxic_spi *mxic;
+ int ret;
+
+ master = devm_spi_alloc_master(&pdev->dev, sizeof(struct mxic_spi));
+ if (!master)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, master);
+
+ mxic = spi_master_get_devdata(master);
+ mxic->dev = &pdev->dev;
+
+ master->dev.of_node = pdev->dev.of_node;
+
+ mxic->ps_clk = devm_clk_get(&pdev->dev, "ps_clk");
+ if (IS_ERR(mxic->ps_clk))
+ return PTR_ERR(mxic->ps_clk);
+
+ mxic->send_clk = devm_clk_get(&pdev->dev, "send_clk");
+ if (IS_ERR(mxic->send_clk))
+ return PTR_ERR(mxic->send_clk);
+
+ mxic->send_dly_clk = devm_clk_get(&pdev->dev, "send_dly_clk");
+ if (IS_ERR(mxic->send_dly_clk))
+ return PTR_ERR(mxic->send_dly_clk);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
+ mxic->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mxic->regs))
+ return PTR_ERR(mxic->regs);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dirmap");
+ mxic->linear.map = devm_ioremap_resource(&pdev->dev, res);
+ if (!IS_ERR(mxic->linear.map)) {
+ mxic->linear.dma = res->start;
+ mxic->linear.size = resource_size(res);
+ } else {
+ mxic->linear.map = NULL;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+ master->auto_runtime_pm = true;
+
+ master->num_chipselect = 1;
+ master->mem_ops = &mxic_spi_mem_ops;
+ master->mem_caps = &mxic_spi_mem_caps;
+
+ master->set_cs = mxic_spi_set_cs;
+ master->transfer_one = mxic_spi_transfer_one;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->mode_bits = SPI_CPOL | SPI_CPHA |
+ SPI_RX_DUAL | SPI_TX_DUAL |
+ SPI_RX_QUAD | SPI_TX_QUAD |
+ SPI_RX_OCTAL | SPI_TX_OCTAL;
+
+ mxic_spi_hw_init(mxic);
+
+ ret = mxic_spi_mem_ecc_probe(pdev, mxic);
+ if (ret == -EPROBE_DEFER) {
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+ }
+
+ ret = spi_register_master(master);
+ if (ret) {
+ dev_err(&pdev->dev, "spi_register_master failed\n");
+ pm_runtime_disable(&pdev->dev);
+ mxic_spi_mem_ecc_remove(mxic);
+ }
+
+ return ret;
+}
+
+static int mxic_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct mxic_spi *mxic = spi_master_get_devdata(master);
+
+ pm_runtime_disable(&pdev->dev);
+ mxic_spi_mem_ecc_remove(mxic);
+ spi_unregister_master(master);
+
+ return 0;
+}
+
+static const struct of_device_id mxic_spi_of_ids[] = {
+ { .compatible = "mxicy,mx25f0a-spi", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mxic_spi_of_ids);
+
+static struct platform_driver mxic_spi_driver = {
+ .probe = mxic_spi_probe,
+ .remove = mxic_spi_remove,
+ .driver = {
+ .name = "mxic-spi",
+ .of_match_table = mxic_spi_of_ids,
+ .pm = &mxic_spi_dev_pm_ops,
+ },
+};
+module_platform_driver(mxic_spi_driver);
+
+MODULE_AUTHOR("Mason Yang <masonccyang@mxic.com.tw>");
+MODULE_DESCRIPTION("MX25F0A SPI controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c
new file mode 100644
index 000000000..55178579f
--- /dev/null
+++ b/drivers/spi/spi-mxs.c
@@ -0,0 +1,675 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Freescale MXS SPI master driver
+//
+// Copyright 2012 DENX Software Engineering, GmbH.
+// Copyright 2012 Freescale Semiconductor, Inc.
+// Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved.
+//
+// Rework and transition to new API by:
+// Marek Vasut <marex@denx.de>
+//
+// Based on previous attempt by:
+// Fabio Estevam <fabio.estevam@freescale.com>
+//
+// Based on code from U-Boot bootloader by:
+// Marek Vasut <marex@denx.de>
+//
+// Based on spi-stmp.c, which is:
+// Author: Dmitry Pervushin <dimka@embeddedalley.com>
+
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/highmem.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/completion.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/regulator/consumer.h>
+#include <linux/pm_runtime.h>
+#include <linux/module.h>
+#include <linux/stmp_device.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/mxs-spi.h>
+#include <trace/events/spi.h>
+
+#define DRIVER_NAME "mxs-spi"
+
+/* Use a 10 s timeout for very long transfers; it should suffice. */
+#define SSP_TIMEOUT 10000
+
+#define SG_MAXLEN 0xff00
+
+/*
+ * Flags for the txrx functions. More efficient than using a separate
+ * argument for each one.
+ */
+#define TXRX_WRITE (1<<0) /* This is a write */
+#define TXRX_DEASSERT_CS (1<<1) /* De-assert CS at end of txrx */
+
+struct mxs_spi {
+ struct mxs_ssp ssp;
+ struct completion c;
+ unsigned int sck; /* Rate requested (vs actual) */
+};
+
+static int mxs_spi_setup_transfer(struct spi_device *dev,
+ const struct spi_transfer *t)
+{
+ struct mxs_spi *spi = spi_master_get_devdata(dev->master);
+ struct mxs_ssp *ssp = &spi->ssp;
+ const unsigned int hz = min(dev->max_speed_hz, t->speed_hz);
+
+ if (hz == 0) {
+ dev_err(&dev->dev, "SPI clock rate of zero not allowed\n");
+ return -EINVAL;
+ }
+
+ if (hz != spi->sck) {
+ mxs_ssp_set_clk_rate(ssp, hz);
+ /*
+ * Save requested rate, hz, rather than the actual rate,
+ * ssp->clk_rate. Otherwise we would set the rate every transfer
+ * when the actual rate is not quite the same as requested rate.
+ */
+ spi->sck = hz;
+ /*
+ * Perhaps we should return an error if the actual clock is
+ * nowhere close to what was requested?
+ */
+ }
+
+ writel(BM_SSP_CTRL0_LOCK_CS,
+ ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
+
+ writel(BF_SSP_CTRL1_SSP_MODE(BV_SSP_CTRL1_SSP_MODE__SPI) |
+ BF_SSP_CTRL1_WORD_LENGTH(BV_SSP_CTRL1_WORD_LENGTH__EIGHT_BITS) |
+ ((dev->mode & SPI_CPOL) ? BM_SSP_CTRL1_POLARITY : 0) |
+ ((dev->mode & SPI_CPHA) ? BM_SSP_CTRL1_PHASE : 0),
+ ssp->base + HW_SSP_CTRL1(ssp));
+
+ writel(0x0, ssp->base + HW_SSP_CMD0);
+ writel(0x0, ssp->base + HW_SSP_CMD1);
+
+ return 0;
+}
+
+static u32 mxs_spi_cs_to_reg(unsigned cs)
+{
+ u32 select = 0;
+
+ /*
+ * i.MX28 Datasheet: 17.10.1: HW_SSP_CTRL0
+ *
+ * The bits BM_SSP_CTRL0_WAIT_FOR_CMD and BM_SSP_CTRL0_WAIT_FOR_IRQ
+ * in the HW_SSP_CTRL0 register have multiple uses; please refer to
+ * the datasheet for further details. In SPI mode, they are used to
+ * toggle the chip-select lines (nCS pins).
+ */
+ if (cs & 1)
+ select |= BM_SSP_CTRL0_WAIT_FOR_CMD;
+ if (cs & 2)
+ select |= BM_SSP_CTRL0_WAIT_FOR_IRQ;
+
+ return select;
+}
+
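+/*
+ * Busy-wait until all bits in @mask are set (or cleared when @set is
+ * false) in the given SSP register, giving up after SSP_TIMEOUT ms.
+ */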
+static int mxs_ssp_wait(struct mxs_spi *spi, int offset, int mask, bool set)
+{
+ const unsigned long timeout = jiffies + msecs_to_jiffies(SSP_TIMEOUT);
+ struct mxs_ssp *ssp = &spi->ssp;
+ u32 reg;
+
+ do {
+ reg = readl_relaxed(ssp->base + offset);
+
+ if (!set)
+ reg = ~reg;
+
+ reg &= mask;
+
+ if (reg == mask)
+ return 0;
+ } while (time_before(jiffies, timeout));
+
+ return -ETIMEDOUT;
+}
+
+static void mxs_ssp_dma_irq_callback(void *param)
+{
+ struct mxs_spi *spi = param;
+
+ complete(&spi->c);
+}
+
+static irqreturn_t mxs_ssp_irq_handler(int irq, void *dev_id)
+{
+ struct mxs_ssp *ssp = dev_id;
+
+ dev_err(ssp->dev, "%s[%i] CTRL1=%08x STATUS=%08x\n",
+ __func__, __LINE__,
+ readl(ssp->base + HW_SSP_CTRL1(ssp)),
+ readl(ssp->base + HW_SSP_STATUS(ssp)));
+ return IRQ_HANDLED;
+}
+
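+/*
+ * DMA transfer path: the buffer is split into chunks of at most SG_MAXLEN
+ * bytes (PAGE_SIZE for vmalloc'ed buffers). Each chunk is queued as a PIO
+ * register-write descriptor followed by a data descriptor; completion is
+ * signalled by the callback attached to the last descriptor.
+ */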
+static int mxs_spi_txrx_dma(struct mxs_spi *spi,
+ unsigned char *buf, int len,
+ unsigned int flags)
+{
+ struct mxs_ssp *ssp = &spi->ssp;
+ struct dma_async_tx_descriptor *desc = NULL;
+ const bool vmalloced_buf = is_vmalloc_addr(buf);
+ const int desc_len = vmalloced_buf ? PAGE_SIZE : SG_MAXLEN;
+ const int sgs = DIV_ROUND_UP(len, desc_len);
+ int sg_count;
+ int min, ret;
+ u32 ctrl0;
+ struct page *vm_page;
+ struct {
+ u32 pio[4];
+ struct scatterlist sg;
+ } *dma_xfer;
+
+ if (!len)
+ return -EINVAL;
+
+ dma_xfer = kcalloc(sgs, sizeof(*dma_xfer), GFP_KERNEL);
+ if (!dma_xfer)
+ return -ENOMEM;
+
+ reinit_completion(&spi->c);
+
+ /* Chip select was already programmed into CTRL0 */
+ ctrl0 = readl(ssp->base + HW_SSP_CTRL0);
+ ctrl0 &= ~(BM_SSP_CTRL0_XFER_COUNT | BM_SSP_CTRL0_IGNORE_CRC |
+ BM_SSP_CTRL0_READ);
+ ctrl0 |= BM_SSP_CTRL0_DATA_XFER;
+
+ if (!(flags & TXRX_WRITE))
+ ctrl0 |= BM_SSP_CTRL0_READ;
+
+ /* Queue the DMA data transfer. */
+ for (sg_count = 0; sg_count < sgs; sg_count++) {
+ /* Prepare the transfer descriptor. */
+ min = min(len, desc_len);
+
+ /*
+ * De-assert CS on last segment if flag is set (i.e., no more
+ * transfers will follow)
+ */
+ if ((sg_count + 1 == sgs) && (flags & TXRX_DEASSERT_CS))
+ ctrl0 |= BM_SSP_CTRL0_IGNORE_CRC;
+
+ if (ssp->devid == IMX23_SSP) {
+ ctrl0 &= ~BM_SSP_CTRL0_XFER_COUNT;
+ ctrl0 |= min;
+ }
+
+ dma_xfer[sg_count].pio[0] = ctrl0;
+ dma_xfer[sg_count].pio[3] = min;
+
+ if (vmalloced_buf) {
+ vm_page = vmalloc_to_page(buf);
+ if (!vm_page) {
+ ret = -ENOMEM;
+ goto err_vmalloc;
+ }
+
+ sg_init_table(&dma_xfer[sg_count].sg, 1);
+ sg_set_page(&dma_xfer[sg_count].sg, vm_page,
+ min, offset_in_page(buf));
+ } else {
+ sg_init_one(&dma_xfer[sg_count].sg, buf, min);
+ }
+
+ ret = dma_map_sg(ssp->dev, &dma_xfer[sg_count].sg, 1,
+ (flags & TXRX_WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+ len -= min;
+ buf += min;
+
+ /* Queue the PIO register write transfer. */
+ desc = dmaengine_prep_slave_sg(ssp->dmach,
+ (struct scatterlist *)dma_xfer[sg_count].pio,
+ (ssp->devid == IMX23_SSP) ? 1 : 4,
+ DMA_TRANS_NONE,
+ sg_count ? DMA_PREP_INTERRUPT : 0);
+ if (!desc) {
+ dev_err(ssp->dev,
+ "Failed to get PIO reg. write descriptor.\n");
+ ret = -EINVAL;
+ goto err_mapped;
+ }
+
+ desc = dmaengine_prep_slave_sg(ssp->dmach,
+ &dma_xfer[sg_count].sg, 1,
+ (flags & TXRX_WRITE) ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+
+ if (!desc) {
+ dev_err(ssp->dev,
+ "Failed to get DMA data write descriptor.\n");
+ ret = -EINVAL;
+ goto err_mapped;
+ }
+ }
+
+ /*
+ * The last descriptor must have this callback to finish the
+ * DMA transaction.
+ */
+ desc->callback = mxs_ssp_dma_irq_callback;
+ desc->callback_param = spi;
+
+ /* Start the transfer. */
+ dmaengine_submit(desc);
+ dma_async_issue_pending(ssp->dmach);
+
+ if (!wait_for_completion_timeout(&spi->c,
+ msecs_to_jiffies(SSP_TIMEOUT))) {
+ dev_err(ssp->dev, "DMA transfer timeout\n");
+ ret = -ETIMEDOUT;
+ dmaengine_terminate_all(ssp->dmach);
+ goto err_vmalloc;
+ }
+
+ ret = 0;
+
+err_vmalloc:
+ while (--sg_count >= 0) {
+err_mapped:
+ dma_unmap_sg(ssp->dev, &dma_xfer[sg_count].sg, 1,
+ (flags & TXRX_WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ }
+
+ kfree(dma_xfer);
+
+ return ret;
+}
+
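+/*
+ * PIO transfer path: move one byte at a time through the data register,
+ * de-asserting CS on the final byte when TXRX_DEASSERT_CS is set.
+ */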
+static int mxs_spi_txrx_pio(struct mxs_spi *spi,
+ unsigned char *buf, int len,
+ unsigned int flags)
+{
+ struct mxs_ssp *ssp = &spi->ssp;
+
+ writel(BM_SSP_CTRL0_IGNORE_CRC,
+ ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
+
+ while (len--) {
+ if (len == 0 && (flags & TXRX_DEASSERT_CS))
+ writel(BM_SSP_CTRL0_IGNORE_CRC,
+ ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
+
+ if (ssp->devid == IMX23_SSP) {
+ writel(BM_SSP_CTRL0_XFER_COUNT,
+ ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
+ writel(1,
+ ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
+ } else {
+ writel(1, ssp->base + HW_SSP_XFER_SIZE);
+ }
+
+ if (flags & TXRX_WRITE)
+ writel(BM_SSP_CTRL0_READ,
+ ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
+ else
+ writel(BM_SSP_CTRL0_READ,
+ ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
+
+ writel(BM_SSP_CTRL0_RUN,
+ ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
+
+ if (mxs_ssp_wait(spi, HW_SSP_CTRL0, BM_SSP_CTRL0_RUN, 1))
+ return -ETIMEDOUT;
+
+ if (flags & TXRX_WRITE)
+ writel(*buf, ssp->base + HW_SSP_DATA(ssp));
+
+ writel(BM_SSP_CTRL0_DATA_XFER,
+ ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
+
+ if (!(flags & TXRX_WRITE)) {
+ if (mxs_ssp_wait(spi, HW_SSP_STATUS(ssp),
+ BM_SSP_STATUS_FIFO_EMPTY, 0))
+ return -ETIMEDOUT;
+
+ *buf = (readl(ssp->base + HW_SSP_DATA(ssp)) & 0xff);
+ }
+
+ if (mxs_ssp_wait(spi, HW_SSP_CTRL0, BM_SSP_CTRL0_RUN, 0))
+ return -ETIMEDOUT;
+
+ buf++;
+ }
+
+ if (len <= 0)
+ return 0;
+
+ return -ETIMEDOUT;
+}
+
+static int mxs_spi_transfer_one(struct spi_master *master,
+ struct spi_message *m)
+{
+ struct mxs_spi *spi = spi_master_get_devdata(master);
+ struct mxs_ssp *ssp = &spi->ssp;
+ struct spi_transfer *t;
+ unsigned int flag;
+ int status = 0;
+
+ /* Program CS register bits here, it will be used for all transfers. */
+ writel(BM_SSP_CTRL0_WAIT_FOR_CMD | BM_SSP_CTRL0_WAIT_FOR_IRQ,
+ ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
+ writel(mxs_spi_cs_to_reg(m->spi->chip_select),
+ ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
+
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+
+ trace_spi_transfer_start(m, t);
+
+ status = mxs_spi_setup_transfer(m->spi, t);
+ if (status)
+ break;
+
+ /* De-assert on last transfer, inverted by cs_change flag */
+ flag = (&t->transfer_list == m->transfers.prev) ^ t->cs_change ?
+ TXRX_DEASSERT_CS : 0;
+
+ /*
+ * Small blocks can be transferred via PIO.
+ * Measured by empirical means:
+ *
+ * dd if=/dev/mtdblock0 of=/dev/null bs=1024k count=1
+ *
+ * DMA only: 2.164808 seconds, 473.0KB/s
+ * Combined: 1.676276 seconds, 610.9KB/s
+ */
+ if (t->len < 32) {
+ writel(BM_SSP_CTRL1_DMA_ENABLE,
+ ssp->base + HW_SSP_CTRL1(ssp) +
+ STMP_OFFSET_REG_CLR);
+
+ if (t->tx_buf)
+ status = mxs_spi_txrx_pio(spi,
+ (void *)t->tx_buf,
+ t->len, flag | TXRX_WRITE);
+ if (t->rx_buf)
+ status = mxs_spi_txrx_pio(spi,
+ t->rx_buf, t->len,
+ flag);
+ } else {
+ writel(BM_SSP_CTRL1_DMA_ENABLE,
+ ssp->base + HW_SSP_CTRL1(ssp) +
+ STMP_OFFSET_REG_SET);
+
+ if (t->tx_buf)
+ status = mxs_spi_txrx_dma(spi,
+ (void *)t->tx_buf, t->len,
+ flag | TXRX_WRITE);
+ if (t->rx_buf)
+ status = mxs_spi_txrx_dma(spi,
+ t->rx_buf, t->len,
+ flag);
+ }
+
+ trace_spi_transfer_stop(m, t);
+
+ if (status) {
+ stmp_reset_block(ssp->base);
+ break;
+ }
+
+ m->actual_length += t->len;
+ }
+
+ m->status = status;
+ spi_finalize_current_message(master);
+
+ return status;
+}
+
+static int mxs_spi_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct mxs_spi *spi = spi_master_get_devdata(master);
+ struct mxs_ssp *ssp = &spi->ssp;
+ int ret;
+
+ clk_disable_unprepare(ssp->clk);
+
+ ret = pinctrl_pm_select_idle_state(dev);
+ if (ret) {
+ int ret2 = clk_prepare_enable(ssp->clk);
+
+ if (ret2)
+ dev_warn(dev, "Failed to reenable clock after failing pinctrl request (pinctrl: %d, clk: %d)\n",
+ ret, ret2);
+ }
+
+ return ret;
+}
+
+static int mxs_spi_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct mxs_spi *spi = spi_master_get_devdata(master);
+ struct mxs_ssp *ssp = &spi->ssp;
+ int ret;
+
+ ret = pinctrl_pm_select_default_state(dev);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(ssp->clk);
+ if (ret)
+ pinctrl_pm_select_idle_state(dev);
+
+ return ret;
+}
+
+static int __maybe_unused mxs_spi_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ int ret;
+
+ ret = spi_master_suspend(master);
+ if (ret)
+ return ret;
+
+ if (!pm_runtime_suspended(dev))
+ return mxs_spi_runtime_suspend(dev);
+ else
+ return 0;
+}
+
+static int __maybe_unused mxs_spi_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ int ret;
+
+ if (!pm_runtime_suspended(dev))
+ ret = mxs_spi_runtime_resume(dev);
+ else
+ ret = 0;
+ if (ret)
+ return ret;
+
+ ret = spi_master_resume(master);
+ if (ret < 0 && !pm_runtime_suspended(dev))
+ mxs_spi_runtime_suspend(dev);
+
+ return ret;
+}
+
+static const struct dev_pm_ops mxs_spi_pm = {
+ SET_RUNTIME_PM_OPS(mxs_spi_runtime_suspend,
+ mxs_spi_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(mxs_spi_suspend, mxs_spi_resume)
+};
+
+static const struct of_device_id mxs_spi_dt_ids[] = {
+ { .compatible = "fsl,imx23-spi", .data = (void *) IMX23_SSP, },
+ { .compatible = "fsl,imx28-spi", .data = (void *) IMX28_SSP, },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mxs_spi_dt_ids);
+
+static int mxs_spi_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *of_id =
+ of_match_device(mxs_spi_dt_ids, &pdev->dev);
+ struct device_node *np = pdev->dev.of_node;
+ struct spi_master *master;
+ struct mxs_spi *spi;
+ struct mxs_ssp *ssp;
+ struct clk *clk;
+ void __iomem *base;
+ int devid, clk_freq;
+ int ret = 0, irq_err;
+
+ /*
+ * Default clock speed for the SPI core. 160 MHz seems to
+ * work reasonably well with most SPI flashes, so use this
+ * as a default. Override with the "clock-frequency" DT property.
+ */
+ const int clk_freq_default = 160000000;
+
+ irq_err = platform_get_irq(pdev, 0);
+ if (irq_err < 0)
+ return irq_err;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ devid = (enum mxs_ssp_id) of_id->data;
+ ret = of_property_read_u32(np, "clock-frequency",
+ &clk_freq);
+ if (ret)
+ clk_freq = clk_freq_default;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*spi));
+ if (!master)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, master);
+
+ master->transfer_one_message = mxs_spi_transfer_one;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->mode_bits = SPI_CPOL | SPI_CPHA;
+ master->num_chipselect = 3;
+ master->dev.of_node = np;
+ master->flags = SPI_MASTER_HALF_DUPLEX;
+ master->auto_runtime_pm = true;
+
+ spi = spi_master_get_devdata(master);
+ ssp = &spi->ssp;
+ ssp->dev = &pdev->dev;
+ ssp->clk = clk;
+ ssp->base = base;
+ ssp->devid = devid;
+
+ init_completion(&spi->c);
+
+ ret = devm_request_irq(&pdev->dev, irq_err, mxs_ssp_irq_handler, 0,
+ dev_name(&pdev->dev), ssp);
+ if (ret)
+ goto out_master_free;
+
+ ssp->dmach = dma_request_chan(&pdev->dev, "rx-tx");
+ if (IS_ERR(ssp->dmach)) {
+ dev_err(ssp->dev, "Failed to request DMA\n");
+ ret = PTR_ERR(ssp->dmach);
+ goto out_master_free;
+ }
+
+ pm_runtime_enable(ssp->dev);
+ if (!pm_runtime_enabled(ssp->dev)) {
+ ret = mxs_spi_runtime_resume(ssp->dev);
+ if (ret < 0) {
+ dev_err(ssp->dev, "runtime resume failed\n");
+ goto out_dma_release;
+ }
+ }
+
+ ret = pm_runtime_resume_and_get(ssp->dev);
+ if (ret < 0) {
+ dev_err(ssp->dev, "runtime_get_sync failed\n");
+ goto out_pm_runtime_disable;
+ }
+
+ clk_set_rate(ssp->clk, clk_freq);
+
+ ret = stmp_reset_block(ssp->base);
+ if (ret)
+ goto out_pm_runtime_put;
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret) {
+ dev_err(&pdev->dev, "Cannot register SPI master, %d\n", ret);
+ goto out_pm_runtime_put;
+ }
+
+ pm_runtime_put(ssp->dev);
+
+ return 0;
+
+out_pm_runtime_put:
+ pm_runtime_put(ssp->dev);
+out_pm_runtime_disable:
+ pm_runtime_disable(ssp->dev);
+out_dma_release:
+ dma_release_channel(ssp->dmach);
+out_master_free:
+ spi_master_put(master);
+ return ret;
+}
+
+static int mxs_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct mxs_spi *spi;
+ struct mxs_ssp *ssp;
+
+ master = platform_get_drvdata(pdev);
+ spi = spi_master_get_devdata(master);
+ ssp = &spi->ssp;
+
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ mxs_spi_runtime_suspend(&pdev->dev);
+
+ dma_release_channel(ssp->dmach);
+
+ return 0;
+}
+
+static struct platform_driver mxs_spi_driver = {
+ .probe = mxs_spi_probe,
+ .remove = mxs_spi_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = mxs_spi_dt_ids,
+ .pm = &mxs_spi_pm,
+ },
+};
+
+module_platform_driver(mxs_spi_driver);
+
+MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
+MODULE_DESCRIPTION("MXS SPI master driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mxs-spi");
diff --git a/drivers/spi/spi-npcm-fiu.c b/drivers/spi/spi-npcm-fiu.c
new file mode 100644
index 000000000..0624f5288
--- /dev/null
+++ b/drivers/spi/spi-npcm-fiu.c
@@ -0,0 +1,791 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Nuvoton Technology corporation.
+
+#include <linux/bits.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/vmalloc.h>
+#include <linux/regmap.h>
+#include <linux/of_device.h>
+#include <linux/spi/spi-mem.h>
+#include <linux/mfd/syscon.h>
+
+/* NPCM7xx GCR module */
+#define NPCM7XX_INTCR3_OFFSET 0x9C
+#define NPCM7XX_INTCR3_FIU_FIX BIT(6)
+
+/* Flash Interface Unit (FIU) Registers */
+#define NPCM_FIU_DRD_CFG 0x00
+#define NPCM_FIU_DWR_CFG 0x04
+#define NPCM_FIU_UMA_CFG 0x08
+#define NPCM_FIU_UMA_CTS 0x0C
+#define NPCM_FIU_UMA_CMD 0x10
+#define NPCM_FIU_UMA_ADDR 0x14
+#define NPCM_FIU_PRT_CFG 0x18
+#define NPCM_FIU_UMA_DW0 0x20
+#define NPCM_FIU_UMA_DW1 0x24
+#define NPCM_FIU_UMA_DW2 0x28
+#define NPCM_FIU_UMA_DW3 0x2C
+#define NPCM_FIU_UMA_DR0 0x30
+#define NPCM_FIU_UMA_DR1 0x34
+#define NPCM_FIU_UMA_DR2 0x38
+#define NPCM_FIU_UMA_DR3 0x3C
+#define NPCM_FIU_CFG 0x78
+#define NPCM_FIU_MAX_REG_LIMIT 0x80
+
+/* FIU Direct Read Configuration Register */
+#define NPCM_FIU_DRD_CFG_LCK BIT(31)
+#define NPCM_FIU_DRD_CFG_R_BURST GENMASK(25, 24)
+#define NPCM_FIU_DRD_CFG_ADDSIZ GENMASK(17, 16)
+#define NPCM_FIU_DRD_CFG_DBW GENMASK(13, 12)
+#define NPCM_FIU_DRD_CFG_ACCTYPE GENMASK(9, 8)
+#define NPCM_FIU_DRD_CFG_RDCMD GENMASK(7, 0)
+#define NPCM_FIU_DRD_ADDSIZ_SHIFT 16
+#define NPCM_FIU_DRD_DBW_SHIFT 12
+#define NPCM_FIU_DRD_ACCTYPE_SHIFT 8
+
+/* FIU Direct Write Configuration Register */
+#define NPCM_FIU_DWR_CFG_LCK BIT(31)
+#define NPCM_FIU_DWR_CFG_W_BURST GENMASK(25, 24)
+#define NPCM_FIU_DWR_CFG_ADDSIZ GENMASK(17, 16)
+#define NPCM_FIU_DWR_CFG_ABPCK GENMASK(11, 10)
+#define NPCM_FIU_DWR_CFG_DBPCK GENMASK(9, 8)
+#define NPCM_FIU_DWR_CFG_WRCMD GENMASK(7, 0)
+#define NPCM_FIU_DWR_ADDSIZ_SHIFT 16
+#define NPCM_FIU_DWR_ABPCK_SHIFT 10
+#define NPCM_FIU_DWR_DBPCK_SHIFT 8
+
+/* FIU UMA Configuration Register */
+#define NPCM_FIU_UMA_CFG_LCK BIT(31)
+#define NPCM_FIU_UMA_CFG_CMMLCK BIT(30)
+#define NPCM_FIU_UMA_CFG_RDATSIZ GENMASK(28, 24)
+#define NPCM_FIU_UMA_CFG_DBSIZ GENMASK(23, 21)
+#define NPCM_FIU_UMA_CFG_WDATSIZ GENMASK(20, 16)
+#define NPCM_FIU_UMA_CFG_ADDSIZ GENMASK(13, 11)
+#define NPCM_FIU_UMA_CFG_CMDSIZ BIT(10)
+#define NPCM_FIU_UMA_CFG_RDBPCK GENMASK(9, 8)
+#define NPCM_FIU_UMA_CFG_DBPCK GENMASK(7, 6)
+#define NPCM_FIU_UMA_CFG_WDBPCK GENMASK(5, 4)
+#define NPCM_FIU_UMA_CFG_ADBPCK GENMASK(3, 2)
+#define NPCM_FIU_UMA_CFG_CMBPCK GENMASK(1, 0)
+#define NPCM_FIU_UMA_CFG_ADBPCK_SHIFT 2
+#define NPCM_FIU_UMA_CFG_WDBPCK_SHIFT 4
+#define NPCM_FIU_UMA_CFG_DBPCK_SHIFT 6
+#define NPCM_FIU_UMA_CFG_RDBPCK_SHIFT 8
+#define NPCM_FIU_UMA_CFG_ADDSIZ_SHIFT 11
+#define NPCM_FIU_UMA_CFG_WDATSIZ_SHIFT 16
+#define NPCM_FIU_UMA_CFG_DBSIZ_SHIFT 21
+#define NPCM_FIU_UMA_CFG_RDATSIZ_SHIFT 24
+
+/* FIU UMA Control and Status Register */
+#define NPCM_FIU_UMA_CTS_RDYIE BIT(25)
+#define NPCM_FIU_UMA_CTS_RDYST BIT(24)
+#define NPCM_FIU_UMA_CTS_SW_CS BIT(16)
+#define NPCM_FIU_UMA_CTS_DEV_NUM GENMASK(9, 8)
+#define NPCM_FIU_UMA_CTS_EXEC_DONE BIT(0)
+#define NPCM_FIU_UMA_CTS_DEV_NUM_SHIFT 8
+
+/* FIU UMA Command Register */
+#define NPCM_FIU_UMA_CMD_DUM3 GENMASK(31, 24)
+#define NPCM_FIU_UMA_CMD_DUM2 GENMASK(23, 16)
+#define NPCM_FIU_UMA_CMD_DUM1 GENMASK(15, 8)
+#define NPCM_FIU_UMA_CMD_CMD GENMASK(7, 0)
+
+/* FIU UMA Address Register */
+#define NPCM_FIU_UMA_ADDR_UMA_ADDR GENMASK(31, 0)
+#define NPCM_FIU_UMA_ADDR_AB3 GENMASK(31, 24)
+#define NPCM_FIU_UMA_ADDR_AB2 GENMASK(23, 16)
+#define NPCM_FIU_UMA_ADDR_AB1 GENMASK(15, 8)
+#define NPCM_FIU_UMA_ADDR_AB0 GENMASK(7, 0)
+
+/* FIU UMA Write Data Bytes 0-3 Register */
+#define NPCM_FIU_UMA_DW0_WB3 GENMASK(31, 24)
+#define NPCM_FIU_UMA_DW0_WB2 GENMASK(23, 16)
+#define NPCM_FIU_UMA_DW0_WB1 GENMASK(15, 8)
+#define NPCM_FIU_UMA_DW0_WB0 GENMASK(7, 0)
+
+/* FIU UMA Write Data Bytes 4-7 Register */
+#define NPCM_FIU_UMA_DW1_WB7 GENMASK(31, 24)
+#define NPCM_FIU_UMA_DW1_WB6 GENMASK(23, 16)
+#define NPCM_FIU_UMA_DW1_WB5 GENMASK(15, 8)
+#define NPCM_FIU_UMA_DW1_WB4 GENMASK(7, 0)
+
+/* FIU UMA Write Data Bytes 8-11 Register */
+#define NPCM_FIU_UMA_DW2_WB11 GENMASK(31, 24)
+#define NPCM_FIU_UMA_DW2_WB10 GENMASK(23, 16)
+#define NPCM_FIU_UMA_DW2_WB9 GENMASK(15, 8)
+#define NPCM_FIU_UMA_DW2_WB8 GENMASK(7, 0)
+
+/* FIU UMA Write Data Bytes 12-15 Register */
+#define NPCM_FIU_UMA_DW3_WB15 GENMASK(31, 24)
+#define NPCM_FIU_UMA_DW3_WB14 GENMASK(23, 16)
+#define NPCM_FIU_UMA_DW3_WB13 GENMASK(15, 8)
+#define NPCM_FIU_UMA_DW3_WB12 GENMASK(7, 0)
+
+/* FIU UMA Read Data Bytes 0-3 Register */
+#define NPCM_FIU_UMA_DR0_RB3 GENMASK(31, 24)
+#define NPCM_FIU_UMA_DR0_RB2 GENMASK(23, 16)
+#define NPCM_FIU_UMA_DR0_RB1 GENMASK(15, 8)
+#define NPCM_FIU_UMA_DR0_RB0 GENMASK(7, 0)
+
+/* FIU UMA Read Data Bytes 4-7 Register */
+#define NPCM_FIU_UMA_DR1_RB7 GENMASK(31, 24)
+#define NPCM_FIU_UMA_DR1_RB6 GENMASK(23, 16)
+#define NPCM_FIU_UMA_DR1_RB5 GENMASK(15, 8)
+#define NPCM_FIU_UMA_DR1_RB4 GENMASK(7, 0)
+
+/* FIU UMA Read Data Bytes 8-11 Register */
+#define NPCM_FIU_UMA_DR2_RB11 GENMASK(31, 24)
+#define NPCM_FIU_UMA_DR2_RB10 GENMASK(23, 16)
+#define NPCM_FIU_UMA_DR2_RB9 GENMASK(15, 8)
+#define NPCM_FIU_UMA_DR2_RB8 GENMASK(7, 0)
+
+/* FIU UMA Read Data Bytes 12-15 Register */
+#define NPCM_FIU_UMA_DR3_RB15 GENMASK(31, 24)
+#define NPCM_FIU_UMA_DR3_RB14 GENMASK(23, 16)
+#define NPCM_FIU_UMA_DR3_RB13 GENMASK(15, 8)
+#define NPCM_FIU_UMA_DR3_RB12 GENMASK(7, 0)
+
+/* FIU Configuration Register */
+#define NPCM_FIU_CFG_FIU_FIX BIT(31)
+
+/* FIU Read Mode */
+enum {
+ DRD_SINGLE_WIRE_MODE = 0,
+ DRD_DUAL_IO_MODE = 1,
+ DRD_QUAD_IO_MODE = 2,
+ DRD_SPI_X_MODE = 3,
+};
+
+enum {
+ DWR_ABPCK_BIT_PER_CLK = 0,
+ DWR_ABPCK_2_BIT_PER_CLK = 1,
+ DWR_ABPCK_4_BIT_PER_CLK = 2,
+};
+
+enum {
+ DWR_DBPCK_BIT_PER_CLK = 0,
+ DWR_DBPCK_2_BIT_PER_CLK = 1,
+ DWR_DBPCK_4_BIT_PER_CLK = 2,
+};
+
+#define NPCM_FIU_DRD_16_BYTE_BURST 0x3000000
+#define NPCM_FIU_DWR_16_BYTE_BURST 0x3000000
+
+#define MAP_SIZE_128MB 0x8000000
+#define MAP_SIZE_16MB 0x1000000
+#define MAP_SIZE_8MB 0x800000
+
+#define FIU_DRD_MAX_DUMMY_NUMBER 3
+#define NPCM_MAX_CHIP_NUM 4
+#define CHUNK_SIZE 16
+#define UMA_MICRO_SEC_TIMEOUT 150
+
+enum {
+ FIU0 = 0,
+ FIU3,
+ FIUX,
+ FIU1,
+};
+
+struct npcm_fiu_info {
+ char *name;
+ u32 fiu_id;
+ u32 max_map_size;
+ u32 max_cs;
+};
+
+struct fiu_data {
+ const struct npcm_fiu_info *npcm_fiu_data_info;
+ int fiu_max;
+};
+
+static const struct npcm_fiu_info npcm7xx_fiu_info[] = {
+ {.name = "FIU0", .fiu_id = FIU0,
+ .max_map_size = MAP_SIZE_128MB, .max_cs = 2},
+ {.name = "FIU3", .fiu_id = FIU3,
+ .max_map_size = MAP_SIZE_128MB, .max_cs = 4},
+ {.name = "FIUX", .fiu_id = FIUX,
+ .max_map_size = MAP_SIZE_16MB, .max_cs = 2} };
+
+static const struct fiu_data npcm7xx_fiu_data = {
+ .npcm_fiu_data_info = npcm7xx_fiu_info,
+ .fiu_max = 3,
+};
+
+static const struct npcm_fiu_info npxm8xx_fiu_info[] = {
+ {.name = "FIU0", .fiu_id = FIU0,
+ .max_map_size = MAP_SIZE_128MB, .max_cs = 2},
+ {.name = "FIU3", .fiu_id = FIU3,
+ .max_map_size = MAP_SIZE_128MB, .max_cs = 4},
+ {.name = "FIUX", .fiu_id = FIUX,
+ .max_map_size = MAP_SIZE_16MB, .max_cs = 2},
+ {.name = "FIU1", .fiu_id = FIU1,
+ .max_map_size = MAP_SIZE_16MB, .max_cs = 4} };
+
+static const struct fiu_data npxm8xx_fiu_data = {
+ .npcm_fiu_data_info = npxm8xx_fiu_info,
+ .fiu_max = 4,
+};
+
+struct npcm_fiu_spi;
+
+struct npcm_fiu_chip {
+ void __iomem *flash_region_mapped_ptr;
+ struct npcm_fiu_spi *fiu;
+ unsigned long clkrate;
+ u32 chipselect;
+};
+
+struct npcm_fiu_spi {
+ struct npcm_fiu_chip chip[NPCM_MAX_CHIP_NUM];
+ const struct npcm_fiu_info *info;
+ struct spi_mem_op drd_op;
+ struct resource *res_mem;
+ struct regmap *regmap;
+ unsigned long clkrate;
+ struct device *dev;
+ struct clk *clk;
+ bool spix_mode;
+};
+
+static const struct regmap_config npcm_mtd_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = NPCM_FIU_MAX_REG_LIMIT,
+};
+
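+/*
+ * Program the Direct Read Configuration register (access type, dummy bytes,
+ * read opcode and address size) from the spi-mem template and cache the
+ * values so redundant reprogramming can be skipped on later direct reads.
+ */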
+static void npcm_fiu_set_drd(struct npcm_fiu_spi *fiu,
+ const struct spi_mem_op *op)
+{
+ regmap_update_bits(fiu->regmap, NPCM_FIU_DRD_CFG,
+ NPCM_FIU_DRD_CFG_ACCTYPE,
+ ilog2(op->addr.buswidth) <<
+ NPCM_FIU_DRD_ACCTYPE_SHIFT);
+ fiu->drd_op.addr.buswidth = op->addr.buswidth;
+ regmap_update_bits(fiu->regmap, NPCM_FIU_DRD_CFG,
+ NPCM_FIU_DRD_CFG_DBW,
+ op->dummy.nbytes << NPCM_FIU_DRD_DBW_SHIFT);
+ fiu->drd_op.dummy.nbytes = op->dummy.nbytes;
+ regmap_update_bits(fiu->regmap, NPCM_FIU_DRD_CFG,
+ NPCM_FIU_DRD_CFG_RDCMD, op->cmd.opcode);
+ fiu->drd_op.cmd.opcode = op->cmd.opcode;
+ regmap_update_bits(fiu->regmap, NPCM_FIU_DRD_CFG,
+ NPCM_FIU_DRD_CFG_ADDSIZ,
+ (op->addr.nbytes - 3) << NPCM_FIU_DRD_ADDSIZ_SHIFT);
+ fiu->drd_op.addr.nbytes = op->addr.nbytes;
+}
+
+static ssize_t npcm_fiu_direct_read(struct spi_mem_dirmap_desc *desc,
+ u64 offs, size_t len, void *buf)
+{
+ struct npcm_fiu_spi *fiu =
+ spi_controller_get_devdata(desc->mem->spi->master);
+ struct npcm_fiu_chip *chip = &fiu->chip[desc->mem->spi->chip_select];
+ void __iomem *src = (void __iomem *)(chip->flash_region_mapped_ptr +
+ offs);
+ u8 *buf_rx = buf;
+ u32 i;
+
+ if (fiu->spix_mode) {
+ for (i = 0 ; i < len ; i++)
+ *(buf_rx + i) = ioread8(src + i);
+ } else {
+ if (desc->info.op_tmpl.addr.buswidth != fiu->drd_op.addr.buswidth ||
+ desc->info.op_tmpl.dummy.nbytes != fiu->drd_op.dummy.nbytes ||
+ desc->info.op_tmpl.cmd.opcode != fiu->drd_op.cmd.opcode ||
+ desc->info.op_tmpl.addr.nbytes != fiu->drd_op.addr.nbytes)
+ npcm_fiu_set_drd(fiu, &desc->info.op_tmpl);
+
+ memcpy_fromio(buf_rx, src, len);
+ }
+
+ return len;
+}
+
+static ssize_t npcm_fiu_direct_write(struct spi_mem_dirmap_desc *desc,
+ u64 offs, size_t len, const void *buf)
+{
+ struct npcm_fiu_spi *fiu =
+ spi_controller_get_devdata(desc->mem->spi->master);
+ struct npcm_fiu_chip *chip = &fiu->chip[desc->mem->spi->chip_select];
+ void __iomem *dst = (void __iomem *)(chip->flash_region_mapped_ptr +
+ offs);
+ const u8 *buf_tx = buf;
+ u32 i;
+
+ if (fiu->spix_mode)
+ for (i = 0 ; i < len ; i++)
+ iowrite8(*(buf_tx + i), dst + i);
+ else
+ memcpy_toio(dst, buf_tx, len);
+
+ return len;
+}
+
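+/*
+ * Run a User Mode Access (UMA) read: program the command, optional address
+ * and dummy cycles, trigger the transaction, poll for completion and copy
+ * up to 16 bytes out of the UMA read data registers.
+ */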
+static int npcm_fiu_uma_read(struct spi_mem *mem,
+ const struct spi_mem_op *op, u32 addr,
+ bool is_address_size, u8 *data, u32 data_size)
+{
+ struct npcm_fiu_spi *fiu =
+ spi_controller_get_devdata(mem->spi->master);
+ u32 uma_cfg = BIT(10);
+ u32 data_reg[4];
+ int ret;
+ u32 val;
+ u32 i;
+
+ regmap_update_bits(fiu->regmap, NPCM_FIU_UMA_CTS,
+ NPCM_FIU_UMA_CTS_DEV_NUM,
+ (mem->spi->chip_select <<
+ NPCM_FIU_UMA_CTS_DEV_NUM_SHIFT));
+ regmap_update_bits(fiu->regmap, NPCM_FIU_UMA_CMD,
+ NPCM_FIU_UMA_CMD_CMD, op->cmd.opcode);
+
+ if (is_address_size) {
+ uma_cfg |= ilog2(op->cmd.buswidth);
+ uma_cfg |= ilog2(op->addr.buswidth)
+ << NPCM_FIU_UMA_CFG_ADBPCK_SHIFT;
+ if (op->dummy.nbytes)
+ uma_cfg |= ilog2(op->dummy.buswidth)
+ << NPCM_FIU_UMA_CFG_DBPCK_SHIFT;
+ uma_cfg |= ilog2(op->data.buswidth)
+ << NPCM_FIU_UMA_CFG_RDBPCK_SHIFT;
+ uma_cfg |= op->dummy.nbytes << NPCM_FIU_UMA_CFG_DBSIZ_SHIFT;
+ uma_cfg |= op->addr.nbytes << NPCM_FIU_UMA_CFG_ADDSIZ_SHIFT;
+ regmap_write(fiu->regmap, NPCM_FIU_UMA_ADDR, addr);
+ } else {
+ regmap_write(fiu->regmap, NPCM_FIU_UMA_ADDR, 0x0);
+ }
+
+ uma_cfg |= data_size << NPCM_FIU_UMA_CFG_RDATSIZ_SHIFT;
+ regmap_write(fiu->regmap, NPCM_FIU_UMA_CFG, uma_cfg);
+ regmap_write_bits(fiu->regmap, NPCM_FIU_UMA_CTS,
+ NPCM_FIU_UMA_CTS_EXEC_DONE,
+ NPCM_FIU_UMA_CTS_EXEC_DONE);
+ ret = regmap_read_poll_timeout(fiu->regmap, NPCM_FIU_UMA_CTS, val,
+ (!(val & NPCM_FIU_UMA_CTS_EXEC_DONE)), 0,
+ UMA_MICRO_SEC_TIMEOUT);
+ if (ret)
+ return ret;
+
+ if (data_size) {
+ for (i = 0; i < DIV_ROUND_UP(data_size, 4); i++)
+ regmap_read(fiu->regmap, NPCM_FIU_UMA_DR0 + (i * 4),
+ &data_reg[i]);
+ memcpy(data, data_reg, data_size);
+ }
+
+ return 0;
+}
+
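+/*
+ * Run a User Mode Access (UMA) write: load up to 16 bytes into the UMA
+ * write data registers, program the command and optional address, then
+ * trigger the transaction and poll for completion.
+ */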
+static int npcm_fiu_uma_write(struct spi_mem *mem,
+ const struct spi_mem_op *op, u8 cmd,
+ bool is_address_size, u8 *data, u32 data_size)
+{
+ struct npcm_fiu_spi *fiu =
+ spi_controller_get_devdata(mem->spi->master);
+ u32 uma_cfg = BIT(10);
+ u32 data_reg[4] = {0};
+ u32 val;
+ u32 i;
+
+ regmap_update_bits(fiu->regmap, NPCM_FIU_UMA_CTS,
+ NPCM_FIU_UMA_CTS_DEV_NUM,
+ (mem->spi->chip_select <<
+ NPCM_FIU_UMA_CTS_DEV_NUM_SHIFT));
+
+ regmap_update_bits(fiu->regmap, NPCM_FIU_UMA_CMD,
+ NPCM_FIU_UMA_CMD_CMD, cmd);
+
+ if (data_size) {
+ memcpy(data_reg, data, data_size);
+ for (i = 0; i < DIV_ROUND_UP(data_size, 4); i++)
+ regmap_write(fiu->regmap, NPCM_FIU_UMA_DW0 + (i * 4),
+ data_reg[i]);
+ }
+
+ if (is_address_size) {
+ uma_cfg |= ilog2(op->cmd.buswidth);
+ uma_cfg |= ilog2(op->addr.buswidth) <<
+ NPCM_FIU_UMA_CFG_ADBPCK_SHIFT;
+ uma_cfg |= ilog2(op->data.buswidth) <<
+ NPCM_FIU_UMA_CFG_WDBPCK_SHIFT;
+ uma_cfg |= op->addr.nbytes << NPCM_FIU_UMA_CFG_ADDSIZ_SHIFT;
+ regmap_write(fiu->regmap, NPCM_FIU_UMA_ADDR, op->addr.val);
+ } else {
+ regmap_write(fiu->regmap, NPCM_FIU_UMA_ADDR, 0x0);
+ }
+
+ uma_cfg |= (data_size << NPCM_FIU_UMA_CFG_WDATSIZ_SHIFT);
+ regmap_write(fiu->regmap, NPCM_FIU_UMA_CFG, uma_cfg);
+
+ regmap_write_bits(fiu->regmap, NPCM_FIU_UMA_CTS,
+ NPCM_FIU_UMA_CTS_EXEC_DONE,
+ NPCM_FIU_UMA_CTS_EXEC_DONE);
+
+ return regmap_read_poll_timeout(fiu->regmap, NPCM_FIU_UMA_CTS, val,
+ (!(val & NPCM_FIU_UMA_CTS_EXEC_DONE)), 0,
+ UMA_MICRO_SEC_TIMEOUT);
+}
+
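+/*
+ * Manual write: keep the software chip select asserted, send the command
+ * and address, then stream the payload in CHUNK_SIZE-byte pieces through
+ * repeated UMA writes before releasing the chip select.
+ */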
+static int npcm_fiu_manualwrite(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ struct npcm_fiu_spi *fiu =
+ spi_controller_get_devdata(mem->spi->master);
+ u8 *data = (u8 *)op->data.buf.out;
+ u32 num_data_chunks;
+ u32 remain_data;
+ u32 idx = 0;
+ int ret;
+
+ num_data_chunks = op->data.nbytes / CHUNK_SIZE;
+ remain_data = op->data.nbytes % CHUNK_SIZE;
+
+ regmap_update_bits(fiu->regmap, NPCM_FIU_UMA_CTS,
+ NPCM_FIU_UMA_CTS_DEV_NUM,
+ (mem->spi->chip_select <<
+ NPCM_FIU_UMA_CTS_DEV_NUM_SHIFT));
+ regmap_update_bits(fiu->regmap, NPCM_FIU_UMA_CTS,
+ NPCM_FIU_UMA_CTS_SW_CS, 0);
+
+ ret = npcm_fiu_uma_write(mem, op, op->cmd.opcode, true, NULL, 0);
+ if (ret)
+ return ret;
+
+ /* Write the bulk of the data in CHUNK_SIZE-byte pieces */
+ for (idx = 0; idx < num_data_chunks; ++idx) {
+ ret = npcm_fiu_uma_write(mem, op, data[0], false,
+ &data[1], CHUNK_SIZE - 1);
+ if (ret)
+ return ret;
+
+ data += CHUNK_SIZE;
+ }
+
+ /* Handle the remaining partial chunk */
+ if (remain_data > 0) {
+ ret = npcm_fiu_uma_write(mem, op, data[0], false,
+ &data[1], remain_data - 1);
+ if (ret)
+ return ret;
+ }
+
+ regmap_update_bits(fiu->regmap, NPCM_FIU_UMA_CTS,
+ NPCM_FIU_UMA_CTS_SW_CS, NPCM_FIU_UMA_CTS_SW_CS);
+
+ return 0;
+}
+
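+/*
+ * Addressed read: split the requested length into UMA transactions of at
+ * most 16 bytes each.
+ */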
+static int npcm_fiu_read(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ u8 *data = op->data.buf.in;
+ int i, readlen, currlen;
+ u8 *buf_ptr;
+ u32 addr;
+ int ret;
+
+ i = 0;
+ currlen = op->data.nbytes;
+
+ do {
+ addr = ((u32)op->addr.val + i);
+ if (currlen < 16)
+ readlen = currlen;
+ else
+ readlen = 16;
+
+ buf_ptr = data + i;
+ ret = npcm_fiu_uma_read(mem, op, addr, true, buf_ptr,
+ readlen);
+ if (ret)
+ return ret;
+
+ i += readlen;
+ currlen -= 16;
+ } while (currlen > 0);
+
+ return 0;
+}
+
+static void npcm_fiux_set_direct_wr(struct npcm_fiu_spi *fiu)
+{
+ regmap_write(fiu->regmap, NPCM_FIU_DWR_CFG,
+ NPCM_FIU_DWR_16_BYTE_BURST);
+ regmap_update_bits(fiu->regmap, NPCM_FIU_DWR_CFG,
+ NPCM_FIU_DWR_CFG_ABPCK,
+ DWR_ABPCK_4_BIT_PER_CLK << NPCM_FIU_DWR_ABPCK_SHIFT);
+ regmap_update_bits(fiu->regmap, NPCM_FIU_DWR_CFG,
+ NPCM_FIU_DWR_CFG_DBPCK,
+ DWR_DBPCK_4_BIT_PER_CLK << NPCM_FIU_DWR_DBPCK_SHIFT);
+}
+
+static void npcm_fiux_set_direct_rd(struct npcm_fiu_spi *fiu)
+{
+ u32 rx_dummy = 0;
+
+ regmap_write(fiu->regmap, NPCM_FIU_DRD_CFG,
+ NPCM_FIU_DRD_16_BYTE_BURST);
+ regmap_update_bits(fiu->regmap, NPCM_FIU_DRD_CFG,
+ NPCM_FIU_DRD_CFG_ACCTYPE,
+ DRD_SPI_X_MODE << NPCM_FIU_DRD_ACCTYPE_SHIFT);
+ regmap_update_bits(fiu->regmap, NPCM_FIU_DRD_CFG,
+ NPCM_FIU_DRD_CFG_DBW,
+ rx_dummy << NPCM_FIU_DRD_DBW_SHIFT);
+}
+
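+/*
+ * spi-mem exec_op handler: adjust the bus clock if needed and dispatch the
+ * operation to the matching UMA read/write helper depending on the data
+ * direction and on whether address and data phases are present.
+ */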
+static int npcm_fiu_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ struct npcm_fiu_spi *fiu =
+ spi_controller_get_devdata(mem->spi->master);
+ struct npcm_fiu_chip *chip = &fiu->chip[mem->spi->chip_select];
+ int ret = 0;
+ u8 *buf;
+
+ dev_dbg(fiu->dev, "cmd:%#x mode:%d.%d.%d.%d addr:%#llx len:%#x\n",
+ op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
+ op->dummy.buswidth, op->data.buswidth, op->addr.val,
+ op->data.nbytes);
+
+ if (fiu->spix_mode || op->addr.nbytes > 4)
+ return -ENOTSUPP;
+
+ if (fiu->clkrate != chip->clkrate) {
+ ret = clk_set_rate(fiu->clk, chip->clkrate);
+ if (ret < 0)
+ dev_warn(fiu->dev, "Failed setting %lu frequency, stay at %lu frequency\n",
+ chip->clkrate, fiu->clkrate);
+ else
+ fiu->clkrate = chip->clkrate;
+ }
+
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ if (!op->addr.nbytes) {
+ buf = op->data.buf.in;
+ ret = npcm_fiu_uma_read(mem, op, op->addr.val, false,
+ buf, op->data.nbytes);
+ } else {
+ ret = npcm_fiu_read(mem, op);
+ }
+ } else {
+ if (!op->addr.nbytes && !op->data.nbytes)
+ ret = npcm_fiu_uma_write(mem, op, op->cmd.opcode, false,
+ NULL, 0);
+ if (op->addr.nbytes && !op->data.nbytes) {
+ int i;
+ u8 buf_addr[4];
+ u32 addr = op->addr.val;
+
+ for (i = op->addr.nbytes - 1; i >= 0; i--) {
+ buf_addr[i] = addr & 0xff;
+ addr >>= 8;
+ }
+ ret = npcm_fiu_uma_write(mem, op, op->cmd.opcode, false,
+ buf_addr, op->addr.nbytes);
+ }
+ if (!op->addr.nbytes && op->data.nbytes)
+ ret = npcm_fiu_uma_write(mem, op, op->cmd.opcode, false,
+ (u8 *)op->data.buf.out,
+ op->data.nbytes);
+ if (op->addr.nbytes && op->data.nbytes)
+ ret = npcm_fiu_manualwrite(mem, op);
+ }
+
+ return ret;
+}
+
+static int npcm_fiu_dirmap_create(struct spi_mem_dirmap_desc *desc)
+{
+ struct npcm_fiu_spi *fiu =
+ spi_controller_get_devdata(desc->mem->spi->master);
+ struct npcm_fiu_chip *chip = &fiu->chip[desc->mem->spi->chip_select];
+ struct regmap *gcr_regmap;
+
+ if (!fiu->res_mem) {
+ dev_warn(fiu->dev, "Reserved memory not defined, direct read disabled\n");
+ desc->nodirmap = true;
+ return 0;
+ }
+
+ if (!fiu->spix_mode &&
+ desc->info.op_tmpl.data.dir == SPI_MEM_DATA_OUT) {
+ desc->nodirmap = true;
+ return 0;
+ }
+
+ if (!chip->flash_region_mapped_ptr) {
+ chip->flash_region_mapped_ptr =
+ devm_ioremap(fiu->dev, (fiu->res_mem->start +
+ (fiu->info->max_map_size *
+ desc->mem->spi->chip_select)),
+ (u32)desc->info.length);
+ if (!chip->flash_region_mapped_ptr) {
+ dev_warn(fiu->dev, "Error mapping memory region, direct read disabled\n");
+ desc->nodirmap = true;
+ return 0;
+ }
+ }
+
+ if (of_device_is_compatible(fiu->dev->of_node, "nuvoton,npcm750-fiu")) {
+ gcr_regmap =
+ syscon_regmap_lookup_by_compatible("nuvoton,npcm750-gcr");
+ if (IS_ERR(gcr_regmap)) {
+ dev_warn(fiu->dev, "Didn't find nuvoton,npcm750-gcr, direct read disabled\n");
+ desc->nodirmap = true;
+ return 0;
+ }
+ regmap_update_bits(gcr_regmap, NPCM7XX_INTCR3_OFFSET,
+ NPCM7XX_INTCR3_FIU_FIX,
+ NPCM7XX_INTCR3_FIU_FIX);
+ } else {
+ regmap_update_bits(fiu->regmap, NPCM_FIU_CFG,
+ NPCM_FIU_CFG_FIU_FIX,
+ NPCM_FIU_CFG_FIU_FIX);
+ }
+
+ if (desc->info.op_tmpl.data.dir == SPI_MEM_DATA_IN) {
+ if (!fiu->spix_mode)
+ npcm_fiu_set_drd(fiu, &desc->info.op_tmpl);
+ else
+ npcm_fiux_set_direct_rd(fiu);
+
+ } else {
+ npcm_fiux_set_direct_wr(fiu);
+ }
+
+ return 0;
+}
+
+static int npcm_fiu_setup(struct spi_device *spi)
+{
+ struct spi_controller *ctrl = spi->master;
+ struct npcm_fiu_spi *fiu = spi_controller_get_devdata(ctrl);
+ struct npcm_fiu_chip *chip;
+
+ chip = &fiu->chip[spi->chip_select];
+ chip->fiu = fiu;
+ chip->chipselect = spi->chip_select;
+ chip->clkrate = spi->max_speed_hz;
+
+ fiu->clkrate = clk_get_rate(fiu->clk);
+
+ return 0;
+}
+
+static const struct spi_controller_mem_ops npcm_fiu_mem_ops = {
+ .exec_op = npcm_fiu_exec_op,
+ .dirmap_create = npcm_fiu_dirmap_create,
+ .dirmap_read = npcm_fiu_direct_read,
+ .dirmap_write = npcm_fiu_direct_write,
+};
+
+static const struct of_device_id npcm_fiu_dt_ids[] = {
+ { .compatible = "nuvoton,npcm750-fiu", .data = &npcm7xx_fiu_data },
+ { .compatible = "nuvoton,npcm845-fiu", .data = &npxm8xx_fiu_data },
+ { /* sentinel */ }
+};
+
+static int npcm_fiu_probe(struct platform_device *pdev)
+{
+ const struct fiu_data *fiu_data_match;
+ struct device *dev = &pdev->dev;
+ struct spi_controller *ctrl;
+ struct npcm_fiu_spi *fiu;
+ void __iomem *regbase;
+ struct resource *res;
+ int id, ret;
+
+ ctrl = devm_spi_alloc_master(dev, sizeof(*fiu));
+ if (!ctrl)
+ return -ENOMEM;
+
+ fiu = spi_controller_get_devdata(ctrl);
+
+ fiu_data_match = of_device_get_match_data(dev);
+ if (!fiu_data_match) {
+ dev_err(dev, "No compatible OF match\n");
+ return -ENODEV;
+ }
+
+ id = of_alias_get_id(dev->of_node, "fiu");
+ if (id < 0 || id >= fiu_data_match->fiu_max) {
+ dev_err(dev, "Invalid platform device id: %d\n", id);
+ return -EINVAL;
+ }
+
+ fiu->info = &fiu_data_match->npcm_fiu_data_info[id];
+
+ platform_set_drvdata(pdev, fiu);
+ fiu->dev = dev;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "control");
+ regbase = devm_ioremap_resource(dev, res);
+ if (IS_ERR(regbase))
+ return PTR_ERR(regbase);
+
+ fiu->regmap = devm_regmap_init_mmio(dev, regbase,
+ &npcm_mtd_regmap_config);
+ if (IS_ERR(fiu->regmap)) {
+ dev_err(dev, "Failed to create regmap\n");
+ return PTR_ERR(fiu->regmap);
+ }
+
+ fiu->res_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "memory");
+ fiu->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(fiu->clk))
+ return PTR_ERR(fiu->clk);
+
+ fiu->spix_mode = of_property_read_bool(dev->of_node,
+ "nuvoton,spix-mode");
+
+ platform_set_drvdata(pdev, fiu);
+ clk_prepare_enable(fiu->clk);
+
+ ctrl->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD
+ | SPI_TX_DUAL | SPI_TX_QUAD;
+ ctrl->setup = npcm_fiu_setup;
+ ctrl->bus_num = -1;
+ ctrl->mem_ops = &npcm_fiu_mem_ops;
+ ctrl->num_chipselect = fiu->info->max_cs;
+ ctrl->dev.of_node = dev->of_node;
+
+ ret = devm_spi_register_master(dev, ctrl);
+ if (ret)
+ clk_disable_unprepare(fiu->clk);
+
+ return ret;
+}
+
+static int npcm_fiu_remove(struct platform_device *pdev)
+{
+ struct npcm_fiu_spi *fiu = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(fiu->clk);
+ return 0;
+}
+
+MODULE_DEVICE_TABLE(of, npcm_fiu_dt_ids);
+
+static struct platform_driver npcm_fiu_driver = {
+ .driver = {
+ .name = "NPCM-FIU",
+ .bus = &platform_bus_type,
+ .of_match_table = npcm_fiu_dt_ids,
+ },
+ .probe = npcm_fiu_probe,
+ .remove = npcm_fiu_remove,
+};
+module_platform_driver(npcm_fiu_driver);
+
+MODULE_DESCRIPTION("Nuvoton FLASH Interface Unit SPI Controller Driver");
+MODULE_AUTHOR("Tomer Maimon <tomer.maimon@nuvoton.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-npcm-pspi.c b/drivers/spi/spi-npcm-pspi.c
new file mode 100644
index 000000000..7f2e4d1b0
--- /dev/null
+++ b/drivers/spi/spi-npcm-pspi.c
@@ -0,0 +1,464 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018 Nuvoton Technology corporation.
+
+#include <linux/kernel.h>
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/reset.h>
+
+#include <asm/unaligned.h>
+
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+
+struct npcm_pspi {
+ struct completion xfer_done;
+ struct reset_control *reset;
+ struct spi_master *master;
+ unsigned int tx_bytes;
+ unsigned int rx_bytes;
+ void __iomem *base;
+ bool is_save_param;
+ u8 bits_per_word;
+ const u8 *tx_buf;
+ struct clk *clk;
+ u32 speed_hz;
+ u8 *rx_buf;
+ u16 mode;
+ u32 id;
+};
+
+#define DRIVER_NAME "npcm-pspi"
+
+#define NPCM_PSPI_DATA 0x00
+#define NPCM_PSPI_CTL1 0x02
+#define NPCM_PSPI_STAT 0x04
+
+/* definitions for control and status register */
+#define NPCM_PSPI_CTL1_SPIEN BIT(0)
+#define NPCM_PSPI_CTL1_MOD BIT(2)
+#define NPCM_PSPI_CTL1_EIR BIT(5)
+#define NPCM_PSPI_CTL1_EIW BIT(6)
+#define NPCM_PSPI_CTL1_SCM BIT(7)
+#define NPCM_PSPI_CTL1_SCIDL BIT(8)
+#define NPCM_PSPI_CTL1_SCDV6_0 GENMASK(15, 9)
+
+#define NPCM_PSPI_STAT_BSY BIT(0)
+#define NPCM_PSPI_STAT_RBF BIT(1)
+
+/* general definitions */
+#define NPCM_PSPI_TIMEOUT_MS 2000
+#define NPCM_PSPI_MAX_CLK_DIVIDER 256
+#define NPCM_PSPI_MIN_CLK_DIVIDER 4
+#define NPCM_PSPI_DEFAULT_CLK 25000000
+
+static inline unsigned int bytes_per_word(unsigned int bits)
+{
+ return bits <= 8 ? 1 : 2;
+}
+
+static inline void npcm_pspi_irq_enable(struct npcm_pspi *priv, u16 mask)
+{
+ u16 val;
+
+ val = ioread16(priv->base + NPCM_PSPI_CTL1);
+ val |= mask;
+ iowrite16(val, priv->base + NPCM_PSPI_CTL1);
+}
+
+static inline void npcm_pspi_irq_disable(struct npcm_pspi *priv, u16 mask)
+{
+ u16 val;
+
+ val = ioread16(priv->base + NPCM_PSPI_CTL1);
+ val &= ~mask;
+ iowrite16(val, priv->base + NPCM_PSPI_CTL1);
+}
+
+static inline void npcm_pspi_enable(struct npcm_pspi *priv)
+{
+ u16 val;
+
+ val = ioread16(priv->base + NPCM_PSPI_CTL1);
+ val |= NPCM_PSPI_CTL1_SPIEN;
+ iowrite16(val, priv->base + NPCM_PSPI_CTL1);
+}
+
+static inline void npcm_pspi_disable(struct npcm_pspi *priv)
+{
+ u16 val;
+
+ val = ioread16(priv->base + NPCM_PSPI_CTL1);
+ val &= ~NPCM_PSPI_CTL1_SPIEN;
+ iowrite16(val, priv->base + NPCM_PSPI_CTL1);
+}
+
+static void npcm_pspi_set_mode(struct spi_device *spi)
+{
+ struct npcm_pspi *priv = spi_master_get_devdata(spi->master);
+ u16 regtemp;
+ u16 mode_val;
+
+ switch (spi->mode & SPI_MODE_X_MASK) {
+ case SPI_MODE_0:
+ mode_val = 0;
+ break;
+ case SPI_MODE_1:
+ mode_val = NPCM_PSPI_CTL1_SCIDL;
+ break;
+ case SPI_MODE_2:
+ mode_val = NPCM_PSPI_CTL1_SCM;
+ break;
+ case SPI_MODE_3:
+ mode_val = NPCM_PSPI_CTL1_SCIDL | NPCM_PSPI_CTL1_SCM;
+ break;
+ }
+
+ regtemp = ioread16(priv->base + NPCM_PSPI_CTL1);
+ regtemp &= ~(NPCM_PSPI_CTL1_SCM | NPCM_PSPI_CTL1_SCIDL);
+ iowrite16(regtemp | mode_val, priv->base + NPCM_PSPI_CTL1);
+}
+
+static void npcm_pspi_set_transfer_size(struct npcm_pspi *priv, int size)
+{
+ u16 regtemp;
+
+ regtemp = ioread16(NPCM_PSPI_CTL1 + priv->base);
+
+ switch (size) {
+ case 8:
+ regtemp &= ~NPCM_PSPI_CTL1_MOD;
+ break;
+ case 16:
+ regtemp |= NPCM_PSPI_CTL1_MOD;
+ break;
+ }
+
+ iowrite16(regtemp, NPCM_PSPI_CTL1 + priv->base);
+}
+
+static void npcm_pspi_set_baudrate(struct npcm_pspi *priv, unsigned int speed)
+{
+ u32 ckdiv;
+ u16 regtemp;
+
+ /* The supported clock dividers range from 4 to 256. */
+ ckdiv = DIV_ROUND_CLOSEST(clk_get_rate(priv->clk), (2 * speed)) - 1;
+
+ regtemp = ioread16(NPCM_PSPI_CTL1 + priv->base);
+ regtemp &= ~NPCM_PSPI_CTL1_SCDV6_0;
+ iowrite16(regtemp | (ckdiv << 9), NPCM_PSPI_CTL1 + priv->base);
+}
+
+static void npcm_pspi_setup_transfer(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct npcm_pspi *priv = spi_master_get_devdata(spi->master);
+
+ priv->tx_buf = t->tx_buf;
+ priv->rx_buf = t->rx_buf;
+ priv->tx_bytes = t->len;
+ priv->rx_bytes = t->len;
+
+ if (!priv->is_save_param || priv->mode != spi->mode) {
+ npcm_pspi_set_mode(spi);
+ priv->mode = spi->mode;
+ }
+
+ /*
+ * If the transfer length is even and the word size is 8 bits,
+ * switch to a 16 bits-per-word transfer.
+ */
+ if (priv->bits_per_word == 8 && !(t->len & 0x1))
+ t->bits_per_word = 16;
+
+ if (!priv->is_save_param || priv->bits_per_word != t->bits_per_word) {
+ npcm_pspi_set_transfer_size(priv, t->bits_per_word);
+ priv->bits_per_word = t->bits_per_word;
+ }
+
+ if (!priv->is_save_param || priv->speed_hz != t->speed_hz) {
+ npcm_pspi_set_baudrate(priv, t->speed_hz);
+ priv->speed_hz = t->speed_hz;
+ }
+
+ if (!priv->is_save_param)
+ priv->is_save_param = true;
+}
+
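+/* Push the next 8- or 16-bit word from the TX buffer into the data register. */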
+static void npcm_pspi_send(struct npcm_pspi *priv)
+{
+ int wsize;
+ u16 val;
+
+ wsize = min(bytes_per_word(priv->bits_per_word), priv->tx_bytes);
+ priv->tx_bytes -= wsize;
+
+ if (!priv->tx_buf)
+ return;
+
+ switch (wsize) {
+ case 1:
+ val = *priv->tx_buf++;
+ iowrite8(val, NPCM_PSPI_DATA + priv->base);
+ break;
+ case 2:
+ val = *priv->tx_buf++;
+ val = *priv->tx_buf++ | (val << 8);
+ iowrite16(val, NPCM_PSPI_DATA + priv->base);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return;
+ }
+}
+
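+/* Pull the next 8- or 16-bit word from the data register into the RX buffer. */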
+static void npcm_pspi_recv(struct npcm_pspi *priv)
+{
+ int rsize;
+ u16 val;
+
+ rsize = min(bytes_per_word(priv->bits_per_word), priv->rx_bytes);
+ priv->rx_bytes -= rsize;
+
+ if (!priv->rx_buf)
+ return;
+
+ switch (rsize) {
+ case 1:
+ *priv->rx_buf++ = ioread8(priv->base + NPCM_PSPI_DATA);
+ break;
+ case 2:
+ val = ioread16(priv->base + NPCM_PSPI_DATA);
+ *priv->rx_buf++ = (val >> 8);
+ *priv->rx_buf++ = val & 0xff;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return;
+ }
+}
+
+static int npcm_pspi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct npcm_pspi *priv = spi_master_get_devdata(master);
+ int status;
+
+ npcm_pspi_setup_transfer(spi, t);
+ reinit_completion(&priv->xfer_done);
+ npcm_pspi_enable(priv);
+ status = wait_for_completion_timeout(&priv->xfer_done,
+ msecs_to_jiffies
+ (NPCM_PSPI_TIMEOUT_MS));
+ if (status == 0) {
+ npcm_pspi_disable(priv);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int npcm_pspi_prepare_transfer_hardware(struct spi_master *master)
+{
+ struct npcm_pspi *priv = spi_master_get_devdata(master);
+
+ npcm_pspi_irq_enable(priv, NPCM_PSPI_CTL1_EIR | NPCM_PSPI_CTL1_EIW);
+
+ return 0;
+}
+
+static int npcm_pspi_unprepare_transfer_hardware(struct spi_master *master)
+{
+ struct npcm_pspi *priv = spi_master_get_devdata(master);
+
+ npcm_pspi_irq_disable(priv, NPCM_PSPI_CTL1_EIR | NPCM_PSPI_CTL1_EIW);
+
+ return 0;
+}
+
+static void npcm_pspi_reset_hw(struct npcm_pspi *priv)
+{
+ reset_control_assert(priv->reset);
+ udelay(5);
+ reset_control_deassert(priv->reset);
+}
+
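+/*
+ * Interrupt handler: on transmit, discard the received word and push the
+ * next one once the controller is idle; on receive, read the incoming
+ * word and, for read-only transfers, write a dummy byte to keep the clock
+ * running. The transfer is completed once all bytes have been moved.
+ */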
+static irqreturn_t npcm_pspi_handler(int irq, void *dev_id)
+{
+ struct npcm_pspi *priv = dev_id;
+ u8 stat;
+
+ stat = ioread8(priv->base + NPCM_PSPI_STAT);
+
+ if (!priv->tx_buf && !priv->rx_buf)
+ return IRQ_NONE;
+
+ if (priv->tx_buf) {
+ if (stat & NPCM_PSPI_STAT_RBF) {
+ ioread8(NPCM_PSPI_DATA + priv->base);
+ if (priv->tx_bytes == 0) {
+ npcm_pspi_disable(priv);
+ complete(&priv->xfer_done);
+ return IRQ_HANDLED;
+ }
+ }
+
+ if ((stat & NPCM_PSPI_STAT_BSY) == 0)
+ if (priv->tx_bytes)
+ npcm_pspi_send(priv);
+ }
+
+ if (priv->rx_buf) {
+ if (stat & NPCM_PSPI_STAT_RBF) {
+ if (!priv->rx_bytes)
+ return IRQ_NONE;
+
+ npcm_pspi_recv(priv);
+
+ if (!priv->rx_bytes) {
+ npcm_pspi_disable(priv);
+ complete(&priv->xfer_done);
+ return IRQ_HANDLED;
+ }
+ }
+
+ if (((stat & NPCM_PSPI_STAT_BSY) == 0) && !priv->tx_buf)
+ iowrite8(0x0, NPCM_PSPI_DATA + priv->base);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int npcm_pspi_probe(struct platform_device *pdev)
+{
+ struct npcm_pspi *priv;
+ struct spi_master *master;
+ unsigned long clk_hz;
+ int irq;
+ int ret;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*priv));
+ if (!master)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, master);
+
+ priv = spi_master_get_devdata(master);
+ priv->master = master;
+ priv->is_save_param = false;
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base)) {
+ ret = PTR_ERR(priv->base);
+ goto out_master_put;
+ }
+
+ priv->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(&pdev->dev, "failed to get clock\n");
+ ret = PTR_ERR(priv->clk);
+ goto out_master_put;
+ }
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ goto out_master_put;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto out_disable_clk;
+ }
+
+ priv->reset = devm_reset_control_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->reset)) {
+ ret = PTR_ERR(priv->reset);
+ goto out_disable_clk;
+ }
+
+ /* reset SPI-HW block */
+ npcm_pspi_reset_hw(priv);
+
+ ret = devm_request_irq(&pdev->dev, irq, npcm_pspi_handler, 0,
+ "npcm-pspi", priv);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request IRQ\n");
+ goto out_disable_clk;
+ }
+
+ init_completion(&priv->xfer_done);
+
+ clk_hz = clk_get_rate(priv->clk);
+
+ master->max_speed_hz = DIV_ROUND_UP(clk_hz, NPCM_PSPI_MIN_CLK_DIVIDER);
+ master->min_speed_hz = DIV_ROUND_UP(clk_hz, NPCM_PSPI_MAX_CLK_DIVIDER);
+ master->mode_bits = SPI_CPHA | SPI_CPOL;
+ master->dev.of_node = pdev->dev.of_node;
+ master->bus_num = -1;
+ master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
+ master->transfer_one = npcm_pspi_transfer_one;
+ master->prepare_transfer_hardware =
+ npcm_pspi_prepare_transfer_hardware;
+ master->unprepare_transfer_hardware =
+ npcm_pspi_unprepare_transfer_hardware;
+ master->use_gpio_descriptors = true;
+
+ /* set to default clock rate */
+ npcm_pspi_set_baudrate(priv, NPCM_PSPI_DEFAULT_CLK);
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret)
+ goto out_disable_clk;
+
+ pr_info("NPCM Peripheral SPI %d probed\n", master->bus_num);
+
+ return 0;
+
+out_disable_clk:
+ clk_disable_unprepare(priv->clk);
+
+out_master_put:
+ spi_master_put(master);
+ return ret;
+}
+
+static int npcm_pspi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct npcm_pspi *priv = spi_master_get_devdata(master);
+
+ npcm_pspi_reset_hw(priv);
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+static const struct of_device_id npcm_pspi_match[] = {
+ { .compatible = "nuvoton,npcm750-pspi", .data = NULL },
+ { .compatible = "nuvoton,npcm845-pspi", .data = NULL },
+ {}
+};
+MODULE_DEVICE_TABLE(of, npcm_pspi_match);
+
+static struct platform_driver npcm_pspi_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = npcm_pspi_match,
+ },
+ .probe = npcm_pspi_probe,
+ .remove = npcm_pspi_remove,
+};
+module_platform_driver(npcm_pspi_driver);
+
+MODULE_DESCRIPTION("NPCM peripheral SPI Controller driver");
+MODULE_AUTHOR("Tomer Maimon <tomer.maimon@nuvoton.com>");
+MODULE_LICENSE("GPL v2");
+
diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c
new file mode 100644
index 000000000..afecf69d3
--- /dev/null
+++ b/drivers/spi/spi-nxp-fspi.c
@@ -0,0 +1,1276 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * NXP FlexSPI(FSPI) controller driver.
+ *
+ * Copyright 2019-2020 NXP
+ * Copyright 2020 Puresoftware Ltd.
+ *
+ * FlexSPI is a flexible SPI host controller which supports two SPI
+ * channels and up to 4 external devices. Each channel supports
+ * Single/Dual/Quad/Octal mode data transfer (1/2/4/8 bidirectional
+ * data lines).
+ *
+ * The FlexSPI controller is driven by the LUT (Look-up Table) registers.
+ * LUT registers are a look-up table for sequences of instructions.
+ * A valid sequence consists of four LUT registers.
+ * A maximum of 32 LUT sequences can be programmed simultaneously.
+ *
+ * LUTs are created at run time based on the commands passed from the
+ * spi-mem framework, so only a single LUT index is used.
+ *
+ * Software-triggered flash read/write accesses go through the IP bus.
+ *
+ * Memory-mapped read accesses go through the AHB bus.
+ *
+ * Based on SPI MEM interface and spi-fsl-qspi.c driver.
+ *
+ * Author:
+ * Yogesh Narayan Gaur <yogeshnarayan.gaur@nxp.com>
+ * Boris Brezillon <bbrezillon@kernel.org>
+ * Frieder Schrempf <frieder.schrempf@kontron.de>
+ */
+
+#include <linux/acpi.h>
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_qos.h>
+#include <linux/regmap.h>
+#include <linux/sizes.h>
+#include <linux/sys_soc.h>
+
+#include <linux/mfd/syscon.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+
+/*
+ * The driver uses only a single LUT entry, which is updated on
+ * each call of exec_op(). Index 0 is preset at boot with a basic
+ * read operation, so let's use the last entry (31).
+ */
+#define SEQID_LUT 31
+
+/* Registers used by the driver */
+#define FSPI_MCR0 0x00
+#define FSPI_MCR0_AHB_TIMEOUT(x) ((x) << 24)
+#define FSPI_MCR0_IP_TIMEOUT(x) ((x) << 16)
+#define FSPI_MCR0_LEARN_EN BIT(15)
+#define FSPI_MCR0_SCRFRUN_EN BIT(14)
+#define FSPI_MCR0_OCTCOMB_EN BIT(13)
+#define FSPI_MCR0_DOZE_EN BIT(12)
+#define FSPI_MCR0_HSEN BIT(11)
+#define FSPI_MCR0_SERCLKDIV BIT(8)
+#define FSPI_MCR0_ATDF_EN BIT(7)
+#define FSPI_MCR0_ARDF_EN BIT(6)
+#define FSPI_MCR0_RXCLKSRC(x) ((x) << 4)
+#define FSPI_MCR0_END_CFG(x) ((x) << 2)
+#define FSPI_MCR0_MDIS BIT(1)
+#define FSPI_MCR0_SWRST BIT(0)
+
+#define FSPI_MCR1 0x04
+#define FSPI_MCR1_SEQ_TIMEOUT(x) ((x) << 16)
+#define FSPI_MCR1_AHB_TIMEOUT(x) (x)
+
+#define FSPI_MCR2 0x08
+#define FSPI_MCR2_IDLE_WAIT(x) ((x) << 24)
+#define FSPI_MCR2_SAMEDEVICEEN BIT(15)
+#define FSPI_MCR2_CLRLRPHS BIT(14)
+#define FSPI_MCR2_ABRDATSZ BIT(8)
+#define FSPI_MCR2_ABRLEARN BIT(7)
+#define FSPI_MCR2_ABR_READ BIT(6)
+#define FSPI_MCR2_ABRWRITE BIT(5)
+#define FSPI_MCR2_ABRDUMMY BIT(4)
+#define FSPI_MCR2_ABR_MODE BIT(3)
+#define FSPI_MCR2_ABRCADDR BIT(2)
+#define FSPI_MCR2_ABRRADDR BIT(1)
+#define FSPI_MCR2_ABR_CMD BIT(0)
+
+#define FSPI_AHBCR 0x0c
+#define FSPI_AHBCR_RDADDROPT BIT(6)
+#define FSPI_AHBCR_PREF_EN BIT(5)
+#define FSPI_AHBCR_BUFF_EN BIT(4)
+#define FSPI_AHBCR_CACH_EN BIT(3)
+#define FSPI_AHBCR_CLRTXBUF BIT(2)
+#define FSPI_AHBCR_CLRRXBUF BIT(1)
+#define FSPI_AHBCR_PAR_EN BIT(0)
+
+#define FSPI_INTEN 0x10
+#define FSPI_INTEN_SCLKSBWR BIT(9)
+#define FSPI_INTEN_SCLKSBRD BIT(8)
+#define FSPI_INTEN_DATALRNFL BIT(7)
+#define FSPI_INTEN_IPTXWE BIT(6)
+#define FSPI_INTEN_IPRXWA BIT(5)
+#define FSPI_INTEN_AHBCMDERR BIT(4)
+#define FSPI_INTEN_IPCMDERR BIT(3)
+#define FSPI_INTEN_AHBCMDGE BIT(2)
+#define FSPI_INTEN_IPCMDGE BIT(1)
+#define FSPI_INTEN_IPCMDDONE BIT(0)
+
+#define FSPI_INTR 0x14
+#define FSPI_INTR_SCLKSBWR BIT(9)
+#define FSPI_INTR_SCLKSBRD BIT(8)
+#define FSPI_INTR_DATALRNFL BIT(7)
+#define FSPI_INTR_IPTXWE BIT(6)
+#define FSPI_INTR_IPRXWA BIT(5)
+#define FSPI_INTR_AHBCMDERR BIT(4)
+#define FSPI_INTR_IPCMDERR BIT(3)
+#define FSPI_INTR_AHBCMDGE BIT(2)
+#define FSPI_INTR_IPCMDGE BIT(1)
+#define FSPI_INTR_IPCMDDONE BIT(0)
+
+#define FSPI_LUTKEY 0x18
+#define FSPI_LUTKEY_VALUE 0x5AF05AF0
+
+#define FSPI_LCKCR 0x1C
+
+#define FSPI_LCKER_LOCK 0x1
+#define FSPI_LCKER_UNLOCK 0x2
+
+#define FSPI_BUFXCR_INVALID_MSTRID 0xE
+#define FSPI_AHBRX_BUF0CR0 0x20
+#define FSPI_AHBRX_BUF1CR0 0x24
+#define FSPI_AHBRX_BUF2CR0 0x28
+#define FSPI_AHBRX_BUF3CR0 0x2C
+#define FSPI_AHBRX_BUF4CR0 0x30
+#define FSPI_AHBRX_BUF5CR0 0x34
+#define FSPI_AHBRX_BUF6CR0 0x38
+#define FSPI_AHBRX_BUF7CR0 0x3C
+#define FSPI_AHBRXBUF0CR7_PREF BIT(31)
+
+#define FSPI_AHBRX_BUF0CR1 0x40
+#define FSPI_AHBRX_BUF1CR1 0x44
+#define FSPI_AHBRX_BUF2CR1 0x48
+#define FSPI_AHBRX_BUF3CR1 0x4C
+#define FSPI_AHBRX_BUF4CR1 0x50
+#define FSPI_AHBRX_BUF5CR1 0x54
+#define FSPI_AHBRX_BUF6CR1 0x58
+#define FSPI_AHBRX_BUF7CR1 0x5C
+
+#define FSPI_FLSHA1CR0 0x60
+#define FSPI_FLSHA2CR0 0x64
+#define FSPI_FLSHB1CR0 0x68
+#define FSPI_FLSHB2CR0 0x6C
+#define FSPI_FLSHXCR0_SZ_KB 10
+#define FSPI_FLSHXCR0_SZ(x) ((x) >> FSPI_FLSHXCR0_SZ_KB)
+
+#define FSPI_FLSHA1CR1 0x70
+#define FSPI_FLSHA2CR1 0x74
+#define FSPI_FLSHB1CR1 0x78
+#define FSPI_FLSHB2CR1 0x7C
+#define FSPI_FLSHXCR1_CSINTR(x) ((x) << 16)
+#define FSPI_FLSHXCR1_CAS(x) ((x) << 11)
+#define FSPI_FLSHXCR1_WA BIT(10)
+#define FSPI_FLSHXCR1_TCSH(x) ((x) << 5)
+#define FSPI_FLSHXCR1_TCSS(x) (x)
+
+#define FSPI_FLSHA1CR2 0x80
+#define FSPI_FLSHA2CR2 0x84
+#define FSPI_FLSHB1CR2 0x88
+#define FSPI_FLSHB2CR2 0x8C
+#define FSPI_FLSHXCR2_CLRINSP BIT(24)
+#define FSPI_FLSHXCR2_AWRWAIT BIT(16)
+#define FSPI_FLSHXCR2_AWRSEQN_SHIFT 13
+#define FSPI_FLSHXCR2_AWRSEQI_SHIFT 8
+#define FSPI_FLSHXCR2_ARDSEQN_SHIFT 5
+#define FSPI_FLSHXCR2_ARDSEQI_SHIFT 0
+
+#define FSPI_IPCR0 0xA0
+
+#define FSPI_IPCR1 0xA4
+#define FSPI_IPCR1_IPAREN BIT(31)
+#define FSPI_IPCR1_SEQNUM_SHIFT 24
+#define FSPI_IPCR1_SEQID_SHIFT 16
+#define FSPI_IPCR1_IDATSZ(x) (x)
+
+#define FSPI_IPCMD 0xB0
+#define FSPI_IPCMD_TRG BIT(0)
+
+#define FSPI_DLPR 0xB4
+
+#define FSPI_IPRXFCR 0xB8
+#define FSPI_IPRXFCR_CLR BIT(0)
+#define FSPI_IPRXFCR_DMA_EN BIT(1)
+#define FSPI_IPRXFCR_WMRK(x) ((x) << 2)
+
+#define FSPI_IPTXFCR 0xBC
+#define FSPI_IPTXFCR_CLR BIT(0)
+#define FSPI_IPTXFCR_DMA_EN BIT(1)
+#define FSPI_IPTXFCR_WMRK(x) ((x) << 2)
+
+#define FSPI_DLLACR 0xC0
+#define FSPI_DLLACR_OVRDEN BIT(8)
+
+#define FSPI_DLLBCR 0xC4
+#define FSPI_DLLBCR_OVRDEN BIT(8)
+
+#define FSPI_STS0 0xE0
+#define FSPI_STS0_DLPHB(x) ((x) << 8)
+#define FSPI_STS0_DLPHA(x) ((x) << 4)
+#define FSPI_STS0_CMD_SRC(x) ((x) << 2)
+#define FSPI_STS0_ARB_IDLE BIT(1)
+#define FSPI_STS0_SEQ_IDLE BIT(0)
+
+#define FSPI_STS1 0xE4
+#define FSPI_STS1_IP_ERRCD(x) ((x) << 24)
+#define FSPI_STS1_IP_ERRID(x) ((x) << 16)
+#define FSPI_STS1_AHB_ERRCD(x) ((x) << 8)
+#define FSPI_STS1_AHB_ERRID(x) (x)
+
+#define FSPI_AHBSPNST 0xEC
+#define FSPI_AHBSPNST_DATLFT(x) ((x) << 16)
+#define FSPI_AHBSPNST_BUFID(x) ((x) << 1)
+#define FSPI_AHBSPNST_ACTIVE BIT(0)
+
+#define FSPI_IPRXFSTS 0xF0
+#define FSPI_IPRXFSTS_RDCNTR(x) ((x) << 16)
+#define FSPI_IPRXFSTS_FILL(x) (x)
+
+#define FSPI_IPTXFSTS 0xF4
+#define FSPI_IPTXFSTS_WRCNTR(x) ((x) << 16)
+#define FSPI_IPTXFSTS_FILL(x) (x)
+
+#define FSPI_RFDR 0x100
+#define FSPI_TFDR 0x180
+
+#define FSPI_LUT_BASE 0x200
+#define FSPI_LUT_OFFSET (SEQID_LUT * 4 * 4)
+#define FSPI_LUT_REG(idx) \
+ (FSPI_LUT_BASE + FSPI_LUT_OFFSET + (idx) * 4)
+
+/* register map end */
+
+/* Instruction set for the LUT register. */
+#define LUT_STOP 0x00
+#define LUT_CMD 0x01
+#define LUT_ADDR 0x02
+#define LUT_CADDR_SDR 0x03
+#define LUT_MODE 0x04
+#define LUT_MODE2 0x05
+#define LUT_MODE4 0x06
+#define LUT_MODE8 0x07
+#define LUT_NXP_WRITE 0x08
+#define LUT_NXP_READ 0x09
+#define LUT_LEARN_SDR 0x0A
+#define LUT_DATSZ_SDR 0x0B
+#define LUT_DUMMY 0x0C
+#define LUT_DUMMY_RWDS_SDR 0x0D
+#define LUT_JMP_ON_CS 0x1F
+#define LUT_CMD_DDR 0x21
+#define LUT_ADDR_DDR 0x22
+#define LUT_CADDR_DDR 0x23
+#define LUT_MODE_DDR 0x24
+#define LUT_MODE2_DDR 0x25
+#define LUT_MODE4_DDR 0x26
+#define LUT_MODE8_DDR 0x27
+#define LUT_WRITE_DDR 0x28
+#define LUT_READ_DDR 0x29
+#define LUT_LEARN_DDR 0x2A
+#define LUT_DATSZ_DDR 0x2B
+#define LUT_DUMMY_DDR 0x2C
+#define LUT_DUMMY_RWDS_DDR 0x2D
+
+/*
+ * Calculate number of required PAD bits for LUT register.
+ *
+ * The pad stands for the number of IO lines [0:7].
+ * For example, an octal read needs eight IO lines, so LUT_PAD(8)
+ * should be used. The macro then returns 3, i.e. eight (2^3) IO
+ * lines are used for the read.
+ */
+#define LUT_PAD(x) (fls(x) - 1)
+
+/*
+ * Macro for constructing the LUT entries with the following
+ * register layout:
+ *
+ * ---------------------------------------------------
+ * | INSTR1 | PAD1 | OPRND1 | INSTR0 | PAD0 | OPRND0 |
+ * ---------------------------------------------------
+ */
+#define PAD_SHIFT 8
+#define INSTR_SHIFT 10
+#define OPRND_SHIFT 16
+
+/* Macros for constructing the LUT register. */
+#define LUT_DEF(idx, ins, pad, opr) \
+ ((((ins) << INSTR_SHIFT) | ((pad) << PAD_SHIFT) | \
+ (opr)) << (((idx) % 2) * OPRND_SHIFT))
+
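+/*
+ * Illustration only (not taken from any particular flash datasheet):
+ * packing a single-line 0x0B read command followed by a 24-bit address
+ * into the first LUT register gives
+ *
+ *   LUT_DEF(0, LUT_CMD, LUT_PAD(1), 0x0B) |
+ *   LUT_DEF(1, LUT_ADDR, LUT_PAD(1), 24)
+ *     = 0x0000040B | 0x08180000 = 0x0818040B
+ *
+ * i.e. instruction 1 occupies the upper half-word, instruction 0 the lower.
+ */
+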
+#define POLL_TOUT 5000
+#define NXP_FSPI_MAX_CHIPSELECT 4
+#define NXP_FSPI_MIN_IOMAP SZ_4M
+
+#define DCFG_RCWSR1 0x100
+#define SYS_PLL_RAT GENMASK(6, 2)
+
+/* Access flash memory using IP bus only */
+#define FSPI_QUIRK_USE_IP_ONLY BIT(0)
+
+struct nxp_fspi_devtype_data {
+ unsigned int rxfifo;
+ unsigned int txfifo;
+ unsigned int ahb_buf_size;
+ unsigned int quirks;
+ bool little_endian;
+};
+
+static struct nxp_fspi_devtype_data lx2160a_data = {
+ .rxfifo = SZ_512, /* (64 * 64 bits) */
+ .txfifo = SZ_1K, /* (128 * 64 bits) */
+ .ahb_buf_size = SZ_2K, /* (256 * 64 bits) */
+ .quirks = 0,
+ .little_endian = true, /* little-endian */
+};
+
+static struct nxp_fspi_devtype_data imx8mm_data = {
+ .rxfifo = SZ_512, /* (64 * 64 bits) */
+ .txfifo = SZ_1K, /* (128 * 64 bits) */
+ .ahb_buf_size = SZ_2K, /* (256 * 64 bits) */
+ .quirks = 0,
+ .little_endian = true, /* little-endian */
+};
+
+static struct nxp_fspi_devtype_data imx8qxp_data = {
+ .rxfifo = SZ_512, /* (64 * 64 bits) */
+ .txfifo = SZ_1K, /* (128 * 64 bits) */
+ .ahb_buf_size = SZ_2K, /* (256 * 64 bits) */
+ .quirks = 0,
+ .little_endian = true, /* little-endian */
+};
+
+static struct nxp_fspi_devtype_data imx8dxl_data = {
+ .rxfifo = SZ_512, /* (64 * 64 bits) */
+ .txfifo = SZ_1K, /* (128 * 64 bits) */
+ .ahb_buf_size = SZ_2K, /* (256 * 64 bits) */
+ .quirks = FSPI_QUIRK_USE_IP_ONLY,
+ .little_endian = true, /* little-endian */
+};
+
+struct nxp_fspi {
+ void __iomem *iobase;
+ void __iomem *ahb_addr;
+ u32 memmap_phy;
+ u32 memmap_phy_size;
+ u32 memmap_start;
+ u32 memmap_len;
+ struct clk *clk, *clk_en;
+ struct device *dev;
+ struct completion c;
+ struct nxp_fspi_devtype_data *devtype_data;
+ struct mutex lock;
+ struct pm_qos_request pm_qos_req;
+ int selected;
+};
+
+static inline int needs_ip_only(struct nxp_fspi *f)
+{
+ return f->devtype_data->quirks & FSPI_QUIRK_USE_IP_ONLY;
+}
+
+/*
+ * R/W functions for big- or little-endian registers:
+ * The FSPI controller's endianness is independent of
+ * the CPU core's endianness. Even when the CPU core is
+ * little-endian, the FSPI controller may be configured as
+ * either big-endian or little-endian.
+ */
+static void fspi_writel(struct nxp_fspi *f, u32 val, void __iomem *addr)
+{
+ if (f->devtype_data->little_endian)
+ iowrite32(val, addr);
+ else
+ iowrite32be(val, addr);
+}
+
+static u32 fspi_readl(struct nxp_fspi *f, void __iomem *addr)
+{
+ if (f->devtype_data->little_endian)
+ return ioread32(addr);
+ else
+ return ioread32be(addr);
+}
+
+static irqreturn_t nxp_fspi_irq_handler(int irq, void *dev_id)
+{
+ struct nxp_fspi *f = dev_id;
+ u32 reg;
+
+ /* clear interrupt */
+ reg = fspi_readl(f, f->iobase + FSPI_INTR);
+ fspi_writel(f, FSPI_INTR_IPCMDDONE, f->iobase + FSPI_INTR);
+
+ if (reg & FSPI_INTR_IPCMDDONE)
+ complete(&f->c);
+
+ return IRQ_HANDLED;
+}
+
+static int nxp_fspi_check_buswidth(struct nxp_fspi *f, u8 width)
+{
+ switch (width) {
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ return 0;
+ }
+
+ return -ENOTSUPP;
+}
+
+static bool nxp_fspi_supports_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ struct nxp_fspi *f = spi_controller_get_devdata(mem->spi->master);
+ int ret;
+
+ ret = nxp_fspi_check_buswidth(f, op->cmd.buswidth);
+
+ if (op->addr.nbytes)
+ ret |= nxp_fspi_check_buswidth(f, op->addr.buswidth);
+
+ if (op->dummy.nbytes)
+ ret |= nxp_fspi_check_buswidth(f, op->dummy.buswidth);
+
+ if (op->data.nbytes)
+ ret |= nxp_fspi_check_buswidth(f, op->data.buswidth);
+
+ if (ret)
+ return false;
+
+	/* The controller supports at most 4 address bytes. */
+ if (op->addr.nbytes > 4)
+ return false;
+
+ /*
+	 * Reject the operation if the requested address lies outside the
+	 * memory mapped space assigned to the controller.
+ */
+ if (op->addr.val >= f->memmap_phy_size)
+ return false;
+
+ /* Max 64 dummy clock cycles supported */
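+	/* e.g. 8 dummy bytes on a quad (4-line) bus amount to 8 * 8 / 4 = 16 cycles */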
+ if (op->dummy.buswidth &&
+ (op->dummy.nbytes * 8 / op->dummy.buswidth > 64))
+ return false;
+
+ /* Max data length, check controller limits and alignment */
+ if (op->data.dir == SPI_MEM_DATA_IN &&
+ (op->data.nbytes > f->devtype_data->ahb_buf_size ||
+ (op->data.nbytes > f->devtype_data->rxfifo - 4 &&
+ !IS_ALIGNED(op->data.nbytes, 8))))
+ return false;
+
+ if (op->data.dir == SPI_MEM_DATA_OUT &&
+ op->data.nbytes > f->devtype_data->txfifo)
+ return false;
+
+ return spi_mem_default_supports_op(mem, op);
+}
+
+/* Instead of busy looping invoke readl_poll_timeout functionality. */
+static int fspi_readl_poll_tout(struct nxp_fspi *f, void __iomem *base,
+ u32 mask, u32 delay_us,
+ u32 timeout_us, bool c)
+{
+ u32 reg;
+
+ if (!f->devtype_data->little_endian)
+ mask = (u32)cpu_to_be32(mask);
+
+ if (c)
+ return readl_poll_timeout(base, reg, (reg & mask),
+ delay_us, timeout_us);
+ else
+ return readl_poll_timeout(base, reg, !(reg & mask),
+ delay_us, timeout_us);
+}
+
+/*
+ * When the slave device content is changed by a write or erase, the
+ * AHB buffer must be invalidated. This is achieved by resetting the
+ * controller via the MCR0[SWRESET] bit.
+ */
+static inline void nxp_fspi_invalid(struct nxp_fspi *f)
+{
+ u32 reg;
+ int ret;
+
+ reg = fspi_readl(f, f->iobase + FSPI_MCR0);
+ fspi_writel(f, reg | FSPI_MCR0_SWRST, f->iobase + FSPI_MCR0);
+
+	/* w1c register, wait until cleared */
+ ret = fspi_readl_poll_tout(f, f->iobase + FSPI_MCR0,
+ FSPI_MCR0_SWRST, 0, POLL_TOUT, false);
+ WARN_ON(ret);
+}
+
+static void nxp_fspi_prepare_lut(struct nxp_fspi *f,
+ const struct spi_mem_op *op)
+{
+ void __iomem *base = f->iobase;
+ u32 lutval[4] = {};
+ int lutidx = 1, i;
+
+ /* cmd */
+ lutval[0] |= LUT_DEF(0, LUT_CMD, LUT_PAD(op->cmd.buswidth),
+ op->cmd.opcode);
+
+ /* addr bytes */
+ if (op->addr.nbytes) {
+ lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_ADDR,
+ LUT_PAD(op->addr.buswidth),
+ op->addr.nbytes * 8);
+ lutidx++;
+ }
+
+ /* dummy bytes, if needed */
+ if (op->dummy.nbytes) {
+ lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_DUMMY,
+ /*
+				       * Due to a FlexSPI controller limitation, the PAD count for
+				       * the dummy phase must be programmed equal to the data buswidth.
+ */
+ LUT_PAD(op->data.buswidth),
+ op->dummy.nbytes * 8 /
+ op->dummy.buswidth);
+ lutidx++;
+ }
+
+ /* read/write data bytes */
+ if (op->data.nbytes) {
+ lutval[lutidx / 2] |= LUT_DEF(lutidx,
+ op->data.dir == SPI_MEM_DATA_IN ?
+ LUT_NXP_READ : LUT_NXP_WRITE,
+ LUT_PAD(op->data.buswidth),
+ 0);
+ lutidx++;
+ }
+
+ /* stop condition. */
+ lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_STOP, 0, 0);
+
+ /* unlock LUT */
+ fspi_writel(f, FSPI_LUTKEY_VALUE, f->iobase + FSPI_LUTKEY);
+ fspi_writel(f, FSPI_LCKER_UNLOCK, f->iobase + FSPI_LCKCR);
+
+ /* fill LUT */
+ for (i = 0; i < ARRAY_SIZE(lutval); i++)
+ fspi_writel(f, lutval[i], base + FSPI_LUT_REG(i));
+
+ dev_dbg(f->dev, "CMD[%x] lutval[0:%x \t 1:%x \t 2:%x \t 3:%x], size: 0x%08x\n",
+ op->cmd.opcode, lutval[0], lutval[1], lutval[2], lutval[3], op->data.nbytes);
+
+ /* lock LUT */
+ fspi_writel(f, FSPI_LUTKEY_VALUE, f->iobase + FSPI_LUTKEY);
+ fspi_writel(f, FSPI_LCKER_LOCK, f->iobase + FSPI_LCKCR);
+}
+
+static int nxp_fspi_clk_prep_enable(struct nxp_fspi *f)
+{
+ int ret;
+
+ if (is_acpi_node(dev_fwnode(f->dev)))
+ return 0;
+
+ ret = clk_prepare_enable(f->clk_en);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(f->clk);
+ if (ret) {
+ clk_disable_unprepare(f->clk_en);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int nxp_fspi_clk_disable_unprep(struct nxp_fspi *f)
+{
+ if (is_acpi_node(dev_fwnode(f->dev)))
+ return 0;
+
+ clk_disable_unprepare(f->clk);
+ clk_disable_unprepare(f->clk_en);
+
+ return 0;
+}
+
+/*
+ * In the FlexSPI controller, flash access is based on the value of the
+ * FSPI_FLSHXXCR0 register and the start base address of the slave device.
+ *
+ * (Higher address)
+ * -------- <-- FLSHB2CR0
+ * | B2 |
+ * | |
+ * B2 start address --> -------- <-- FLSHB1CR0
+ * | B1 |
+ * | |
+ * B1 start address --> -------- <-- FLSHA2CR0
+ * | A2 |
+ * | |
+ * A2 start address --> -------- <-- FLSHA1CR0
+ * | A1 |
+ * | |
+ * A1 start address --> -------- (Lower address)
+ *
+ *
+ * The start base address defines the starting address range for a given CS,
+ * and FSPI_FLSHXXCR0 defines the size of the slave device connected at
+ * that CS.
+ *
+ * However, different targets have different numbers of chip selects; some
+ * have only one or two CS covering the controller's full memory mapped
+ * space.
+ * The implementation is therefore made independent of the size and number
+ * of the connected slave devices: the controller's whole memory mapped
+ * space size is assigned to the currently selected slave device.
+ * All FLSHxxCR0 registers are cleared first, and a value is then written
+ * only to the Flash configuration register of the selected chip-select.
+ *
+ * For example, to access CS2 (B1), FLSHB1CR0 is programmed with the memory
+ * mapped size of the controller while the FLSHxxCR0 registers of the
+ * remaining chip-selects stay zero.
+ *
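+ * As a rough illustration (the window size is platform dependent): with a
+ * 256 MB memory mapped window, FSPI_FLSHXCR0_SZ() yields 256 * 1024 =
+ * 0x40000, i.e. the window size in KiB, which is the value written to the
+ * selected FLSHxxCR0 register.
+ *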
+ */
+static void nxp_fspi_select_mem(struct nxp_fspi *f, struct spi_device *spi)
+{
+ unsigned long rate = spi->max_speed_hz;
+ int ret;
+ uint64_t size_kb;
+
+ /*
+	 * Return if the previously selected slave device is the same as the
+	 * one currently being requested.
+ */
+ if (f->selected == spi->chip_select)
+ return;
+
+ /* Reset FLSHxxCR0 registers */
+ fspi_writel(f, 0, f->iobase + FSPI_FLSHA1CR0);
+ fspi_writel(f, 0, f->iobase + FSPI_FLSHA2CR0);
+ fspi_writel(f, 0, f->iobase + FSPI_FLSHB1CR0);
+ fspi_writel(f, 0, f->iobase + FSPI_FLSHB2CR0);
+
+	/* Use the controller's memory mapped space size (in KBytes) as the flash size. */
+ size_kb = FSPI_FLSHXCR0_SZ(f->memmap_phy_size);
+
+ fspi_writel(f, size_kb, f->iobase + FSPI_FLSHA1CR0 +
+ 4 * spi->chip_select);
+
+ dev_dbg(f->dev, "Slave device [CS:%x] selected\n", spi->chip_select);
+
+ nxp_fspi_clk_disable_unprep(f);
+
+ ret = clk_set_rate(f->clk, rate);
+ if (ret)
+ return;
+
+ ret = nxp_fspi_clk_prep_enable(f);
+ if (ret)
+ return;
+
+ f->selected = spi->chip_select;
+}
+
+static int nxp_fspi_read_ahb(struct nxp_fspi *f, const struct spi_mem_op *op)
+{
+ u32 start = op->addr.val;
+ u32 len = op->data.nbytes;
+
+	/*
+	 * Remap the AHB window if the requested range is not already covered
+	 * by the current mapping; at least NXP_FSPI_MIN_IOMAP bytes are mapped.
+	 */
+ if ((!f->ahb_addr) || start < f->memmap_start ||
+ start + len > f->memmap_start + f->memmap_len) {
+ if (f->ahb_addr)
+ iounmap(f->ahb_addr);
+
+ f->memmap_start = start;
+ f->memmap_len = len > NXP_FSPI_MIN_IOMAP ?
+ len : NXP_FSPI_MIN_IOMAP;
+
+ f->ahb_addr = ioremap(f->memmap_phy + f->memmap_start,
+ f->memmap_len);
+
+ if (!f->ahb_addr) {
+ dev_err(f->dev, "failed to alloc memory\n");
+ return -ENOMEM;
+ }
+ }
+
+ /* Read out the data directly from the AHB buffer. */
+ memcpy_fromio(op->data.buf.in,
+ f->ahb_addr + start - f->memmap_start, len);
+
+ return 0;
+}
+
+static void nxp_fspi_fill_txfifo(struct nxp_fspi *f,
+ const struct spi_mem_op *op)
+{
+ void __iomem *base = f->iobase;
+ int i, ret;
+ u8 *buf = (u8 *) op->data.buf.out;
+
+ /* clear the TX FIFO. */
+ fspi_writel(f, FSPI_IPTXFCR_CLR, base + FSPI_IPTXFCR);
+
+ /*
+	 * The default watermark level is 8 bytes, so the controller can
+	 * accept at most 8 bytes of data per write request.
+ */
+
+ for (i = 0; i < ALIGN_DOWN(op->data.nbytes, 8); i += 8) {
+ /* Wait for TXFIFO empty */
+ ret = fspi_readl_poll_tout(f, f->iobase + FSPI_INTR,
+ FSPI_INTR_IPTXWE, 0,
+ POLL_TOUT, true);
+ WARN_ON(ret);
+
+ fspi_writel(f, *(u32 *) (buf + i), base + FSPI_TFDR);
+ fspi_writel(f, *(u32 *) (buf + i + 4), base + FSPI_TFDR + 4);
+ fspi_writel(f, FSPI_INTR_IPTXWE, base + FSPI_INTR);
+ }
+
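+	/* Push any remaining tail (fewer than 8 bytes) in 4-byte chunks. */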
+ if (i < op->data.nbytes) {
+ u32 data = 0;
+ int j;
+ /* Wait for TXFIFO empty */
+ ret = fspi_readl_poll_tout(f, f->iobase + FSPI_INTR,
+ FSPI_INTR_IPTXWE, 0,
+ POLL_TOUT, true);
+ WARN_ON(ret);
+
+ for (j = 0; j < ALIGN(op->data.nbytes - i, 4); j += 4) {
+ memcpy(&data, buf + i + j, 4);
+ fspi_writel(f, data, base + FSPI_TFDR + j);
+ }
+ fspi_writel(f, FSPI_INTR_IPTXWE, base + FSPI_INTR);
+ }
+}
+
+static void nxp_fspi_read_rxfifo(struct nxp_fspi *f,
+ const struct spi_mem_op *op)
+{
+ void __iomem *base = f->iobase;
+ int i, ret;
+ int len = op->data.nbytes;
+ u8 *buf = (u8 *) op->data.buf.in;
+
+ /*
+	 * The default watermark level is 8 bytes, so the controller can
+	 * return at most 8 bytes of data per read request.
+ */
+ for (i = 0; i < ALIGN_DOWN(len, 8); i += 8) {
+ /* Wait for RXFIFO available */
+ ret = fspi_readl_poll_tout(f, f->iobase + FSPI_INTR,
+ FSPI_INTR_IPRXWA, 0,
+ POLL_TOUT, true);
+ WARN_ON(ret);
+
+ *(u32 *)(buf + i) = fspi_readl(f, base + FSPI_RFDR);
+ *(u32 *)(buf + i + 4) = fspi_readl(f, base + FSPI_RFDR + 4);
+ /* move the FIFO pointer */
+ fspi_writel(f, FSPI_INTR_IPRXWA, base + FSPI_INTR);
+ }
+
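+	/* Drain any remaining tail (fewer than 8 bytes) in 4-byte chunks. */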
+ if (i < len) {
+ u32 tmp;
+ int size, j;
+
+ buf = op->data.buf.in + i;
+ /* Wait for RXFIFO available */
+ ret = fspi_readl_poll_tout(f, f->iobase + FSPI_INTR,
+ FSPI_INTR_IPRXWA, 0,
+ POLL_TOUT, true);
+ WARN_ON(ret);
+
+ len = op->data.nbytes - i;
+ for (j = 0; j < op->data.nbytes - i; j += 4) {
+ tmp = fspi_readl(f, base + FSPI_RFDR + j);
+ size = min(len, 4);
+ memcpy(buf + j, &tmp, size);
+ len -= size;
+ }
+ }
+
+	/* invalidate the RX FIFO */
+ fspi_writel(f, FSPI_IPRXFCR_CLR, base + FSPI_IPRXFCR);
+ /* move the FIFO pointer */
+ fspi_writel(f, FSPI_INTR_IPRXWA, base + FSPI_INTR);
+}
+
+static int nxp_fspi_do_op(struct nxp_fspi *f, const struct spi_mem_op *op)
+{
+ void __iomem *base = f->iobase;
+ int seqnum = 0;
+ int err = 0;
+ u32 reg;
+
+ reg = fspi_readl(f, base + FSPI_IPRXFCR);
+	/* invalidate the RX FIFO first */
+ reg &= ~FSPI_IPRXFCR_DMA_EN;
+ reg = reg | FSPI_IPRXFCR_CLR;
+ fspi_writel(f, reg, base + FSPI_IPRXFCR);
+
+ init_completion(&f->c);
+
+ fspi_writel(f, op->addr.val, base + FSPI_IPCR0);
+ /*
+ * Always start the sequence at the same index since we update
+	 * the LUT at each exec_op() call. Also specify the DATA
+	 * length, since it has not been specified in the LUT.
+ */
+ fspi_writel(f, op->data.nbytes |
+ (SEQID_LUT << FSPI_IPCR1_SEQID_SHIFT) |
+ (seqnum << FSPI_IPCR1_SEQNUM_SHIFT),
+ base + FSPI_IPCR1);
+
+ /* Trigger the LUT now. */
+ fspi_writel(f, FSPI_IPCMD_TRG, base + FSPI_IPCMD);
+
+ /* Wait for the interrupt. */
+ if (!wait_for_completion_timeout(&f->c, msecs_to_jiffies(1000)))
+ err = -ETIMEDOUT;
+
+	/* Read back the data over the IP bus if this is a data read request. */
+ if (!err && op->data.nbytes && op->data.dir == SPI_MEM_DATA_IN)
+ nxp_fspi_read_rxfifo(f, op);
+
+ return err;
+}
+
+static int nxp_fspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ struct nxp_fspi *f = spi_controller_get_devdata(mem->spi->master);
+ int err = 0;
+
+ mutex_lock(&f->lock);
+
+	/* Wait for the controller to become ready. */
+ err = fspi_readl_poll_tout(f, f->iobase + FSPI_STS0,
+ FSPI_STS0_ARB_IDLE, 1, POLL_TOUT, true);
+ WARN_ON(err);
+
+ nxp_fspi_select_mem(f, mem->spi);
+
+ nxp_fspi_prepare_lut(f, op);
+ /*
+ * If we have large chunks of data, we read them through the AHB bus by
+ * accessing the mapped memory. In all other cases we use IP commands
+	 * to access the flash. On parts affected by an erratum, reads via
+	 * the AHB bus may return corrupted data, so AHB reads are not used there.
+ */
+ if (op->data.nbytes > (f->devtype_data->rxfifo - 4) &&
+ op->data.dir == SPI_MEM_DATA_IN &&
+ !needs_ip_only(f)) {
+ err = nxp_fspi_read_ahb(f, op);
+ } else {
+ if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
+ nxp_fspi_fill_txfifo(f, op);
+
+ err = nxp_fspi_do_op(f, op);
+ }
+
+ /* Invalidate the data in the AHB buffer. */
+ nxp_fspi_invalid(f);
+
+ mutex_unlock(&f->lock);
+
+ return err;
+}
+
+static int nxp_fspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
+{
+ struct nxp_fspi *f = spi_controller_get_devdata(mem->spi->master);
+
+ if (op->data.dir == SPI_MEM_DATA_OUT) {
+ if (op->data.nbytes > f->devtype_data->txfifo)
+ op->data.nbytes = f->devtype_data->txfifo;
+ } else {
+ if (op->data.nbytes > f->devtype_data->ahb_buf_size)
+ op->data.nbytes = f->devtype_data->ahb_buf_size;
+ else if (op->data.nbytes > (f->devtype_data->rxfifo - 4))
+ op->data.nbytes = ALIGN_DOWN(op->data.nbytes, 8);
+ }
+
+ /* Limit data bytes to RX FIFO in case of IP read only */
+ if (op->data.dir == SPI_MEM_DATA_IN &&
+ needs_ip_only(f) &&
+ op->data.nbytes > f->devtype_data->rxfifo)
+ op->data.nbytes = f->devtype_data->rxfifo;
+
+ return 0;
+}
+
+static void erratum_err050568(struct nxp_fspi *f)
+{
+ const struct soc_device_attribute ls1028a_soc_attr[] = {
+ { .family = "QorIQ LS1028A" },
+ { /* sentinel */ }
+ };
+ struct regmap *map;
+ u32 val, sys_pll_ratio;
+ int ret;
+
+ /* Check for LS1028A family */
+ if (!soc_device_match(ls1028a_soc_attr)) {
+ dev_dbg(f->dev, "Errata applicable only for LS1028A\n");
+ return;
+ }
+
+ map = syscon_regmap_lookup_by_compatible("fsl,ls1028a-dcfg");
+ if (IS_ERR(map)) {
+ dev_err(f->dev, "No syscon regmap\n");
+ goto err;
+ }
+
+ ret = regmap_read(map, DCFG_RCWSR1, &val);
+ if (ret < 0)
+ goto err;
+
+ sys_pll_ratio = FIELD_GET(SYS_PLL_RAT, val);
+ dev_dbg(f->dev, "val: 0x%08x, sys_pll_ratio: %d\n", val, sys_pll_ratio);
+
+ /* Use IP bus only if platform clock is 300MHz */
+ if (sys_pll_ratio == 3)
+ f->devtype_data->quirks |= FSPI_QUIRK_USE_IP_ONLY;
+
+ return;
+
+err:
+ dev_err(f->dev, "Errata cannot be executed. Read via IP bus may not work\n");
+}
+
+static int nxp_fspi_default_setup(struct nxp_fspi *f)
+{
+ void __iomem *base = f->iobase;
+ int ret, i;
+ u32 reg;
+
+	/* disable and unprepare the clocks to avoid glitches reaching the controller */
+ nxp_fspi_clk_disable_unprep(f);
+
+ /* the default frequency, we will change it later if necessary. */
+ ret = clk_set_rate(f->clk, 20000000);
+ if (ret)
+ return ret;
+
+ ret = nxp_fspi_clk_prep_enable(f);
+ if (ret)
+ return ret;
+
+ /*
+ * ERR050568: Flash access by FlexSPI AHB command may not work with
+ * platform frequency equal to 300 MHz on LS1028A.
+ * LS1028A reuses LX2160A compatible entry. Make errata applicable for
+ * Layerscape LS1028A platform.
+ */
+ if (of_device_is_compatible(f->dev->of_node, "nxp,lx2160a-fspi"))
+ erratum_err050568(f);
+
+ /* Reset the module */
+	/* w1c register, wait until cleared */
+ ret = fspi_readl_poll_tout(f, f->iobase + FSPI_MCR0,
+ FSPI_MCR0_SWRST, 0, POLL_TOUT, false);
+ WARN_ON(ret);
+
+ /* Disable the module */
+ fspi_writel(f, FSPI_MCR0_MDIS, base + FSPI_MCR0);
+
+ /* Reset the DLL register to default value */
+ fspi_writel(f, FSPI_DLLACR_OVRDEN, base + FSPI_DLLACR);
+ fspi_writel(f, FSPI_DLLBCR_OVRDEN, base + FSPI_DLLBCR);
+
+ /* enable module */
+ fspi_writel(f, FSPI_MCR0_AHB_TIMEOUT(0xFF) |
+ FSPI_MCR0_IP_TIMEOUT(0xFF) | (u32) FSPI_MCR0_OCTCOMB_EN,
+ base + FSPI_MCR0);
+
+ /*
+	 * Disable the same-device-enable bit so that all slave devices are
+	 * configured independently.
+ */
+ reg = fspi_readl(f, f->iobase + FSPI_MCR2);
+ reg = reg & ~(FSPI_MCR2_SAMEDEVICEEN);
+ fspi_writel(f, reg, base + FSPI_MCR2);
+
+ /* AHB configuration for access buffer 0~7. */
+ for (i = 0; i < 7; i++)
+ fspi_writel(f, 0, base + FSPI_AHBRX_BUF0CR0 + 4 * i);
+
+ /*
+ * Set ADATSZ with the maximum AHB buffer size to improve the read
+ * performance.
+ */
+ fspi_writel(f, (f->devtype_data->ahb_buf_size / 8 |
+ FSPI_AHBRXBUF0CR7_PREF), base + FSPI_AHBRX_BUF7CR0);
+
+ /* prefetch and no start address alignment limitation */
+ fspi_writel(f, FSPI_AHBCR_PREF_EN | FSPI_AHBCR_RDADDROPT,
+ base + FSPI_AHBCR);
+
+ /* Reset the FLSHxCR1 registers. */
+ reg = FSPI_FLSHXCR1_TCSH(0x3) | FSPI_FLSHXCR1_TCSS(0x3);
+ fspi_writel(f, reg, base + FSPI_FLSHA1CR1);
+ fspi_writel(f, reg, base + FSPI_FLSHA2CR1);
+ fspi_writel(f, reg, base + FSPI_FLSHB1CR1);
+ fspi_writel(f, reg, base + FSPI_FLSHB2CR1);
+
+ /* AHB Read - Set lut sequence ID for all CS. */
+ fspi_writel(f, SEQID_LUT, base + FSPI_FLSHA1CR2);
+ fspi_writel(f, SEQID_LUT, base + FSPI_FLSHA2CR2);
+ fspi_writel(f, SEQID_LUT, base + FSPI_FLSHB1CR2);
+ fspi_writel(f, SEQID_LUT, base + FSPI_FLSHB2CR2);
+
+ f->selected = -1;
+
+ /* enable the interrupt */
+ fspi_writel(f, FSPI_INTEN_IPCMDDONE, base + FSPI_INTEN);
+
+ return 0;
+}
+
+static const char *nxp_fspi_get_name(struct spi_mem *mem)
+{
+ struct nxp_fspi *f = spi_controller_get_devdata(mem->spi->master);
+ struct device *dev = &mem->spi->dev;
+ const char *name;
+
+ // Set custom name derived from the platform_device of the controller.
+ if (of_get_available_child_count(f->dev->of_node) == 1)
+ return dev_name(f->dev);
+
+ name = devm_kasprintf(dev, GFP_KERNEL,
+ "%s-%d", dev_name(f->dev),
+ mem->spi->chip_select);
+
+ if (!name) {
+ dev_err(dev, "failed to get memory for custom flash name\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return name;
+}
+
+static const struct spi_controller_mem_ops nxp_fspi_mem_ops = {
+ .adjust_op_size = nxp_fspi_adjust_op_size,
+ .supports_op = nxp_fspi_supports_op,
+ .exec_op = nxp_fspi_exec_op,
+ .get_name = nxp_fspi_get_name,
+};
+
+static int nxp_fspi_probe(struct platform_device *pdev)
+{
+ struct spi_controller *ctlr;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct resource *res;
+ struct nxp_fspi *f;
+ int ret;
+ u32 reg;
+
+ ctlr = spi_alloc_master(&pdev->dev, sizeof(*f));
+ if (!ctlr)
+ return -ENOMEM;
+
+ ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL |
+ SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL;
+
+ f = spi_controller_get_devdata(ctlr);
+ f->dev = dev;
+ f->devtype_data = (struct nxp_fspi_devtype_data *)device_get_match_data(dev);
+ if (!f->devtype_data) {
+ ret = -ENODEV;
+ goto err_put_ctrl;
+ }
+
+ platform_set_drvdata(pdev, f);
+
+ /* find the resources - configuration register address space */
+ if (is_acpi_node(dev_fwnode(f->dev)))
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ else
+ res = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "fspi_base");
+
+ f->iobase = devm_ioremap_resource(dev, res);
+ if (IS_ERR(f->iobase)) {
+ ret = PTR_ERR(f->iobase);
+ goto err_put_ctrl;
+ }
+
+ /* find the resources - controller memory mapped space */
+ if (is_acpi_node(dev_fwnode(f->dev)))
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ else
+ res = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "fspi_mmap");
+
+ if (!res) {
+ ret = -ENODEV;
+ goto err_put_ctrl;
+ }
+
+ /* assign memory mapped starting address and mapped size. */
+ f->memmap_phy = res->start;
+ f->memmap_phy_size = resource_size(res);
+
+ /* find the clocks */
+ if (dev_of_node(&pdev->dev)) {
+ f->clk_en = devm_clk_get(dev, "fspi_en");
+ if (IS_ERR(f->clk_en)) {
+ ret = PTR_ERR(f->clk_en);
+ goto err_put_ctrl;
+ }
+
+ f->clk = devm_clk_get(dev, "fspi");
+ if (IS_ERR(f->clk)) {
+ ret = PTR_ERR(f->clk);
+ goto err_put_ctrl;
+ }
+
+ ret = nxp_fspi_clk_prep_enable(f);
+ if (ret) {
+ dev_err(dev, "can not enable the clock\n");
+ goto err_put_ctrl;
+ }
+ }
+
+ /* Clear potential interrupts */
+ reg = fspi_readl(f, f->iobase + FSPI_INTR);
+ if (reg)
+ fspi_writel(f, reg, f->iobase + FSPI_INTR);
+
+ /* find the irq */
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ goto err_disable_clk;
+
+ ret = devm_request_irq(dev, ret,
+ nxp_fspi_irq_handler, 0, pdev->name, f);
+ if (ret) {
+ dev_err(dev, "failed to request irq: %d\n", ret);
+ goto err_disable_clk;
+ }
+
+ mutex_init(&f->lock);
+
+ ctlr->bus_num = -1;
+ ctlr->num_chipselect = NXP_FSPI_MAX_CHIPSELECT;
+ ctlr->mem_ops = &nxp_fspi_mem_ops;
+
+ nxp_fspi_default_setup(f);
+
+ ctlr->dev.of_node = np;
+
+ ret = devm_spi_register_controller(&pdev->dev, ctlr);
+ if (ret)
+ goto err_destroy_mutex;
+
+ return 0;
+
+err_destroy_mutex:
+ mutex_destroy(&f->lock);
+
+err_disable_clk:
+ nxp_fspi_clk_disable_unprep(f);
+
+err_put_ctrl:
+ spi_controller_put(ctlr);
+
+ dev_err(dev, "NXP FSPI probe failed\n");
+ return ret;
+}
+
+static int nxp_fspi_remove(struct platform_device *pdev)
+{
+ struct nxp_fspi *f = platform_get_drvdata(pdev);
+
+ /* disable the hardware */
+ fspi_writel(f, FSPI_MCR0_MDIS, f->iobase + FSPI_MCR0);
+
+ nxp_fspi_clk_disable_unprep(f);
+
+ mutex_destroy(&f->lock);
+
+ if (f->ahb_addr)
+ iounmap(f->ahb_addr);
+
+ return 0;
+}
+
+static int nxp_fspi_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int nxp_fspi_resume(struct device *dev)
+{
+ struct nxp_fspi *f = dev_get_drvdata(dev);
+
+ nxp_fspi_default_setup(f);
+
+ return 0;
+}
+
+static const struct of_device_id nxp_fspi_dt_ids[] = {
+ { .compatible = "nxp,lx2160a-fspi", .data = (void *)&lx2160a_data, },
+ { .compatible = "nxp,imx8mm-fspi", .data = (void *)&imx8mm_data, },
+ { .compatible = "nxp,imx8mp-fspi", .data = (void *)&imx8mm_data, },
+ { .compatible = "nxp,imx8qxp-fspi", .data = (void *)&imx8qxp_data, },
+ { .compatible = "nxp,imx8dxl-fspi", .data = (void *)&imx8dxl_data, },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, nxp_fspi_dt_ids);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id nxp_fspi_acpi_ids[] = {
+ { "NXP0009", .driver_data = (kernel_ulong_t)&lx2160a_data, },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, nxp_fspi_acpi_ids);
+#endif
+
+static const struct dev_pm_ops nxp_fspi_pm_ops = {
+ .suspend = nxp_fspi_suspend,
+ .resume = nxp_fspi_resume,
+};
+
+static struct platform_driver nxp_fspi_driver = {
+ .driver = {
+ .name = "nxp-fspi",
+ .of_match_table = nxp_fspi_dt_ids,
+ .acpi_match_table = ACPI_PTR(nxp_fspi_acpi_ids),
+ .pm = &nxp_fspi_pm_ops,
+ },
+ .probe = nxp_fspi_probe,
+ .remove = nxp_fspi_remove,
+};
+module_platform_driver(nxp_fspi_driver);
+
+MODULE_DESCRIPTION("NXP FSPI Controller Driver");
+MODULE_AUTHOR("NXP Semiconductor");
+MODULE_AUTHOR("Yogesh Narayan Gaur <yogeshnarayan.gaur@nxp.com>");
+MODULE_AUTHOR("Boris Brezillon <bbrezillon@kernel.org>");
+MODULE_AUTHOR("Frieder Schrempf <frieder.schrempf@kontron.de>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-oc-tiny.c b/drivers/spi/spi-oc-tiny.c
new file mode 100644
index 000000000..38c14c4e4
--- /dev/null
+++ b/drivers/spi/spi-oc-tiny.c
@@ -0,0 +1,306 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * OpenCores tiny SPI master driver
+ *
+ * https://opencores.org/project,tiny_spi
+ *
+ * Copyright (C) 2011 Thomas Chou <thomas@wytron.com.tw>
+ *
+ * Based on spi_s3c24xx.c, which is:
+ * Copyright (c) 2006 Ben Dooks
+ * Copyright (c) 2006 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ */
+
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+#include <linux/spi/spi_oc_tiny.h>
+#include <linux/io.h>
+#include <linux/of.h>
+
+#define DRV_NAME "spi_oc_tiny"
+
+#define TINY_SPI_RXDATA 0
+#define TINY_SPI_TXDATA 4
+#define TINY_SPI_STATUS 8
+#define TINY_SPI_CONTROL 12
+#define TINY_SPI_BAUD 16
+
+#define TINY_SPI_STATUS_TXE 0x1
+#define TINY_SPI_STATUS_TXR 0x2
+
+struct tiny_spi {
+ /* bitbang has to be first */
+ struct spi_bitbang bitbang;
+ struct completion done;
+
+ void __iomem *base;
+ int irq;
+ unsigned int freq;
+ unsigned int baudwidth;
+ unsigned int baud;
+ unsigned int speed_hz;
+ unsigned int mode;
+ unsigned int len;
+ unsigned int txc, rxc;
+ const u8 *txp;
+ u8 *rxp;
+};
+
+static inline struct tiny_spi *tiny_spi_to_hw(struct spi_device *sdev)
+{
+ return spi_master_get_devdata(sdev->master);
+}
+
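+/*
+ * Worked example (illustrative values only): with freq = 50 MHz, hz = 10 MHz
+ * and an 8-bit baud field, min(DIV_ROUND_UP(50 MHz, 2 * 10 MHz), 256) - 1 = 2.
+ * Assuming the core divides its input clock by 2 * (baud + 1), as this
+ * computation implies, the resulting SCLK is 50 MHz / 6 ~= 8.33 MHz, the
+ * fastest rate that does not exceed the request.
+ */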
+static unsigned int tiny_spi_baud(struct spi_device *spi, unsigned int hz)
+{
+ struct tiny_spi *hw = tiny_spi_to_hw(spi);
+
+ return min(DIV_ROUND_UP(hw->freq, hz * 2), (1U << hw->baudwidth)) - 1;
+}
+
+static int tiny_spi_setup_transfer(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct tiny_spi *hw = tiny_spi_to_hw(spi);
+ unsigned int baud = hw->baud;
+
+ if (t) {
+ if (t->speed_hz && t->speed_hz != hw->speed_hz)
+ baud = tiny_spi_baud(spi, t->speed_hz);
+ }
+ writel(baud, hw->base + TINY_SPI_BAUD);
+ writel(hw->mode, hw->base + TINY_SPI_CONTROL);
+ return 0;
+}
+
+static int tiny_spi_setup(struct spi_device *spi)
+{
+ struct tiny_spi *hw = tiny_spi_to_hw(spi);
+
+ if (spi->max_speed_hz != hw->speed_hz) {
+ hw->speed_hz = spi->max_speed_hz;
+ hw->baud = tiny_spi_baud(spi, hw->speed_hz);
+ }
+ hw->mode = spi->mode & SPI_MODE_X_MASK;
+ return 0;
+}
+
+static inline void tiny_spi_wait_txr(struct tiny_spi *hw)
+{
+ while (!(readb(hw->base + TINY_SPI_STATUS) &
+ TINY_SPI_STATUS_TXR))
+ cpu_relax();
+}
+
+static inline void tiny_spi_wait_txe(struct tiny_spi *hw)
+{
+ while (!(readb(hw->base + TINY_SPI_STATUS) &
+ TINY_SPI_STATUS_TXE))
+ cpu_relax();
+}
+
+static int tiny_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct tiny_spi *hw = tiny_spi_to_hw(spi);
+ const u8 *txp = t->tx_buf;
+ u8 *rxp = t->rx_buf;
+ unsigned int i;
+
+ if (hw->irq >= 0) {
+ /* use interrupt driven data transfer */
+ hw->len = t->len;
+ hw->txp = t->tx_buf;
+ hw->rxp = t->rx_buf;
+ hw->txc = 0;
+ hw->rxc = 0;
+
+		/* queue the first byte(s) to start the transfer */
+ if (t->len > 1) {
+ writeb(hw->txp ? *hw->txp++ : 0,
+ hw->base + TINY_SPI_TXDATA);
+ hw->txc++;
+ writeb(hw->txp ? *hw->txp++ : 0,
+ hw->base + TINY_SPI_TXDATA);
+ hw->txc++;
+ writeb(TINY_SPI_STATUS_TXR, hw->base + TINY_SPI_STATUS);
+ } else {
+ writeb(hw->txp ? *hw->txp++ : 0,
+ hw->base + TINY_SPI_TXDATA);
+ hw->txc++;
+ writeb(TINY_SPI_STATUS_TXE, hw->base + TINY_SPI_STATUS);
+ }
+
+ wait_for_completion(&hw->done);
+ } else {
+ /* we need to tighten the transfer loop */
+ writeb(txp ? *txp++ : 0, hw->base + TINY_SPI_TXDATA);
+ for (i = 1; i < t->len; i++) {
+ writeb(txp ? *txp++ : 0, hw->base + TINY_SPI_TXDATA);
+
+ if (rxp || (i != t->len - 1))
+ tiny_spi_wait_txr(hw);
+ if (rxp)
+ *rxp++ = readb(hw->base + TINY_SPI_TXDATA);
+ }
+ tiny_spi_wait_txe(hw);
+ if (rxp)
+ *rxp++ = readb(hw->base + TINY_SPI_RXDATA);
+ }
+
+ return t->len;
+}
+
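+/*
+ * Interrupt handler for the interrupt-driven path: each interrupt retires one
+ * received byte and, while transmit data remains, queues the next TX byte so
+ * the shifter stays busy; the final byte completes hw->done.
+ */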
+static irqreturn_t tiny_spi_irq(int irq, void *dev)
+{
+ struct tiny_spi *hw = dev;
+
+ writeb(0, hw->base + TINY_SPI_STATUS);
+ if (hw->rxc + 1 == hw->len) {
+ if (hw->rxp)
+ *hw->rxp++ = readb(hw->base + TINY_SPI_RXDATA);
+ hw->rxc++;
+ complete(&hw->done);
+ } else {
+ if (hw->rxp)
+ *hw->rxp++ = readb(hw->base + TINY_SPI_TXDATA);
+ hw->rxc++;
+ if (hw->txc < hw->len) {
+ writeb(hw->txp ? *hw->txp++ : 0,
+ hw->base + TINY_SPI_TXDATA);
+ hw->txc++;
+ writeb(TINY_SPI_STATUS_TXR,
+ hw->base + TINY_SPI_STATUS);
+ } else {
+ writeb(TINY_SPI_STATUS_TXE,
+ hw->base + TINY_SPI_STATUS);
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_OF
+#include <linux/of_gpio.h>
+
+static int tiny_spi_of_probe(struct platform_device *pdev)
+{
+ struct tiny_spi *hw = platform_get_drvdata(pdev);
+ struct device_node *np = pdev->dev.of_node;
+ u32 val;
+
+ if (!np)
+ return 0;
+ hw->bitbang.master->dev.of_node = pdev->dev.of_node;
+ if (!of_property_read_u32(np, "clock-frequency", &val))
+ hw->freq = val;
+ if (!of_property_read_u32(np, "baud-width", &val))
+ hw->baudwidth = val;
+ return 0;
+}
+#else /* !CONFIG_OF */
+static int tiny_spi_of_probe(struct platform_device *pdev)
+{
+ return 0;
+}
+#endif /* CONFIG_OF */
+
+static int tiny_spi_probe(struct platform_device *pdev)
+{
+ struct tiny_spi_platform_data *platp = dev_get_platdata(&pdev->dev);
+ struct tiny_spi *hw;
+ struct spi_master *master;
+ int err = -ENODEV;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct tiny_spi));
+ if (!master)
+ return err;
+
+ /* setup the master state. */
+ master->bus_num = pdev->id;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ master->setup = tiny_spi_setup;
+ master->use_gpio_descriptors = true;
+
+ hw = spi_master_get_devdata(master);
+ platform_set_drvdata(pdev, hw);
+
+ /* setup the state for the bitbang driver */
+ hw->bitbang.master = master;
+ hw->bitbang.setup_transfer = tiny_spi_setup_transfer;
+ hw->bitbang.txrx_bufs = tiny_spi_txrx_bufs;
+
+ /* find and map our resources */
+ hw->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(hw->base)) {
+ err = PTR_ERR(hw->base);
+ goto exit;
+ }
+ /* irq is optional */
+ hw->irq = platform_get_irq(pdev, 0);
+ if (hw->irq >= 0) {
+ init_completion(&hw->done);
+ err = devm_request_irq(&pdev->dev, hw->irq, tiny_spi_irq, 0,
+ pdev->name, hw);
+ if (err)
+ goto exit;
+ }
+ /* find platform data */
+ if (platp) {
+ hw->freq = platp->freq;
+ hw->baudwidth = platp->baudwidth;
+ } else {
+ err = tiny_spi_of_probe(pdev);
+ if (err)
+ goto exit;
+ }
+
+ /* register our spi controller */
+ err = spi_bitbang_start(&hw->bitbang);
+ if (err)
+ goto exit;
+ dev_info(&pdev->dev, "base %p, irq %d\n", hw->base, hw->irq);
+
+ return 0;
+
+exit:
+ spi_master_put(master);
+ return err;
+}
+
+static int tiny_spi_remove(struct platform_device *pdev)
+{
+ struct tiny_spi *hw = platform_get_drvdata(pdev);
+ struct spi_master *master = hw->bitbang.master;
+
+ spi_bitbang_stop(&hw->bitbang);
+ spi_master_put(master);
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id tiny_spi_match[] = {
+ { .compatible = "opencores,tiny-spi-rtlsvn2", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, tiny_spi_match);
+#endif /* CONFIG_OF */
+
+static struct platform_driver tiny_spi_driver = {
+ .probe = tiny_spi_probe,
+ .remove = tiny_spi_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .pm = NULL,
+ .of_match_table = of_match_ptr(tiny_spi_match),
+ },
+};
+module_platform_driver(tiny_spi_driver);
+
+MODULE_DESCRIPTION("OpenCores tiny SPI driver");
+MODULE_AUTHOR("Thomas Chou <thomas@wytron.com.tw>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/spi/spi-omap-100k.c b/drivers/spi/spi-omap-100k.c
new file mode 100644
index 000000000..061f7394e
--- /dev/null
+++ b/drivers/spi/spi-omap-100k.c
@@ -0,0 +1,490 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * OMAP7xx SPI 100k controller driver
+ * Author: Fabrice Crohas <fcrohas@gmail.com>
+ * from original omap1_mcspi driver
+ *
+ * Copyright (C) 2005, 2006 Nokia Corporation
+ * Author: Samuel Ortiz <samuel.ortiz@nokia.com> and
+ * Juha Yrjola <juha.yrjola@nokia.com>
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+
+#include <linux/spi/spi.h>
+
+#define OMAP1_SPI100K_MAX_FREQ 48000000
+
+#define ICR_SPITAS (OMAP7XX_ICR_BASE + 0x12)
+
+#define SPI_SETUP1 0x00
+#define SPI_SETUP2 0x02
+#define SPI_CTRL 0x04
+#define SPI_STATUS 0x06
+#define SPI_TX_LSB 0x08
+#define SPI_TX_MSB 0x0a
+#define SPI_RX_LSB 0x0c
+#define SPI_RX_MSB 0x0e
+
+#define SPI_SETUP1_INT_READ_ENABLE (1UL << 5)
+#define SPI_SETUP1_INT_WRITE_ENABLE (1UL << 4)
+#define SPI_SETUP1_CLOCK_DIVISOR(x) ((x) << 1)
+#define SPI_SETUP1_CLOCK_ENABLE (1UL << 0)
+
+#define SPI_SETUP2_ACTIVE_EDGE_FALLING (0UL << 0)
+#define SPI_SETUP2_ACTIVE_EDGE_RISING (1UL << 0)
+#define SPI_SETUP2_NEGATIVE_LEVEL (0UL << 5)
+#define SPI_SETUP2_POSITIVE_LEVEL (1UL << 5)
+#define SPI_SETUP2_LEVEL_TRIGGER (0UL << 10)
+#define SPI_SETUP2_EDGE_TRIGGER (1UL << 10)
+
+#define SPI_CTRL_SEN(x) ((x) << 7)
+#define SPI_CTRL_WORD_SIZE(x) (((x) - 1) << 2)
+#define SPI_CTRL_WR (1UL << 1)
+#define SPI_CTRL_RD (1UL << 0)
+
+#define SPI_STATUS_WE (1UL << 1)
+#define SPI_STATUS_RD (1UL << 0)
+
+/* use PIO for small transfers, avoiding DMA setup/teardown overhead and
+ * cache operations; better heuristics consider wordsize and bitrate.
+ */
+#define DMA_MIN_BYTES 8
+
+#define SPI_RUNNING 0
+#define SPI_SHUTDOWN 1
+
+struct omap1_spi100k {
+ struct clk *ick;
+ struct clk *fck;
+
+ /* Virtual base address of the controller */
+ void __iomem *base;
+};
+
+struct omap1_spi100k_cs {
+ void __iomem *base;
+ int word_len;
+};
+
+static void spi100k_enable_clock(struct spi_master *master)
+{
+ unsigned int val;
+ struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
+
+ /* enable SPI */
+ val = readw(spi100k->base + SPI_SETUP1);
+ val |= SPI_SETUP1_CLOCK_ENABLE;
+ writew(val, spi100k->base + SPI_SETUP1);
+}
+
+static void spi100k_disable_clock(struct spi_master *master)
+{
+ unsigned int val;
+ struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
+
+ /* disable SPI */
+ val = readw(spi100k->base + SPI_SETUP1);
+ val &= ~SPI_SETUP1_CLOCK_ENABLE;
+ writew(val, spi100k->base + SPI_SETUP1);
+}
+
+static void spi100k_write_data(struct spi_master *master, int len, int data)
+{
+ struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
+
+ /* write 16-bit word, shifting 8-bit data if necessary */
+ if (len <= 8) {
+ data <<= 8;
+ len = 16;
+ }
+
+ spi100k_enable_clock(master);
+ writew(data, spi100k->base + SPI_TX_MSB);
+
+ writew(SPI_CTRL_SEN(0) |
+ SPI_CTRL_WORD_SIZE(len) |
+ SPI_CTRL_WR,
+ spi100k->base + SPI_CTRL);
+
+	/* Wait for the WE status bit to signal that the word has been sent */
+ while ((readw(spi100k->base + SPI_STATUS) & SPI_STATUS_WE) != SPI_STATUS_WE)
+ ;
+ udelay(1000);
+
+ spi100k_disable_clock(master);
+}
+
+static int spi100k_read_data(struct spi_master *master, int len)
+{
+ int dataL;
+ struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
+
+ /* Always do at least 16 bits */
+ if (len <= 8)
+ len = 16;
+
+ spi100k_enable_clock(master);
+ writew(SPI_CTRL_SEN(0) |
+ SPI_CTRL_WORD_SIZE(len) |
+ SPI_CTRL_RD,
+ spi100k->base + SPI_CTRL);
+
+ while ((readw(spi100k->base + SPI_STATUS) & SPI_STATUS_RD) != SPI_STATUS_RD)
+ ;
+ udelay(1000);
+
+ dataL = readw(spi100k->base + SPI_RX_LSB);
+ readw(spi100k->base + SPI_RX_MSB);
+ spi100k_disable_clock(master);
+
+ return dataL;
+}
+
+static void spi100k_open(struct spi_master *master)
+{
+ /* get control of SPI */
+ struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
+
+ writew(SPI_SETUP1_INT_READ_ENABLE |
+ SPI_SETUP1_INT_WRITE_ENABLE |
+ SPI_SETUP1_CLOCK_DIVISOR(0), spi100k->base + SPI_SETUP1);
+
+ /* configure clock and interrupts */
+ writew(SPI_SETUP2_ACTIVE_EDGE_FALLING |
+ SPI_SETUP2_NEGATIVE_LEVEL |
+ SPI_SETUP2_LEVEL_TRIGGER, spi100k->base + SPI_SETUP2);
+}
+
+static void omap1_spi100k_force_cs(struct omap1_spi100k *spi100k, int enable)
+{
+ if (enable)
+ writew(0x05fc, spi100k->base + SPI_CTRL);
+ else
+ writew(0x05fd, spi100k->base + SPI_CTRL);
+}
+
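+/*
+ * PIO transfer loop: each word is shifted out with spi100k_write_data() and,
+ * when a receive buffer is supplied, shifted back in with
+ * spi100k_read_data(), one word at a time.
+ */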
+static unsigned
+omap1_spi100k_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
+{
+ struct omap1_spi100k_cs *cs = spi->controller_state;
+ unsigned int count, c;
+ int word_len;
+
+ count = xfer->len;
+ c = count;
+ word_len = cs->word_len;
+
+ if (word_len <= 8) {
+ u8 *rx;
+ const u8 *tx;
+
+ rx = xfer->rx_buf;
+ tx = xfer->tx_buf;
+ do {
+ c -= 1;
+ if (xfer->tx_buf != NULL)
+ spi100k_write_data(spi->master, word_len, *tx++);
+ if (xfer->rx_buf != NULL)
+ *rx++ = spi100k_read_data(spi->master, word_len);
+ } while (c);
+ } else if (word_len <= 16) {
+ u16 *rx;
+ const u16 *tx;
+
+ rx = xfer->rx_buf;
+ tx = xfer->tx_buf;
+ do {
+ c -= 2;
+ if (xfer->tx_buf != NULL)
+ spi100k_write_data(spi->master, word_len, *tx++);
+ if (xfer->rx_buf != NULL)
+ *rx++ = spi100k_read_data(spi->master, word_len);
+ } while (c);
+ } else if (word_len <= 32) {
+ u32 *rx;
+ const u32 *tx;
+
+ rx = xfer->rx_buf;
+ tx = xfer->tx_buf;
+ do {
+ c -= 4;
+ if (xfer->tx_buf != NULL)
+ spi100k_write_data(spi->master, word_len, *tx);
+ if (xfer->rx_buf != NULL)
+ *rx = spi100k_read_data(spi->master, word_len);
+ } while (c);
+ }
+ return count - c;
+}
+
+/* called only when no transfer is active on this device */
+static int omap1_spi100k_setup_transfer(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct omap1_spi100k *spi100k = spi_master_get_devdata(spi->master);
+ struct omap1_spi100k_cs *cs = spi->controller_state;
+ u8 word_len;
+
+ if (t != NULL)
+ word_len = t->bits_per_word;
+ else
+ word_len = spi->bits_per_word;
+
+ if (word_len > 32)
+ return -EINVAL;
+ cs->word_len = word_len;
+
+ /* SPI init before transfer */
+ writew(0x3e, spi100k->base + SPI_SETUP1);
+ writew(0x00, spi100k->base + SPI_STATUS);
+ writew(0x3e, spi100k->base + SPI_CTRL);
+
+ return 0;
+}
+
+/* the spi->mode bits understood by this driver: */
+#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH)
+
+static int omap1_spi100k_setup(struct spi_device *spi)
+{
+ int ret;
+ struct omap1_spi100k *spi100k;
+ struct omap1_spi100k_cs *cs = spi->controller_state;
+
+ spi100k = spi_master_get_devdata(spi->master);
+
+ if (!cs) {
+ cs = devm_kzalloc(&spi->dev, sizeof(*cs), GFP_KERNEL);
+ if (!cs)
+ return -ENOMEM;
+ cs->base = spi100k->base + spi->chip_select * 0x14;
+ spi->controller_state = cs;
+ }
+
+ spi100k_open(spi->master);
+
+ clk_prepare_enable(spi100k->ick);
+ clk_prepare_enable(spi100k->fck);
+
+ ret = omap1_spi100k_setup_transfer(spi, NULL);
+
+ clk_disable_unprepare(spi100k->ick);
+ clk_disable_unprepare(spi100k->fck);
+
+ return ret;
+}
+
+static int omap1_spi100k_transfer_one_message(struct spi_master *master,
+ struct spi_message *m)
+{
+ struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
+ struct spi_device *spi = m->spi;
+ struct spi_transfer *t = NULL;
+ int cs_active = 0;
+ int status = 0;
+
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ if (t->tx_buf == NULL && t->rx_buf == NULL && t->len) {
+ break;
+ }
+ status = omap1_spi100k_setup_transfer(spi, t);
+ if (status < 0)
+ break;
+
+ if (!cs_active) {
+ omap1_spi100k_force_cs(spi100k, 1);
+ cs_active = 1;
+ }
+
+ if (t->len) {
+ unsigned count;
+
+ count = omap1_spi100k_txrx_pio(spi, t);
+ m->actual_length += count;
+
+ if (count != t->len) {
+ break;
+ }
+ }
+
+ spi_transfer_delay_exec(t);
+
+ /* ignore the "leave it on after last xfer" hint */
+
+ if (t->cs_change) {
+ omap1_spi100k_force_cs(spi100k, 0);
+ cs_active = 0;
+ }
+ }
+
+ status = omap1_spi100k_setup_transfer(spi, NULL);
+
+ if (cs_active)
+ omap1_spi100k_force_cs(spi100k, 0);
+
+ m->status = status;
+
+ spi_finalize_current_message(master);
+
+ return status;
+}
+
+static int omap1_spi100k_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct omap1_spi100k *spi100k;
+ int status = 0;
+
+ if (!pdev->id)
+ return -EINVAL;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*spi100k));
+ if (master == NULL) {
+ dev_dbg(&pdev->dev, "master allocation failed\n");
+ return -ENOMEM;
+ }
+
+ if (pdev->id != -1)
+ master->bus_num = pdev->id;
+
+ master->setup = omap1_spi100k_setup;
+ master->transfer_one_message = omap1_spi100k_transfer_one_message;
+ master->num_chipselect = 2;
+ master->mode_bits = MODEBITS;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
+ master->min_speed_hz = OMAP1_SPI100K_MAX_FREQ/(1<<16);
+ master->max_speed_hz = OMAP1_SPI100K_MAX_FREQ;
+ master->auto_runtime_pm = true;
+
+ spi100k = spi_master_get_devdata(master);
+
+ /*
+ * The memory region base address is taken as the platform_data.
+ * You should allocate this with ioremap() before initializing
+ * the SPI.
+ */
+ spi100k->base = (void __iomem *)dev_get_platdata(&pdev->dev);
+
+ spi100k->ick = devm_clk_get(&pdev->dev, "ick");
+ if (IS_ERR(spi100k->ick)) {
+ dev_dbg(&pdev->dev, "can't get spi100k_ick\n");
+ status = PTR_ERR(spi100k->ick);
+ goto err;
+ }
+
+ spi100k->fck = devm_clk_get(&pdev->dev, "fck");
+ if (IS_ERR(spi100k->fck)) {
+ dev_dbg(&pdev->dev, "can't get spi100k_fck\n");
+ status = PTR_ERR(spi100k->fck);
+ goto err;
+ }
+
+ status = clk_prepare_enable(spi100k->ick);
+ if (status != 0) {
+ dev_err(&pdev->dev, "failed to enable ick: %d\n", status);
+ goto err;
+ }
+
+ status = clk_prepare_enable(spi100k->fck);
+ if (status != 0) {
+ dev_err(&pdev->dev, "failed to enable fck: %d\n", status);
+ goto err_ick;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+
+ status = devm_spi_register_master(&pdev->dev, master);
+ if (status < 0)
+ goto err_fck;
+
+ return status;
+
+err_fck:
+ pm_runtime_disable(&pdev->dev);
+ clk_disable_unprepare(spi100k->fck);
+err_ick:
+ clk_disable_unprepare(spi100k->ick);
+err:
+ spi_master_put(master);
+ return status;
+}
+
+static int omap1_spi100k_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
+
+ pm_runtime_disable(&pdev->dev);
+
+ clk_disable_unprepare(spi100k->fck);
+ clk_disable_unprepare(spi100k->ick);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int omap1_spi100k_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
+
+ clk_disable_unprepare(spi100k->ick);
+ clk_disable_unprepare(spi100k->fck);
+
+ return 0;
+}
+
+static int omap1_spi100k_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_prepare_enable(spi100k->ick);
+ if (ret != 0) {
+ dev_err(dev, "Failed to enable ick: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(spi100k->fck);
+ if (ret != 0) {
+ dev_err(dev, "Failed to enable fck: %d\n", ret);
+ clk_disable_unprepare(spi100k->ick);
+ return ret;
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops omap1_spi100k_pm = {
+ SET_RUNTIME_PM_OPS(omap1_spi100k_runtime_suspend,
+ omap1_spi100k_runtime_resume, NULL)
+};
+
+static struct platform_driver omap1_spi100k_driver = {
+ .driver = {
+ .name = "omap1_spi100k",
+ .pm = &omap1_spi100k_pm,
+ },
+ .probe = omap1_spi100k_probe,
+ .remove = omap1_spi100k_remove,
+};
+
+module_platform_driver(omap1_spi100k_driver);
+
+MODULE_DESCRIPTION("OMAP7xx SPI 100k controller driver");
+MODULE_AUTHOR("Fabrice Crohas <fcrohas@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-omap-uwire.c b/drivers/spi/spi-omap-uwire.c
new file mode 100644
index 000000000..29198e681
--- /dev/null
+++ b/drivers/spi/spi-omap-uwire.c
@@ -0,0 +1,560 @@
+/*
+ * MicroWire interface driver for OMAP
+ *
+ * Copyright 2003 MontaVista Software Inc. <source@mvista.com>
+ *
+ * Ported to 2.6 OMAP uwire interface.
+ * Copyright (C) 2004 Texas Instruments.
+ *
+ * Generalization patches by Juha Yrjola <juha.yrjola@nokia.com>
+ *
+ * Copyright (C) 2005 David Brownell (ported to 2.6 SPI interface)
+ * Copyright (C) 2006 Nokia
+ *
+ * Many updates by Imre Deak <imre.deak@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+#include <linux/module.h>
+#include <linux/io.h>
+
+#include <asm/mach-types.h>
+#include <linux/soc/ti/omap1-io.h>
+#include <linux/soc/ti/omap1-soc.h>
+#include <linux/soc/ti/omap1-mux.h>
+
+/* FIXME address is now a platform device resource,
+ * and irqs should show there too...
+ */
+#define UWIRE_BASE_PHYS 0xFFFB3000
+
+/* uWire Registers: */
+#define UWIRE_IO_SIZE 0x20
+#define UWIRE_TDR 0x00
+#define UWIRE_RDR 0x00
+#define UWIRE_CSR 0x01
+#define UWIRE_SR1 0x02
+#define UWIRE_SR2 0x03
+#define UWIRE_SR3 0x04
+#define UWIRE_SR4 0x05
+#define UWIRE_SR5 0x06
+
+/* CSR bits */
+#define RDRB (1 << 15)
+#define CSRB (1 << 14)
+#define START (1 << 13)
+#define CS_CMD (1 << 12)
+
+/* SR1 or SR2 bits */
+#define UWIRE_READ_FALLING_EDGE 0x0001
+#define UWIRE_READ_RISING_EDGE 0x0000
+#define UWIRE_WRITE_FALLING_EDGE 0x0000
+#define UWIRE_WRITE_RISING_EDGE 0x0002
+#define UWIRE_CS_ACTIVE_LOW 0x0000
+#define UWIRE_CS_ACTIVE_HIGH 0x0004
+#define UWIRE_FREQ_DIV_2 0x0000
+#define UWIRE_FREQ_DIV_4 0x0008
+#define UWIRE_FREQ_DIV_8 0x0010
+#define UWIRE_CHK_READY 0x0020
+#define UWIRE_CLK_INVERTED 0x0040
+
+
+struct uwire_spi {
+ struct spi_bitbang bitbang;
+ struct clk *ck;
+};
+
+struct uwire_state {
+ unsigned div1_idx;
+};
+
+/* REVISIT compile time constant for idx_shift? */
+/*
+ * Or, put it in a structure which is used throughout the driver;
+ * that avoids having to issue two loads for each bit of static data.
+ */
+static unsigned int uwire_idx_shift;
+static void __iomem *uwire_base;
+
+static inline void uwire_write_reg(int idx, u16 val)
+{
+ __raw_writew(val, uwire_base + (idx << uwire_idx_shift));
+}
+
+static inline u16 uwire_read_reg(int idx)
+{
+ return __raw_readw(uwire_base + (idx << uwire_idx_shift));
+}
+
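+/*
+ * Each chip select owns a 6-bit mode field: CS0/CS1 live in SR1 and CS2/CS3
+ * in SR2, with the odd-numbered CS of each pair shifted up by 6 bits.
+ */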
+static inline void omap_uwire_configure_mode(u8 cs, unsigned long flags)
+{
+ u16 w, val = 0;
+ int shift, reg;
+
+ if (flags & UWIRE_CLK_INVERTED)
+ val ^= 0x03;
+ val = flags & 0x3f;
+ if (cs & 1)
+ shift = 6;
+ else
+ shift = 0;
+ if (cs <= 1)
+ reg = UWIRE_SR1;
+ else
+ reg = UWIRE_SR2;
+
+ w = uwire_read_reg(reg);
+ w &= ~(0x3f << shift);
+ w |= val << shift;
+ uwire_write_reg(reg, w);
+}
+
+static int wait_uwire_csr_flag(u16 mask, u16 val, int might_not_catch)
+{
+ u16 w;
+ int c = 0;
+ unsigned long max_jiffies = jiffies + HZ;
+
+ for (;;) {
+ w = uwire_read_reg(UWIRE_CSR);
+ if ((w & mask) == val)
+ break;
+ if (time_after(jiffies, max_jiffies)) {
+ printk(KERN_ERR "%s: timeout. reg=%#06x "
+ "mask=%#06x val=%#06x\n",
+ __func__, w, mask, val);
+ return -1;
+ }
+ c++;
+ if (might_not_catch && c > 64)
+ break;
+ }
+ return 0;
+}
+
+static void uwire_set_clk1_div(int div1_idx)
+{
+ u16 w;
+
+ w = uwire_read_reg(UWIRE_SR3);
+ w &= ~(0x03 << 1);
+ w |= div1_idx << 1;
+ uwire_write_reg(UWIRE_SR3, w);
+}
+
+static void uwire_chipselect(struct spi_device *spi, int value)
+{
+ struct uwire_state *ust = spi->controller_state;
+ u16 w;
+ int old_cs;
+
+
+ BUG_ON(wait_uwire_csr_flag(CSRB, 0, 0));
+
+ w = uwire_read_reg(UWIRE_CSR);
+ old_cs = (w >> 10) & 0x03;
+ if (value == BITBANG_CS_INACTIVE || old_cs != spi->chip_select) {
+ /* Deselect this CS, or the previous CS */
+ w &= ~CS_CMD;
+ uwire_write_reg(UWIRE_CSR, w);
+ }
+	/* activate specified chipselect */
+ if (value == BITBANG_CS_ACTIVE) {
+ uwire_set_clk1_div(ust->div1_idx);
+ /* invert clock? */
+ if (spi->mode & SPI_CPOL)
+ uwire_write_reg(UWIRE_SR4, 1);
+ else
+ uwire_write_reg(UWIRE_SR4, 0);
+
+ w = spi->chip_select << 10;
+ w |= CS_CMD;
+ uwire_write_reg(UWIRE_CSR, w);
+ }
+}
+
+static int uwire_txrx(struct spi_device *spi, struct spi_transfer *t)
+{
+ unsigned len = t->len;
+ unsigned bits = t->bits_per_word;
+ unsigned bytes;
+ u16 val, w;
+ int status = 0;
+
+ if (!t->tx_buf && !t->rx_buf)
+ return 0;
+
+ w = spi->chip_select << 10;
+ w |= CS_CMD;
+
+ if (t->tx_buf) {
+ const u8 *buf = t->tx_buf;
+
+ /* NOTE: DMA could be used for TX transfers */
+
+ /* write one or two bytes at a time */
+ while (len >= 1) {
+ /* tx bit 15 is first sent; we byteswap multibyte words
+ * (msb-first) on the way out from memory.
+ */
+ val = *buf++;
+ if (bits > 8) {
+ bytes = 2;
+ val |= *buf++ << 8;
+ } else
+ bytes = 1;
+ val <<= 16 - bits;
+
+#ifdef VERBOSE
+ pr_debug("%s: write-%d =%04x\n",
+ dev_name(&spi->dev), bits, val);
+#endif
+ if (wait_uwire_csr_flag(CSRB, 0, 0))
+ goto eio;
+
+ uwire_write_reg(UWIRE_TDR, val);
+
+ /* start write */
+ val = START | w | (bits << 5);
+
+ uwire_write_reg(UWIRE_CSR, val);
+ len -= bytes;
+
+ /* Wait till write actually starts.
+ * This is needed with MPU clock 60+ MHz.
+ * REVISIT: we may not have time to catch it...
+ */
+ if (wait_uwire_csr_flag(CSRB, CSRB, 1))
+ goto eio;
+
+ status += bytes;
+ }
+
+ /* REVISIT: save this for later to get more i/o overlap */
+ if (wait_uwire_csr_flag(CSRB, 0, 0))
+ goto eio;
+
+ } else if (t->rx_buf) {
+ u8 *buf = t->rx_buf;
+
+ /* read one or two bytes at a time */
+ while (len) {
+ if (bits > 8) {
+ bytes = 2;
+ } else
+ bytes = 1;
+
+ /* start read */
+ val = START | w | (bits << 0);
+ uwire_write_reg(UWIRE_CSR, val);
+ len -= bytes;
+
+ /* Wait till read actually starts */
+ (void) wait_uwire_csr_flag(CSRB, CSRB, 1);
+
+ if (wait_uwire_csr_flag(RDRB | CSRB,
+ RDRB, 0))
+ goto eio;
+
+ /* rx bit 0 is last received; multibyte words will
+ * be properly byteswapped on the way to memory.
+ */
+ val = uwire_read_reg(UWIRE_RDR);
+ val &= (1 << bits) - 1;
+ *buf++ = (u8) val;
+ if (bytes == 2)
+ *buf++ = val >> 8;
+ status += bytes;
+#ifdef VERBOSE
+ pr_debug("%s: read-%d =%04x\n",
+ dev_name(&spi->dev), bits, val);
+#endif
+
+ }
+ }
+ return status;
+eio:
+ return -EIO;
+}
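
Editor's note on the TX path above: the controller clocks out bit 15 of UWIRE_TDR first, which is why uwire_txrx() left-justifies each word with "val <<= 16 - bits" before writing it. A minimal stand-alone sketch of that justification step, using a 12-bit value chosen purely for illustration:

#include <stdint.h>
#include <stdio.h>

/* Left-justify a narrow word so its MSB lands in bit 15, mirroring the
 * "val <<= 16 - bits" step in uwire_txrx(). */
static uint16_t uwire_justify(uint16_t raw, unsigned int bits)
{
	return (uint16_t)(raw << (16 - bits));
}

int main(void)
{
	/* the 12-bit value 0xABC becomes 0xABC0: payload bit 11 now sits
	 * in bit 15 and is the first bit on the wire */
	printf("%04x\n", uwire_justify(0xABC, 12));
	return 0;
}

Compiled on its own this prints abc0, showing the word already shifted into transmit position.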
+
+static int uwire_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct uwire_state *ust = spi->controller_state;
+ struct uwire_spi *uwire;
+ unsigned flags = 0;
+ unsigned hz;
+ unsigned long rate;
+ int div1_idx;
+ int div1;
+ int div2;
+ int status;
+
+ uwire = spi_master_get_devdata(spi->master);
+
+ /* mode 0..3, clock inverted separately;
+ * standard nCS signaling;
+ * don't treat DI=high as "not ready"
+ */
+ if (spi->mode & SPI_CS_HIGH)
+ flags |= UWIRE_CS_ACTIVE_HIGH;
+
+ if (spi->mode & SPI_CPOL)
+ flags |= UWIRE_CLK_INVERTED;
+
+ switch (spi->mode & SPI_MODE_X_MASK) {
+ case SPI_MODE_0:
+ case SPI_MODE_3:
+ flags |= UWIRE_WRITE_FALLING_EDGE | UWIRE_READ_RISING_EDGE;
+ break;
+ case SPI_MODE_1:
+ case SPI_MODE_2:
+ flags |= UWIRE_WRITE_RISING_EDGE | UWIRE_READ_FALLING_EDGE;
+ break;
+ }
+
+ /* assume it's already enabled */
+ rate = clk_get_rate(uwire->ck);
+
+ if (t != NULL)
+ hz = t->speed_hz;
+ else
+ hz = spi->max_speed_hz;
+
+ if (!hz) {
+ pr_debug("%s: zero speed?\n", dev_name(&spi->dev));
+ status = -EINVAL;
+ goto done;
+ }
+
+ /* F_INT = mpu_xor_clk / DIV1 */
+ for (div1_idx = 0; div1_idx < 4; div1_idx++) {
+ switch (div1_idx) {
+ case 0:
+ div1 = 2;
+ break;
+ case 1:
+ div1 = 4;
+ break;
+ case 2:
+ div1 = 7;
+ break;
+ default:
+ case 3:
+ div1 = 10;
+ break;
+ }
+ div2 = (rate / div1 + hz - 1) / hz;
+ if (div2 <= 8)
+ break;
+ }
+ if (div1_idx == 4) {
+ pr_debug("%s: lowest clock %ld, need %d\n",
+ dev_name(&spi->dev), rate / 10 / 8, hz);
+ status = -EDOM;
+ goto done;
+ }
+
+	/* we have to cache this and reset it in uwire_chipselect() as this
+	 * is a global parameter and another uwire device can change it
+	 * under us */
+ ust->div1_idx = div1_idx;
+ uwire_set_clk1_div(div1_idx);
+
+ rate /= div1;
+
+ switch (div2) {
+ case 0:
+ case 1:
+ case 2:
+ flags |= UWIRE_FREQ_DIV_2;
+ rate /= 2;
+ break;
+ case 3:
+ case 4:
+ flags |= UWIRE_FREQ_DIV_4;
+ rate /= 4;
+ break;
+ case 5:
+ case 6:
+ case 7:
+ case 8:
+ flags |= UWIRE_FREQ_DIV_8;
+ rate /= 8;
+ break;
+ }
+ omap_uwire_configure_mode(spi->chip_select, flags);
+ pr_debug("%s: uwire flags %02x, armxor %lu KHz, SCK %lu KHz\n",
+ __func__, flags,
+ clk_get_rate(uwire->ck) / 1000,
+ rate / 1000);
+ status = 0;
+done:
+ return status;
+}
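
uwire_setup_transfer() above picks the SCK rate in two stages: DIV1 (2, 4, 7 or 10) divides the functional clock down to F_INT, and DIV2 is then rounded up to the 2, 4 or 8 the hardware supports; the first DIV1 whose required DIV2 fits within 8 is kept. The following user-space sketch reruns the same search; the 48 MHz clock and 1 MHz target are assumptions made only for the example, not values taken from this driver:

#include <stdio.h>

/* Re-run the divider search from uwire_setup_transfer() in user space:
 * pick the first DIV1 in {2, 4, 7, 10} whose required DIV2 fits in 8,
 * then round DIV2 up to the 2/4/8 values the hardware supports. */
int main(void)
{
	static const int div1_tab[] = { 2, 4, 7, 10 };
	unsigned long rate = 48000000;	/* assumed functional clock */
	unsigned int hz = 1000000;	/* assumed requested SCK */
	int i;

	for (i = 0; i < 4; i++) {
		int div1 = div1_tab[i];
		int div2 = (rate / div1 + hz - 1) / hz;

		if (div2 <= 8) {
			int hw_div2 = div2 <= 2 ? 2 : div2 <= 4 ? 4 : 8;

			printf("DIV1=%d DIV2=%d -> SCK ~= %lu Hz\n",
			       div1, hw_div2, rate / div1 / hw_div2);
			return 0;
		}
	}
	printf("cannot reach %u Hz\n", hz);
	return 0;
}

With these inputs the search settles on DIV1=7 and DIV2=8, for an SCK of roughly 857 kHz.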
+
+static int uwire_setup(struct spi_device *spi)
+{
+ struct uwire_state *ust = spi->controller_state;
+ bool initial_setup = false;
+ int status;
+
+ if (ust == NULL) {
+ ust = kzalloc(sizeof(*ust), GFP_KERNEL);
+ if (ust == NULL)
+ return -ENOMEM;
+ spi->controller_state = ust;
+ initial_setup = true;
+ }
+
+ status = uwire_setup_transfer(spi, NULL);
+ if (status && initial_setup)
+ kfree(ust);
+
+ return status;
+}
+
+static void uwire_cleanup(struct spi_device *spi)
+{
+ kfree(spi->controller_state);
+}
+
+static void uwire_off(struct uwire_spi *uwire)
+{
+ uwire_write_reg(UWIRE_SR3, 0);
+ clk_disable_unprepare(uwire->ck);
+ spi_master_put(uwire->bitbang.master);
+}
+
+static int uwire_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct uwire_spi *uwire;
+ int status;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*uwire));
+ if (!master)
+ return -ENODEV;
+
+ uwire = spi_master_get_devdata(master);
+
+ uwire_base = devm_ioremap(&pdev->dev, UWIRE_BASE_PHYS, UWIRE_IO_SIZE);
+ if (!uwire_base) {
+ dev_dbg(&pdev->dev, "can't ioremap UWIRE\n");
+ spi_master_put(master);
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, uwire);
+
+ uwire->ck = devm_clk_get(&pdev->dev, "fck");
+ if (IS_ERR(uwire->ck)) {
+ status = PTR_ERR(uwire->ck);
+ dev_dbg(&pdev->dev, "no functional clock?\n");
+ spi_master_put(master);
+ return status;
+ }
+ clk_prepare_enable(uwire->ck);
+
+ if (cpu_is_omap7xx())
+ uwire_idx_shift = 1;
+ else
+ uwire_idx_shift = 2;
+
+ uwire_write_reg(UWIRE_SR3, 1);
+
+ /* the spi->mode bits understood by this driver: */
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 16);
+ master->flags = SPI_MASTER_HALF_DUPLEX;
+
+ master->bus_num = 2; /* "official" */
+ master->num_chipselect = 4;
+ master->setup = uwire_setup;
+ master->cleanup = uwire_cleanup;
+
+ uwire->bitbang.master = master;
+ uwire->bitbang.chipselect = uwire_chipselect;
+ uwire->bitbang.setup_transfer = uwire_setup_transfer;
+ uwire->bitbang.txrx_bufs = uwire_txrx;
+
+ status = spi_bitbang_start(&uwire->bitbang);
+ if (status < 0) {
+ uwire_off(uwire);
+ }
+ return status;
+}
+
+static int uwire_remove(struct platform_device *pdev)
+{
+ struct uwire_spi *uwire = platform_get_drvdata(pdev);
+
+ // FIXME remove all child devices, somewhere ...
+
+ spi_bitbang_stop(&uwire->bitbang);
+ uwire_off(uwire);
+ return 0;
+}
+
+/* work with hotplug and coldplug */
+MODULE_ALIAS("platform:omap_uwire");
+
+static struct platform_driver uwire_driver = {
+ .driver = {
+ .name = "omap_uwire",
+ },
+ .probe = uwire_probe,
+ .remove = uwire_remove,
+ // suspend ... unuse ck
+ // resume ... use ck
+};
+
+static int __init omap_uwire_init(void)
+{
+ /* FIXME move these into the relevant board init code. also, include
+ * H3 support; it uses tsc2101 like H2 (on a different chipselect).
+ */
+
+ if (machine_is_omap_h2()) {
+ /* defaults: W21 SDO, U18 SDI, V19 SCL */
+ omap_cfg_reg(N14_1610_UWIRE_CS0);
+ omap_cfg_reg(N15_1610_UWIRE_CS1);
+ }
+ return platform_driver_register(&uwire_driver);
+}
+
+static void __exit omap_uwire_exit(void)
+{
+ platform_driver_unregister(&uwire_driver);
+}
+
+subsys_initcall(omap_uwire_init);
+module_exit(omap_uwire_exit);
+
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
new file mode 100644
index 000000000..6ba9b0d77
--- /dev/null
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -0,0 +1,1617 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * OMAP2 McSPI controller driver
+ *
+ * Copyright (C) 2005, 2006 Nokia Corporation
+ * Author: Samuel Ortiz <samuel.ortiz@nokia.com> and
+ * Juha Yrjola <juha.yrjola@nokia.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/gcd.h>
+
+#include <linux/spi/spi.h>
+
+#include <linux/platform_data/spi-omap2-mcspi.h>
+
+#define OMAP2_MCSPI_MAX_FREQ 48000000
+#define OMAP2_MCSPI_MAX_DIVIDER 4096
+#define OMAP2_MCSPI_MAX_FIFODEPTH 64
+#define OMAP2_MCSPI_MAX_FIFOWCNT 0xFFFF
+#define SPI_AUTOSUSPEND_TIMEOUT 2000
+
+#define OMAP2_MCSPI_REVISION 0x00
+#define OMAP2_MCSPI_SYSSTATUS 0x14
+#define OMAP2_MCSPI_IRQSTATUS 0x18
+#define OMAP2_MCSPI_IRQENABLE 0x1c
+#define OMAP2_MCSPI_WAKEUPENABLE 0x20
+#define OMAP2_MCSPI_SYST 0x24
+#define OMAP2_MCSPI_MODULCTRL 0x28
+#define OMAP2_MCSPI_XFERLEVEL 0x7c
+
+/* per-channel banks, 0x14 bytes each, first is: */
+#define OMAP2_MCSPI_CHCONF0 0x2c
+#define OMAP2_MCSPI_CHSTAT0 0x30
+#define OMAP2_MCSPI_CHCTRL0 0x34
+#define OMAP2_MCSPI_TX0 0x38
+#define OMAP2_MCSPI_RX0 0x3c
+
+/* per-register bitmasks: */
+#define OMAP2_MCSPI_IRQSTATUS_EOW BIT(17)
+
+#define OMAP2_MCSPI_MODULCTRL_SINGLE BIT(0)
+#define OMAP2_MCSPI_MODULCTRL_MS BIT(2)
+#define OMAP2_MCSPI_MODULCTRL_STEST BIT(3)
+
+#define OMAP2_MCSPI_CHCONF_PHA BIT(0)
+#define OMAP2_MCSPI_CHCONF_POL BIT(1)
+#define OMAP2_MCSPI_CHCONF_CLKD_MASK (0x0f << 2)
+#define OMAP2_MCSPI_CHCONF_EPOL BIT(6)
+#define OMAP2_MCSPI_CHCONF_WL_MASK (0x1f << 7)
+#define OMAP2_MCSPI_CHCONF_TRM_RX_ONLY BIT(12)
+#define OMAP2_MCSPI_CHCONF_TRM_TX_ONLY BIT(13)
+#define OMAP2_MCSPI_CHCONF_TRM_MASK (0x03 << 12)
+#define OMAP2_MCSPI_CHCONF_DMAW BIT(14)
+#define OMAP2_MCSPI_CHCONF_DMAR BIT(15)
+#define OMAP2_MCSPI_CHCONF_DPE0 BIT(16)
+#define OMAP2_MCSPI_CHCONF_DPE1 BIT(17)
+#define OMAP2_MCSPI_CHCONF_IS BIT(18)
+#define OMAP2_MCSPI_CHCONF_TURBO BIT(19)
+#define OMAP2_MCSPI_CHCONF_FORCE BIT(20)
+#define OMAP2_MCSPI_CHCONF_FFET BIT(27)
+#define OMAP2_MCSPI_CHCONF_FFER BIT(28)
+#define OMAP2_MCSPI_CHCONF_CLKG BIT(29)
+
+#define OMAP2_MCSPI_CHSTAT_RXS BIT(0)
+#define OMAP2_MCSPI_CHSTAT_TXS BIT(1)
+#define OMAP2_MCSPI_CHSTAT_EOT BIT(2)
+#define OMAP2_MCSPI_CHSTAT_TXFFE BIT(3)
+
+#define OMAP2_MCSPI_CHCTRL_EN BIT(0)
+#define OMAP2_MCSPI_CHCTRL_EXTCLK_MASK (0xff << 8)
+
+#define OMAP2_MCSPI_WAKEUPENABLE_WKEN BIT(0)
+
+/* We have 2 DMA channels per CS, one for RX and one for TX */
+struct omap2_mcspi_dma {
+ struct dma_chan *dma_tx;
+ struct dma_chan *dma_rx;
+
+ struct completion dma_tx_completion;
+ struct completion dma_rx_completion;
+
+ char dma_rx_ch_name[14];
+ char dma_tx_ch_name[14];
+};
+
+/* use PIO for small transfers, avoiding DMA setup/teardown overhead and
+ * cache operations; a better heuristic would also consider word size
+ * and bitrate.
+ */
+#define DMA_MIN_BYTES 160
+
+
+/*
+ * Used for context save and restore, structure members to be updated whenever
+ * corresponding registers are modified.
+ */
+struct omap2_mcspi_regs {
+ u32 modulctrl;
+ u32 wakeupenable;
+ struct list_head cs;
+};
+
+struct omap2_mcspi {
+ struct completion txdone;
+ struct spi_master *master;
+ /* Virtual base address of the controller */
+ void __iomem *base;
+ unsigned long phys;
+ /* SPI1 has 4 channels, while SPI2 has 2 */
+ struct omap2_mcspi_dma *dma_channels;
+ struct device *dev;
+ struct omap2_mcspi_regs ctx;
+ int fifo_depth;
+ bool slave_aborted;
+ unsigned int pin_dir:1;
+ size_t max_xfer_len;
+};
+
+struct omap2_mcspi_cs {
+ void __iomem *base;
+ unsigned long phys;
+ int word_len;
+ u16 mode;
+ struct list_head node;
+ /* Context save and restore shadow register */
+ u32 chconf0, chctrl0;
+};
+
+static inline void mcspi_write_reg(struct spi_master *master,
+ int idx, u32 val)
+{
+ struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
+
+ writel_relaxed(val, mcspi->base + idx);
+}
+
+static inline u32 mcspi_read_reg(struct spi_master *master, int idx)
+{
+ struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
+
+ return readl_relaxed(mcspi->base + idx);
+}
+
+static inline void mcspi_write_cs_reg(const struct spi_device *spi,
+ int idx, u32 val)
+{
+ struct omap2_mcspi_cs *cs = spi->controller_state;
+
+ writel_relaxed(val, cs->base + idx);
+}
+
+static inline u32 mcspi_read_cs_reg(const struct spi_device *spi, int idx)
+{
+ struct omap2_mcspi_cs *cs = spi->controller_state;
+
+ return readl_relaxed(cs->base + idx);
+}
+
+static inline u32 mcspi_cached_chconf0(const struct spi_device *spi)
+{
+ struct omap2_mcspi_cs *cs = spi->controller_state;
+
+ return cs->chconf0;
+}
+
+static inline void mcspi_write_chconf0(const struct spi_device *spi, u32 val)
+{
+ struct omap2_mcspi_cs *cs = spi->controller_state;
+
+ cs->chconf0 = val;
+ mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, val);
+ mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0);
+}
+
+static inline int mcspi_bytes_per_word(int word_len)
+{
+ if (word_len <= 8)
+ return 1;
+ else if (word_len <= 16)
+ return 2;
+ else /* word_len <= 32 */
+ return 4;
+}
+
+static void omap2_mcspi_set_dma_req(const struct spi_device *spi,
+ int is_read, int enable)
+{
+ u32 l, rw;
+
+ l = mcspi_cached_chconf0(spi);
+
+ if (is_read) /* 1 is read, 0 write */
+ rw = OMAP2_MCSPI_CHCONF_DMAR;
+ else
+ rw = OMAP2_MCSPI_CHCONF_DMAW;
+
+ if (enable)
+ l |= rw;
+ else
+ l &= ~rw;
+
+ mcspi_write_chconf0(spi, l);
+}
+
+static void omap2_mcspi_set_enable(const struct spi_device *spi, int enable)
+{
+ struct omap2_mcspi_cs *cs = spi->controller_state;
+ u32 l;
+
+ l = cs->chctrl0;
+ if (enable)
+ l |= OMAP2_MCSPI_CHCTRL_EN;
+ else
+ l &= ~OMAP2_MCSPI_CHCTRL_EN;
+ cs->chctrl0 = l;
+ mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, cs->chctrl0);
+	/* Flush posted writes */
+ mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCTRL0);
+}
+
+static void omap2_mcspi_set_cs(struct spi_device *spi, bool enable)
+{
+ struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
+ u32 l;
+
+ /* The controller handles the inverted chip selects
+ * using the OMAP2_MCSPI_CHCONF_EPOL bit so revert
+ * the inversion from the core spi_set_cs function.
+ */
+ if (spi->mode & SPI_CS_HIGH)
+ enable = !enable;
+
+ if (spi->controller_state) {
+ int err = pm_runtime_resume_and_get(mcspi->dev);
+ if (err < 0) {
+ dev_err(mcspi->dev, "failed to get sync: %d\n", err);
+ return;
+ }
+
+ l = mcspi_cached_chconf0(spi);
+
+ if (enable)
+ l &= ~OMAP2_MCSPI_CHCONF_FORCE;
+ else
+ l |= OMAP2_MCSPI_CHCONF_FORCE;
+
+ mcspi_write_chconf0(spi, l);
+
+ pm_runtime_mark_last_busy(mcspi->dev);
+ pm_runtime_put_autosuspend(mcspi->dev);
+ }
+}
+
+static void omap2_mcspi_set_mode(struct spi_master *master)
+{
+ struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
+ struct omap2_mcspi_regs *ctx = &mcspi->ctx;
+ u32 l;
+
+ /*
+ * Choose master or slave mode
+ */
+ l = mcspi_read_reg(master, OMAP2_MCSPI_MODULCTRL);
+ l &= ~(OMAP2_MCSPI_MODULCTRL_STEST);
+ if (spi_controller_is_slave(master)) {
+ l |= (OMAP2_MCSPI_MODULCTRL_MS);
+ } else {
+ l &= ~(OMAP2_MCSPI_MODULCTRL_MS);
+ l |= OMAP2_MCSPI_MODULCTRL_SINGLE;
+ }
+ mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, l);
+
+ ctx->modulctrl = l;
+}
+
+static void omap2_mcspi_set_fifo(const struct spi_device *spi,
+ struct spi_transfer *t, int enable)
+{
+ struct spi_master *master = spi->master;
+ struct omap2_mcspi_cs *cs = spi->controller_state;
+ struct omap2_mcspi *mcspi;
+ unsigned int wcnt;
+ int max_fifo_depth, bytes_per_word;
+ u32 chconf, xferlevel;
+
+ mcspi = spi_master_get_devdata(master);
+
+ chconf = mcspi_cached_chconf0(spi);
+ if (enable) {
+ bytes_per_word = mcspi_bytes_per_word(cs->word_len);
+ if (t->len % bytes_per_word != 0)
+ goto disable_fifo;
+
+ if (t->rx_buf != NULL && t->tx_buf != NULL)
+ max_fifo_depth = OMAP2_MCSPI_MAX_FIFODEPTH / 2;
+ else
+ max_fifo_depth = OMAP2_MCSPI_MAX_FIFODEPTH;
+
+ wcnt = t->len / bytes_per_word;
+ if (wcnt > OMAP2_MCSPI_MAX_FIFOWCNT)
+ goto disable_fifo;
+
+ xferlevel = wcnt << 16;
+ if (t->rx_buf != NULL) {
+ chconf |= OMAP2_MCSPI_CHCONF_FFER;
+ xferlevel |= (bytes_per_word - 1) << 8;
+ }
+
+ if (t->tx_buf != NULL) {
+ chconf |= OMAP2_MCSPI_CHCONF_FFET;
+ xferlevel |= bytes_per_word - 1;
+ }
+
+ mcspi_write_reg(master, OMAP2_MCSPI_XFERLEVEL, xferlevel);
+ mcspi_write_chconf0(spi, chconf);
+ mcspi->fifo_depth = max_fifo_depth;
+
+ return;
+ }
+
+disable_fifo:
+ if (t->rx_buf != NULL)
+ chconf &= ~OMAP2_MCSPI_CHCONF_FFER;
+
+ if (t->tx_buf != NULL)
+ chconf &= ~OMAP2_MCSPI_CHCONF_FFET;
+
+ mcspi_write_chconf0(spi, chconf);
+ mcspi->fifo_depth = 0;
+}
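
omap2_mcspi_set_fifo() packs OMAP2_MCSPI_XFERLEVEL from three pieces: the word count in bits 31:16 and the per-word FIFO levels for RX and TX in bits 15:8 and 7:0. A small sketch that recomputes the register value for an assumed 64-byte full-duplex transfer with 16-bit words; mcspi_xferlevel() is a helper invented for this example, not part of the driver:

#include <stdint.h>
#include <stdio.h>

/* Recompute the XFERLEVEL value programmed by omap2_mcspi_set_fifo():
 * word count in bits 31:16, RX level in bits 15:8, TX level in bits 7:0. */
static uint32_t mcspi_xferlevel(unsigned int len, unsigned int bytes_per_word,
				int has_rx, int has_tx)
{
	uint32_t xferlevel = (uint32_t)(len / bytes_per_word) << 16;

	if (has_rx)
		xferlevel |= (bytes_per_word - 1) << 8;
	if (has_tx)
		xferlevel |= bytes_per_word - 1;
	return xferlevel;
}

int main(void)
{
	/* 64-byte full-duplex transfer with 16-bit words -> 32 words */
	printf("XFERLEVEL = 0x%08x\n", mcspi_xferlevel(64, 2, 1, 1));
	return 0;
}

For that transfer it prints XFERLEVEL = 0x00200101, i.e. 32 words with both FIFO levels programmed to bytes_per_word - 1.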
+
+static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
+{
+ unsigned long timeout;
+
+ timeout = jiffies + msecs_to_jiffies(1000);
+ while (!(readl_relaxed(reg) & bit)) {
+ if (time_after(jiffies, timeout)) {
+ if (!(readl_relaxed(reg) & bit))
+ return -ETIMEDOUT;
+ else
+ return 0;
+ }
+ cpu_relax();
+ }
+ return 0;
+}
+
+static int mcspi_wait_for_completion(struct omap2_mcspi *mcspi,
+ struct completion *x)
+{
+ if (spi_controller_is_slave(mcspi->master)) {
+ if (wait_for_completion_interruptible(x) ||
+ mcspi->slave_aborted)
+ return -EINTR;
+ } else {
+ wait_for_completion(x);
+ }
+
+ return 0;
+}
+
+static void omap2_mcspi_rx_callback(void *data)
+{
+ struct spi_device *spi = data;
+ struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
+ struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi->chip_select];
+
+ /* We must disable the DMA RX request */
+ omap2_mcspi_set_dma_req(spi, 1, 0);
+
+ complete(&mcspi_dma->dma_rx_completion);
+}
+
+static void omap2_mcspi_tx_callback(void *data)
+{
+ struct spi_device *spi = data;
+ struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
+ struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi->chip_select];
+
+ /* We must disable the DMA TX request */
+ omap2_mcspi_set_dma_req(spi, 0, 0);
+
+ complete(&mcspi_dma->dma_tx_completion);
+}
+
+static void omap2_mcspi_tx_dma(struct spi_device *spi,
+ struct spi_transfer *xfer,
+ struct dma_slave_config cfg)
+{
+ struct omap2_mcspi *mcspi;
+ struct omap2_mcspi_dma *mcspi_dma;
+ struct dma_async_tx_descriptor *tx;
+
+ mcspi = spi_master_get_devdata(spi->master);
+ mcspi_dma = &mcspi->dma_channels[spi->chip_select];
+
+ dmaengine_slave_config(mcspi_dma->dma_tx, &cfg);
+
+ tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, xfer->tx_sg.sgl,
+ xfer->tx_sg.nents,
+ DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (tx) {
+ tx->callback = omap2_mcspi_tx_callback;
+ tx->callback_param = spi;
+ dmaengine_submit(tx);
+ } else {
+ /* FIXME: fall back to PIO? */
+ }
+ dma_async_issue_pending(mcspi_dma->dma_tx);
+ omap2_mcspi_set_dma_req(spi, 0, 1);
+}
+
+static unsigned
+omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
+ struct dma_slave_config cfg,
+ unsigned es)
+{
+ struct omap2_mcspi *mcspi;
+ struct omap2_mcspi_dma *mcspi_dma;
+ unsigned int count, transfer_reduction = 0;
+ struct scatterlist *sg_out[2];
+ int nb_sizes = 0, out_mapped_nents[2], ret, x;
+ size_t sizes[2];
+ u32 l;
+ int elements = 0;
+ int word_len, element_count;
+ struct omap2_mcspi_cs *cs = spi->controller_state;
+ void __iomem *chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0;
+ struct dma_async_tx_descriptor *tx;
+
+ mcspi = spi_master_get_devdata(spi->master);
+ mcspi_dma = &mcspi->dma_channels[spi->chip_select];
+ count = xfer->len;
+
+ /*
+	 * The "End-of-Transfer Procedure" section for DMA RX in the OMAP35x
+	 * TRM mentions reducing the DMA transfer length by one element in
+	 * master normal mode.
+ */
+ if (mcspi->fifo_depth == 0)
+ transfer_reduction = es;
+
+ word_len = cs->word_len;
+ l = mcspi_cached_chconf0(spi);
+
+ if (word_len <= 8)
+ element_count = count;
+ else if (word_len <= 16)
+ element_count = count >> 1;
+ else /* word_len <= 32 */
+ element_count = count >> 2;
+
+
+ dmaengine_slave_config(mcspi_dma->dma_rx, &cfg);
+
+ /*
+ * Reduce DMA transfer length by one more if McSPI is
+ * configured in turbo mode.
+ */
+ if ((l & OMAP2_MCSPI_CHCONF_TURBO) && mcspi->fifo_depth == 0)
+ transfer_reduction += es;
+
+ if (transfer_reduction) {
+ /* Split sgl into two. The second sgl won't be used. */
+ sizes[0] = count - transfer_reduction;
+ sizes[1] = transfer_reduction;
+ nb_sizes = 2;
+ } else {
+ /*
+ * Don't bother splitting the sgl. This essentially
+ * clones the original sgl.
+ */
+ sizes[0] = count;
+ nb_sizes = 1;
+ }
+
+ ret = sg_split(xfer->rx_sg.sgl, xfer->rx_sg.nents, 0, nb_sizes,
+ sizes, sg_out, out_mapped_nents, GFP_KERNEL);
+
+ if (ret < 0) {
+ dev_err(&spi->dev, "sg_split failed\n");
+ return 0;
+ }
+
+ tx = dmaengine_prep_slave_sg(mcspi_dma->dma_rx, sg_out[0],
+ out_mapped_nents[0], DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (tx) {
+ tx->callback = omap2_mcspi_rx_callback;
+ tx->callback_param = spi;
+ dmaengine_submit(tx);
+ } else {
+ /* FIXME: fall back to PIO? */
+ }
+
+ dma_async_issue_pending(mcspi_dma->dma_rx);
+ omap2_mcspi_set_dma_req(spi, 1, 1);
+
+ ret = mcspi_wait_for_completion(mcspi, &mcspi_dma->dma_rx_completion);
+ if (ret || mcspi->slave_aborted) {
+ dmaengine_terminate_sync(mcspi_dma->dma_rx);
+ omap2_mcspi_set_dma_req(spi, 1, 0);
+ return 0;
+ }
+
+ for (x = 0; x < nb_sizes; x++)
+ kfree(sg_out[x]);
+
+ if (mcspi->fifo_depth > 0)
+ return count;
+
+ /*
+ * Due to the DMA transfer length reduction the missing bytes must
+ * be read manually to receive all of the expected data.
+ */
+ omap2_mcspi_set_enable(spi, 0);
+
+ elements = element_count - 1;
+
+ if (l & OMAP2_MCSPI_CHCONF_TURBO) {
+ elements--;
+
+ if (!mcspi_wait_for_reg_bit(chstat_reg,
+ OMAP2_MCSPI_CHSTAT_RXS)) {
+ u32 w;
+
+ w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
+ if (word_len <= 8)
+ ((u8 *)xfer->rx_buf)[elements++] = w;
+ else if (word_len <= 16)
+ ((u16 *)xfer->rx_buf)[elements++] = w;
+ else /* word_len <= 32 */
+ ((u32 *)xfer->rx_buf)[elements++] = w;
+ } else {
+ int bytes_per_word = mcspi_bytes_per_word(word_len);
+ dev_err(&spi->dev, "DMA RX penultimate word empty\n");
+ count -= (bytes_per_word << 1);
+ omap2_mcspi_set_enable(spi, 1);
+ return count;
+ }
+ }
+ if (!mcspi_wait_for_reg_bit(chstat_reg, OMAP2_MCSPI_CHSTAT_RXS)) {
+ u32 w;
+
+ w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
+ if (word_len <= 8)
+ ((u8 *)xfer->rx_buf)[elements] = w;
+ else if (word_len <= 16)
+ ((u16 *)xfer->rx_buf)[elements] = w;
+ else /* word_len <= 32 */
+ ((u32 *)xfer->rx_buf)[elements] = w;
+ } else {
+ dev_err(&spi->dev, "DMA RX last word empty\n");
+ count -= mcspi_bytes_per_word(word_len);
+ }
+ omap2_mcspi_set_enable(spi, 1);
+ return count;
+}
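
The sg_split() handling in omap2_mcspi_rx_dma() exists because, with no FIFO and in master mode, the DMA request has to stop one element short (two elements in turbo mode) and the tail is then read back over PIO after DMA completes. A sketch of just that length bookkeeping, with the transfer size, word width and turbo setting picked only for illustration:

#include <stdio.h>

/* Sketch of the length bookkeeping done before dmaengine_prep_slave_sg()
 * in omap2_mcspi_rx_dma(): with no FIFO and in master mode the last
 * element (two in turbo mode) is withheld from DMA and read by PIO. */
int main(void)
{
	unsigned int count = 32;	/* assumed transfer length in bytes */
	unsigned int es = 2;		/* element size: 16-bit words */
	int fifo_depth = 0;		/* non-FIFO mode */
	int turbo = 1;			/* OMAP2_MCSPI_CHCONF_TURBO set */
	unsigned int reduction = 0;

	if (fifo_depth == 0)
		reduction = es;
	if (turbo && fifo_depth == 0)
		reduction += es;

	printf("DMA part: %u bytes, PIO tail: %u bytes\n",
	       count - reduction, reduction);
	return 0;
}

Here the 32-byte transfer is split into a 28-byte DMA part and a 4-byte tail that the driver reads from OMAP2_MCSPI_RX0 by hand.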
+
+static unsigned
+omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
+{
+ struct omap2_mcspi *mcspi;
+ struct omap2_mcspi_cs *cs = spi->controller_state;
+ struct omap2_mcspi_dma *mcspi_dma;
+ unsigned int count;
+ u8 *rx;
+ const u8 *tx;
+ struct dma_slave_config cfg;
+ enum dma_slave_buswidth width;
+ unsigned es;
+ void __iomem *chstat_reg;
+ void __iomem *irqstat_reg;
+ int wait_res;
+
+ mcspi = spi_master_get_devdata(spi->master);
+ mcspi_dma = &mcspi->dma_channels[spi->chip_select];
+
+ if (cs->word_len <= 8) {
+ width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ es = 1;
+ } else if (cs->word_len <= 16) {
+ width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ es = 2;
+ } else {
+ width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ es = 4;
+ }
+
+ count = xfer->len;
+
+ memset(&cfg, 0, sizeof(cfg));
+ cfg.src_addr = cs->phys + OMAP2_MCSPI_RX0;
+ cfg.dst_addr = cs->phys + OMAP2_MCSPI_TX0;
+ cfg.src_addr_width = width;
+ cfg.dst_addr_width = width;
+ cfg.src_maxburst = 1;
+ cfg.dst_maxburst = 1;
+
+ rx = xfer->rx_buf;
+ tx = xfer->tx_buf;
+
+ mcspi->slave_aborted = false;
+ reinit_completion(&mcspi_dma->dma_tx_completion);
+ reinit_completion(&mcspi_dma->dma_rx_completion);
+ reinit_completion(&mcspi->txdone);
+ if (tx) {
+ /* Enable EOW IRQ to know end of tx in slave mode */
+ if (spi_controller_is_slave(spi->master))
+ mcspi_write_reg(spi->master,
+ OMAP2_MCSPI_IRQENABLE,
+ OMAP2_MCSPI_IRQSTATUS_EOW);
+ omap2_mcspi_tx_dma(spi, xfer, cfg);
+ }
+
+ if (rx != NULL)
+ count = omap2_mcspi_rx_dma(spi, xfer, cfg, es);
+
+ if (tx != NULL) {
+ int ret;
+
+ ret = mcspi_wait_for_completion(mcspi, &mcspi_dma->dma_tx_completion);
+ if (ret || mcspi->slave_aborted) {
+ dmaengine_terminate_sync(mcspi_dma->dma_tx);
+ omap2_mcspi_set_dma_req(spi, 0, 0);
+ return 0;
+ }
+
+ if (spi_controller_is_slave(mcspi->master)) {
+ ret = mcspi_wait_for_completion(mcspi, &mcspi->txdone);
+ if (ret || mcspi->slave_aborted)
+ return 0;
+ }
+
+ if (mcspi->fifo_depth > 0) {
+ irqstat_reg = mcspi->base + OMAP2_MCSPI_IRQSTATUS;
+
+ if (mcspi_wait_for_reg_bit(irqstat_reg,
+ OMAP2_MCSPI_IRQSTATUS_EOW) < 0)
+ dev_err(&spi->dev, "EOW timed out\n");
+
+ mcspi_write_reg(mcspi->master, OMAP2_MCSPI_IRQSTATUS,
+ OMAP2_MCSPI_IRQSTATUS_EOW);
+ }
+
+ /* for TX_ONLY mode, be sure all words have shifted out */
+ if (rx == NULL) {
+ chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0;
+ if (mcspi->fifo_depth > 0) {
+ wait_res = mcspi_wait_for_reg_bit(chstat_reg,
+ OMAP2_MCSPI_CHSTAT_TXFFE);
+ if (wait_res < 0)
+ dev_err(&spi->dev, "TXFFE timed out\n");
+ } else {
+ wait_res = mcspi_wait_for_reg_bit(chstat_reg,
+ OMAP2_MCSPI_CHSTAT_TXS);
+ if (wait_res < 0)
+ dev_err(&spi->dev, "TXS timed out\n");
+ }
+ if (wait_res >= 0 &&
+ (mcspi_wait_for_reg_bit(chstat_reg,
+ OMAP2_MCSPI_CHSTAT_EOT) < 0))
+ dev_err(&spi->dev, "EOT timed out\n");
+ }
+ }
+ return count;
+}
+
+static unsigned
+omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
+{
+ struct omap2_mcspi_cs *cs = spi->controller_state;
+ unsigned int count, c;
+ u32 l;
+ void __iomem *base = cs->base;
+ void __iomem *tx_reg;
+ void __iomem *rx_reg;
+ void __iomem *chstat_reg;
+ int word_len;
+
+ count = xfer->len;
+ c = count;
+ word_len = cs->word_len;
+
+ l = mcspi_cached_chconf0(spi);
+
+ /* We store the pre-calculated register addresses on stack to speed
+ * up the transfer loop. */
+ tx_reg = base + OMAP2_MCSPI_TX0;
+ rx_reg = base + OMAP2_MCSPI_RX0;
+ chstat_reg = base + OMAP2_MCSPI_CHSTAT0;
+
+ if (c < (word_len>>3))
+ return 0;
+
+ if (word_len <= 8) {
+ u8 *rx;
+ const u8 *tx;
+
+ rx = xfer->rx_buf;
+ tx = xfer->tx_buf;
+
+ do {
+ c -= 1;
+ if (tx != NULL) {
+ if (mcspi_wait_for_reg_bit(chstat_reg,
+ OMAP2_MCSPI_CHSTAT_TXS) < 0) {
+ dev_err(&spi->dev, "TXS timed out\n");
+ goto out;
+ }
+ dev_vdbg(&spi->dev, "write-%d %02x\n",
+ word_len, *tx);
+ writel_relaxed(*tx++, tx_reg);
+ }
+ if (rx != NULL) {
+ if (mcspi_wait_for_reg_bit(chstat_reg,
+ OMAP2_MCSPI_CHSTAT_RXS) < 0) {
+ dev_err(&spi->dev, "RXS timed out\n");
+ goto out;
+ }
+
+ if (c == 1 && tx == NULL &&
+ (l & OMAP2_MCSPI_CHCONF_TURBO)) {
+ omap2_mcspi_set_enable(spi, 0);
+ *rx++ = readl_relaxed(rx_reg);
+ dev_vdbg(&spi->dev, "read-%d %02x\n",
+ word_len, *(rx - 1));
+ if (mcspi_wait_for_reg_bit(chstat_reg,
+ OMAP2_MCSPI_CHSTAT_RXS) < 0) {
+ dev_err(&spi->dev,
+ "RXS timed out\n");
+ goto out;
+ }
+ c = 0;
+ } else if (c == 0 && tx == NULL) {
+ omap2_mcspi_set_enable(spi, 0);
+ }
+
+ *rx++ = readl_relaxed(rx_reg);
+ dev_vdbg(&spi->dev, "read-%d %02x\n",
+ word_len, *(rx - 1));
+ }
+ /* Add word delay between each word */
+ spi_delay_exec(&xfer->word_delay, xfer);
+ } while (c);
+ } else if (word_len <= 16) {
+ u16 *rx;
+ const u16 *tx;
+
+ rx = xfer->rx_buf;
+ tx = xfer->tx_buf;
+ do {
+ c -= 2;
+ if (tx != NULL) {
+ if (mcspi_wait_for_reg_bit(chstat_reg,
+ OMAP2_MCSPI_CHSTAT_TXS) < 0) {
+ dev_err(&spi->dev, "TXS timed out\n");
+ goto out;
+ }
+ dev_vdbg(&spi->dev, "write-%d %04x\n",
+ word_len, *tx);
+ writel_relaxed(*tx++, tx_reg);
+ }
+ if (rx != NULL) {
+ if (mcspi_wait_for_reg_bit(chstat_reg,
+ OMAP2_MCSPI_CHSTAT_RXS) < 0) {
+ dev_err(&spi->dev, "RXS timed out\n");
+ goto out;
+ }
+
+ if (c == 2 && tx == NULL &&
+ (l & OMAP2_MCSPI_CHCONF_TURBO)) {
+ omap2_mcspi_set_enable(spi, 0);
+ *rx++ = readl_relaxed(rx_reg);
+ dev_vdbg(&spi->dev, "read-%d %04x\n",
+ word_len, *(rx - 1));
+ if (mcspi_wait_for_reg_bit(chstat_reg,
+ OMAP2_MCSPI_CHSTAT_RXS) < 0) {
+ dev_err(&spi->dev,
+ "RXS timed out\n");
+ goto out;
+ }
+ c = 0;
+ } else if (c == 0 && tx == NULL) {
+ omap2_mcspi_set_enable(spi, 0);
+ }
+
+ *rx++ = readl_relaxed(rx_reg);
+ dev_vdbg(&spi->dev, "read-%d %04x\n",
+ word_len, *(rx - 1));
+ }
+ /* Add word delay between each word */
+ spi_delay_exec(&xfer->word_delay, xfer);
+ } while (c >= 2);
+ } else if (word_len <= 32) {
+ u32 *rx;
+ const u32 *tx;
+
+ rx = xfer->rx_buf;
+ tx = xfer->tx_buf;
+ do {
+ c -= 4;
+ if (tx != NULL) {
+ if (mcspi_wait_for_reg_bit(chstat_reg,
+ OMAP2_MCSPI_CHSTAT_TXS) < 0) {
+ dev_err(&spi->dev, "TXS timed out\n");
+ goto out;
+ }
+ dev_vdbg(&spi->dev, "write-%d %08x\n",
+ word_len, *tx);
+ writel_relaxed(*tx++, tx_reg);
+ }
+ if (rx != NULL) {
+ if (mcspi_wait_for_reg_bit(chstat_reg,
+ OMAP2_MCSPI_CHSTAT_RXS) < 0) {
+ dev_err(&spi->dev, "RXS timed out\n");
+ goto out;
+ }
+
+ if (c == 4 && tx == NULL &&
+ (l & OMAP2_MCSPI_CHCONF_TURBO)) {
+ omap2_mcspi_set_enable(spi, 0);
+ *rx++ = readl_relaxed(rx_reg);
+ dev_vdbg(&spi->dev, "read-%d %08x\n",
+ word_len, *(rx - 1));
+ if (mcspi_wait_for_reg_bit(chstat_reg,
+ OMAP2_MCSPI_CHSTAT_RXS) < 0) {
+ dev_err(&spi->dev,
+ "RXS timed out\n");
+ goto out;
+ }
+ c = 0;
+ } else if (c == 0 && tx == NULL) {
+ omap2_mcspi_set_enable(spi, 0);
+ }
+
+ *rx++ = readl_relaxed(rx_reg);
+ dev_vdbg(&spi->dev, "read-%d %08x\n",
+ word_len, *(rx - 1));
+ }
+ /* Add word delay between each word */
+ spi_delay_exec(&xfer->word_delay, xfer);
+ } while (c >= 4);
+ }
+
+ /* for TX_ONLY mode, be sure all words have shifted out */
+ if (xfer->rx_buf == NULL) {
+ if (mcspi_wait_for_reg_bit(chstat_reg,
+ OMAP2_MCSPI_CHSTAT_TXS) < 0) {
+ dev_err(&spi->dev, "TXS timed out\n");
+ } else if (mcspi_wait_for_reg_bit(chstat_reg,
+ OMAP2_MCSPI_CHSTAT_EOT) < 0)
+ dev_err(&spi->dev, "EOT timed out\n");
+
+		/* disable the channel to purge rx data received in a TX_ONLY
+		 * transfer, otherwise this stale rx data will affect the
+		 * directly following RX_ONLY transfer.
+ */
+ omap2_mcspi_set_enable(spi, 0);
+ }
+out:
+ omap2_mcspi_set_enable(spi, 1);
+ return count - c;
+}
+
+static u32 omap2_mcspi_calc_divisor(u32 speed_hz)
+{
+ u32 div;
+
+ for (div = 0; div < 15; div++)
+ if (speed_hz >= (OMAP2_MCSPI_MAX_FREQ >> div))
+ return div;
+
+ return 15;
+}
+
+/* called only when no transfer is active to this device */
+static int omap2_mcspi_setup_transfer(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct omap2_mcspi_cs *cs = spi->controller_state;
+ struct omap2_mcspi *mcspi;
+ u32 l = 0, clkd = 0, div, extclk = 0, clkg = 0;
+ u8 word_len = spi->bits_per_word;
+ u32 speed_hz = spi->max_speed_hz;
+
+ mcspi = spi_master_get_devdata(spi->master);
+
+ if (t != NULL && t->bits_per_word)
+ word_len = t->bits_per_word;
+
+ cs->word_len = word_len;
+
+ if (t && t->speed_hz)
+ speed_hz = t->speed_hz;
+
+ speed_hz = min_t(u32, speed_hz, OMAP2_MCSPI_MAX_FREQ);
+ if (speed_hz < (OMAP2_MCSPI_MAX_FREQ / OMAP2_MCSPI_MAX_DIVIDER)) {
+ clkd = omap2_mcspi_calc_divisor(speed_hz);
+ speed_hz = OMAP2_MCSPI_MAX_FREQ >> clkd;
+ clkg = 0;
+ } else {
+ div = (OMAP2_MCSPI_MAX_FREQ + speed_hz - 1) / speed_hz;
+ speed_hz = OMAP2_MCSPI_MAX_FREQ / div;
+ clkd = (div - 1) & 0xf;
+ extclk = (div - 1) >> 4;
+ clkg = OMAP2_MCSPI_CHCONF_CLKG;
+ }
+
+ l = mcspi_cached_chconf0(spi);
+
+ /* standard 4-wire master mode: SCK, MOSI/out, MISO/in, nCS
+ * REVISIT: this controller could support SPI_3WIRE mode.
+ */
+ if (mcspi->pin_dir == MCSPI_PINDIR_D0_IN_D1_OUT) {
+ l &= ~OMAP2_MCSPI_CHCONF_IS;
+ l &= ~OMAP2_MCSPI_CHCONF_DPE1;
+ l |= OMAP2_MCSPI_CHCONF_DPE0;
+ } else {
+ l |= OMAP2_MCSPI_CHCONF_IS;
+ l |= OMAP2_MCSPI_CHCONF_DPE1;
+ l &= ~OMAP2_MCSPI_CHCONF_DPE0;
+ }
+
+ /* wordlength */
+ l &= ~OMAP2_MCSPI_CHCONF_WL_MASK;
+ l |= (word_len - 1) << 7;
+
+ /* set chipselect polarity; manage with FORCE */
+ if (!(spi->mode & SPI_CS_HIGH))
+ l |= OMAP2_MCSPI_CHCONF_EPOL; /* active-low; normal */
+ else
+ l &= ~OMAP2_MCSPI_CHCONF_EPOL;
+
+ /* set clock divisor */
+ l &= ~OMAP2_MCSPI_CHCONF_CLKD_MASK;
+ l |= clkd << 2;
+
+ /* set clock granularity */
+ l &= ~OMAP2_MCSPI_CHCONF_CLKG;
+ l |= clkg;
+ if (clkg) {
+ cs->chctrl0 &= ~OMAP2_MCSPI_CHCTRL_EXTCLK_MASK;
+ cs->chctrl0 |= extclk << 8;
+ mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, cs->chctrl0);
+ }
+
+ /* set SPI mode 0..3 */
+ if (spi->mode & SPI_CPOL)
+ l |= OMAP2_MCSPI_CHCONF_POL;
+ else
+ l &= ~OMAP2_MCSPI_CHCONF_POL;
+ if (spi->mode & SPI_CPHA)
+ l |= OMAP2_MCSPI_CHCONF_PHA;
+ else
+ l &= ~OMAP2_MCSPI_CHCONF_PHA;
+
+ mcspi_write_chconf0(spi, l);
+
+ cs->mode = spi->mode;
+
+ dev_dbg(&spi->dev, "setup: speed %d, sample %s edge, clk %s\n",
+ speed_hz,
+ (spi->mode & SPI_CPHA) ? "trailing" : "leading",
+ (spi->mode & SPI_CPOL) ? "inverted" : "normal");
+
+ return 0;
+}
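
The clock setup in omap2_mcspi_setup_transfer() chooses between two divider schemes: requests below OMAP2_MCSPI_MAX_FREQ / OMAP2_MCSPI_MAX_DIVIDER fall back to the power-of-two CLKD divider, while everything else uses one-clock granularity (CLKG) with a 12-bit divider split across CLKD (low 4 bits) and EXTCLK (high 8 bits). A stand-alone sketch of that selection, with the 10 MHz request being an assumed example value:

#include <stdio.h>

#define MCSPI_MAX_FREQ		48000000
#define MCSPI_MAX_DIVIDER	4096

/* Reproduce the divider selection from omap2_mcspi_setup_transfer():
 * 1-clock granularity (CLKG) with a 12-bit divider where possible,
 * power-of-two divider otherwise. */
int main(void)
{
	unsigned int speed_hz = 10000000;	/* assumed requested SCK */
	unsigned int div, clkd, extclk, clkg;

	if (speed_hz < MCSPI_MAX_FREQ / MCSPI_MAX_DIVIDER) {
		for (clkd = 0; clkd < 15; clkd++)
			if (speed_hz >= (MCSPI_MAX_FREQ >> clkd))
				break;
		extclk = 0;
		clkg = 0;
		speed_hz = MCSPI_MAX_FREQ >> clkd;
	} else {
		div = (MCSPI_MAX_FREQ + speed_hz - 1) / speed_hz;
		clkd = (div - 1) & 0xf;
		extclk = (div - 1) >> 4;
		clkg = 1;
		speed_hz = MCSPI_MAX_FREQ / div;
	}
	printf("clkd=%u extclk=%u clkg=%u -> %u Hz\n",
	       clkd, extclk, clkg, speed_hz);
	return 0;
}

For 10 MHz this takes the CLKG path, yielding a divider of 5 (clkd = 4, extclk = 0) and an effective SCK of 9.6 MHz.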
+
+/*
+ * Note that we currently allow DMA only if we get a channel
+ * for both rx and tx. Otherwise we'll do PIO for both rx and tx.
+ */
+static int omap2_mcspi_request_dma(struct omap2_mcspi *mcspi,
+ struct omap2_mcspi_dma *mcspi_dma)
+{
+ int ret = 0;
+
+ mcspi_dma->dma_rx = dma_request_chan(mcspi->dev,
+ mcspi_dma->dma_rx_ch_name);
+ if (IS_ERR(mcspi_dma->dma_rx)) {
+ ret = PTR_ERR(mcspi_dma->dma_rx);
+ mcspi_dma->dma_rx = NULL;
+ goto no_dma;
+ }
+
+ mcspi_dma->dma_tx = dma_request_chan(mcspi->dev,
+ mcspi_dma->dma_tx_ch_name);
+ if (IS_ERR(mcspi_dma->dma_tx)) {
+ ret = PTR_ERR(mcspi_dma->dma_tx);
+ mcspi_dma->dma_tx = NULL;
+ dma_release_channel(mcspi_dma->dma_rx);
+ mcspi_dma->dma_rx = NULL;
+ }
+
+ init_completion(&mcspi_dma->dma_rx_completion);
+ init_completion(&mcspi_dma->dma_tx_completion);
+
+no_dma:
+ return ret;
+}
+
+static void omap2_mcspi_release_dma(struct spi_master *master)
+{
+ struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
+ struct omap2_mcspi_dma *mcspi_dma;
+ int i;
+
+ for (i = 0; i < master->num_chipselect; i++) {
+ mcspi_dma = &mcspi->dma_channels[i];
+
+ if (mcspi_dma->dma_rx) {
+ dma_release_channel(mcspi_dma->dma_rx);
+ mcspi_dma->dma_rx = NULL;
+ }
+ if (mcspi_dma->dma_tx) {
+ dma_release_channel(mcspi_dma->dma_tx);
+ mcspi_dma->dma_tx = NULL;
+ }
+ }
+}
+
+static void omap2_mcspi_cleanup(struct spi_device *spi)
+{
+ struct omap2_mcspi_cs *cs;
+
+ if (spi->controller_state) {
+ /* Unlink controller state from context save list */
+ cs = spi->controller_state;
+ list_del(&cs->node);
+
+ kfree(cs);
+ }
+}
+
+static int omap2_mcspi_setup(struct spi_device *spi)
+{
+ bool initial_setup = false;
+ int ret;
+ struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
+ struct omap2_mcspi_regs *ctx = &mcspi->ctx;
+ struct omap2_mcspi_cs *cs = spi->controller_state;
+
+ if (!cs) {
+ cs = kzalloc(sizeof(*cs), GFP_KERNEL);
+ if (!cs)
+ return -ENOMEM;
+ cs->base = mcspi->base + spi->chip_select * 0x14;
+ cs->phys = mcspi->phys + spi->chip_select * 0x14;
+ cs->mode = 0;
+ cs->chconf0 = 0;
+ cs->chctrl0 = 0;
+ spi->controller_state = cs;
+ /* Link this to context save list */
+ list_add_tail(&cs->node, &ctx->cs);
+ initial_setup = true;
+ }
+
+ ret = pm_runtime_resume_and_get(mcspi->dev);
+ if (ret < 0) {
+ if (initial_setup)
+ omap2_mcspi_cleanup(spi);
+
+ return ret;
+ }
+
+ ret = omap2_mcspi_setup_transfer(spi, NULL);
+ if (ret && initial_setup)
+ omap2_mcspi_cleanup(spi);
+
+ pm_runtime_mark_last_busy(mcspi->dev);
+ pm_runtime_put_autosuspend(mcspi->dev);
+
+ return ret;
+}
+
+static irqreturn_t omap2_mcspi_irq_handler(int irq, void *data)
+{
+ struct omap2_mcspi *mcspi = data;
+ u32 irqstat;
+
+ irqstat = mcspi_read_reg(mcspi->master, OMAP2_MCSPI_IRQSTATUS);
+ if (!irqstat)
+ return IRQ_NONE;
+
+	/* Disable IRQ and wake up the slave xfer task */
+ mcspi_write_reg(mcspi->master, OMAP2_MCSPI_IRQENABLE, 0);
+ if (irqstat & OMAP2_MCSPI_IRQSTATUS_EOW)
+ complete(&mcspi->txdone);
+
+ return IRQ_HANDLED;
+}
+
+static int omap2_mcspi_slave_abort(struct spi_master *master)
+{
+ struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
+ struct omap2_mcspi_dma *mcspi_dma = mcspi->dma_channels;
+
+ mcspi->slave_aborted = true;
+ complete(&mcspi_dma->dma_rx_completion);
+ complete(&mcspi_dma->dma_tx_completion);
+ complete(&mcspi->txdone);
+
+ return 0;
+}
+
+static int omap2_mcspi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *t)
+{
+
+ /* We only enable one channel at a time -- the one whose message is
+	 * at the head of the queue -- although this controller would gladly
+ * arbitrate among multiple channels. This corresponds to "single
+ * channel" master mode. As a side effect, we need to manage the
+ * chipselect with the FORCE bit ... CS != channel enable.
+ */
+
+ struct omap2_mcspi *mcspi;
+ struct omap2_mcspi_dma *mcspi_dma;
+ struct omap2_mcspi_cs *cs;
+ struct omap2_mcspi_device_config *cd;
+ int par_override = 0;
+ int status = 0;
+ u32 chconf;
+
+ mcspi = spi_master_get_devdata(master);
+ mcspi_dma = mcspi->dma_channels + spi->chip_select;
+ cs = spi->controller_state;
+ cd = spi->controller_data;
+
+ /*
+ * The slave driver could have changed spi->mode in which case
+ * it will be different from cs->mode (the current hardware setup).
+	 * If so, set par_override (even though it's not a parity issue) so
+ * omap2_mcspi_setup_transfer will be called to configure the hardware
+ * with the correct mode on the first iteration of the loop below.
+ */
+ if (spi->mode != cs->mode)
+ par_override = 1;
+
+ omap2_mcspi_set_enable(spi, 0);
+
+ if (spi->cs_gpiod)
+ omap2_mcspi_set_cs(spi, spi->mode & SPI_CS_HIGH);
+
+ if (par_override ||
+ (t->speed_hz != spi->max_speed_hz) ||
+ (t->bits_per_word != spi->bits_per_word)) {
+ par_override = 1;
+ status = omap2_mcspi_setup_transfer(spi, t);
+ if (status < 0)
+ goto out;
+ if (t->speed_hz == spi->max_speed_hz &&
+ t->bits_per_word == spi->bits_per_word)
+ par_override = 0;
+ }
+ if (cd && cd->cs_per_word) {
+ chconf = mcspi->ctx.modulctrl;
+ chconf &= ~OMAP2_MCSPI_MODULCTRL_SINGLE;
+ mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, chconf);
+ mcspi->ctx.modulctrl =
+ mcspi_read_cs_reg(spi, OMAP2_MCSPI_MODULCTRL);
+ }
+
+ chconf = mcspi_cached_chconf0(spi);
+ chconf &= ~OMAP2_MCSPI_CHCONF_TRM_MASK;
+ chconf &= ~OMAP2_MCSPI_CHCONF_TURBO;
+
+ if (t->tx_buf == NULL)
+ chconf |= OMAP2_MCSPI_CHCONF_TRM_RX_ONLY;
+ else if (t->rx_buf == NULL)
+ chconf |= OMAP2_MCSPI_CHCONF_TRM_TX_ONLY;
+
+ if (cd && cd->turbo_mode && t->tx_buf == NULL) {
+ /* Turbo mode is for more than one word */
+ if (t->len > ((cs->word_len + 7) >> 3))
+ chconf |= OMAP2_MCSPI_CHCONF_TURBO;
+ }
+
+ mcspi_write_chconf0(spi, chconf);
+
+ if (t->len) {
+ unsigned count;
+
+ if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) &&
+ master->cur_msg_mapped &&
+ master->can_dma(master, spi, t))
+ omap2_mcspi_set_fifo(spi, t, 1);
+
+ omap2_mcspi_set_enable(spi, 1);
+
+ /* RX_ONLY mode needs dummy data in TX reg */
+ if (t->tx_buf == NULL)
+ writel_relaxed(0, cs->base
+ + OMAP2_MCSPI_TX0);
+
+ if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) &&
+ master->cur_msg_mapped &&
+ master->can_dma(master, spi, t))
+ count = omap2_mcspi_txrx_dma(spi, t);
+ else
+ count = omap2_mcspi_txrx_pio(spi, t);
+
+ if (count != t->len) {
+ status = -EIO;
+ goto out;
+ }
+ }
+
+ omap2_mcspi_set_enable(spi, 0);
+
+ if (mcspi->fifo_depth > 0)
+ omap2_mcspi_set_fifo(spi, t, 0);
+
+out:
+	/* Restore defaults if they were overridden */
+ if (par_override) {
+ par_override = 0;
+ status = omap2_mcspi_setup_transfer(spi, NULL);
+ }
+
+ if (cd && cd->cs_per_word) {
+ chconf = mcspi->ctx.modulctrl;
+ chconf |= OMAP2_MCSPI_MODULCTRL_SINGLE;
+ mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, chconf);
+ mcspi->ctx.modulctrl =
+ mcspi_read_cs_reg(spi, OMAP2_MCSPI_MODULCTRL);
+ }
+
+ omap2_mcspi_set_enable(spi, 0);
+
+ if (spi->cs_gpiod)
+ omap2_mcspi_set_cs(spi, !(spi->mode & SPI_CS_HIGH));
+
+ if (mcspi->fifo_depth > 0 && t)
+ omap2_mcspi_set_fifo(spi, t, 0);
+
+ return status;
+}
+
+static int omap2_mcspi_prepare_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
+ struct omap2_mcspi_regs *ctx = &mcspi->ctx;
+ struct omap2_mcspi_cs *cs;
+
+ /* Only a single channel can have the FORCE bit enabled
+ * in its chconf0 register.
+ * Scan all channels and disable them except the current one.
+	 * A FORCE can remain from a previous transfer that had cs_change enabled
+ */
+ list_for_each_entry(cs, &ctx->cs, node) {
+ if (msg->spi->controller_state == cs)
+ continue;
+
+ if ((cs->chconf0 & OMAP2_MCSPI_CHCONF_FORCE)) {
+ cs->chconf0 &= ~OMAP2_MCSPI_CHCONF_FORCE;
+ writel_relaxed(cs->chconf0,
+ cs->base + OMAP2_MCSPI_CHCONF0);
+ readl_relaxed(cs->base + OMAP2_MCSPI_CHCONF0);
+ }
+ }
+
+ return 0;
+}
+
+static bool omap2_mcspi_can_dma(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
+ struct omap2_mcspi_dma *mcspi_dma =
+ &mcspi->dma_channels[spi->chip_select];
+
+ if (!mcspi_dma->dma_rx || !mcspi_dma->dma_tx)
+ return false;
+
+ if (spi_controller_is_slave(master))
+ return true;
+
+ master->dma_rx = mcspi_dma->dma_rx;
+ master->dma_tx = mcspi_dma->dma_tx;
+
+ return (xfer->len >= DMA_MIN_BYTES);
+}
+
+static size_t omap2_mcspi_max_xfer_size(struct spi_device *spi)
+{
+ struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
+ struct omap2_mcspi_dma *mcspi_dma =
+ &mcspi->dma_channels[spi->chip_select];
+
+ if (mcspi->max_xfer_len && mcspi_dma->dma_rx)
+ return mcspi->max_xfer_len;
+
+ return SIZE_MAX;
+}
+
+static int omap2_mcspi_controller_setup(struct omap2_mcspi *mcspi)
+{
+ struct spi_master *master = mcspi->master;
+ struct omap2_mcspi_regs *ctx = &mcspi->ctx;
+ int ret = 0;
+
+ ret = pm_runtime_resume_and_get(mcspi->dev);
+ if (ret < 0)
+ return ret;
+
+ mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE,
+ OMAP2_MCSPI_WAKEUPENABLE_WKEN);
+ ctx->wakeupenable = OMAP2_MCSPI_WAKEUPENABLE_WKEN;
+
+ omap2_mcspi_set_mode(master);
+ pm_runtime_mark_last_busy(mcspi->dev);
+ pm_runtime_put_autosuspend(mcspi->dev);
+ return 0;
+}
+
+static int omap_mcspi_runtime_suspend(struct device *dev)
+{
+ int error;
+
+ error = pinctrl_pm_select_idle_state(dev);
+ if (error)
+ dev_warn(dev, "%s: failed to set pins: %i\n", __func__, error);
+
+ return 0;
+}
+
+/*
+ * When the SPI controller wakes up from off-mode, CS is in the active
+ * state. If it was in the inactive state when the driver was suspended,
+ * force it back to the inactive state at wake-up.
+ */
+static int omap_mcspi_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
+ struct omap2_mcspi_regs *ctx = &mcspi->ctx;
+ struct omap2_mcspi_cs *cs;
+ int error;
+
+ error = pinctrl_pm_select_default_state(dev);
+ if (error)
+ dev_warn(dev, "%s: failed to set pins: %i\n", __func__, error);
+
+ /* McSPI: context restore */
+ mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, ctx->modulctrl);
+ mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE, ctx->wakeupenable);
+
+ list_for_each_entry(cs, &ctx->cs, node) {
+ /*
+		 * We need to toggle the CS state for the OMAP to take
+		 * this change into account.
+ */
+ if ((cs->chconf0 & OMAP2_MCSPI_CHCONF_FORCE) == 0) {
+ cs->chconf0 |= OMAP2_MCSPI_CHCONF_FORCE;
+ writel_relaxed(cs->chconf0,
+ cs->base + OMAP2_MCSPI_CHCONF0);
+ cs->chconf0 &= ~OMAP2_MCSPI_CHCONF_FORCE;
+ writel_relaxed(cs->chconf0,
+ cs->base + OMAP2_MCSPI_CHCONF0);
+ } else {
+ writel_relaxed(cs->chconf0,
+ cs->base + OMAP2_MCSPI_CHCONF0);
+ }
+ }
+
+ return 0;
+}
+
+static struct omap2_mcspi_platform_config omap2_pdata = {
+ .regs_offset = 0,
+};
+
+static struct omap2_mcspi_platform_config omap4_pdata = {
+ .regs_offset = OMAP4_MCSPI_REG_OFFSET,
+};
+
+static struct omap2_mcspi_platform_config am654_pdata = {
+ .regs_offset = OMAP4_MCSPI_REG_OFFSET,
+ .max_xfer_len = SZ_4K - 1,
+};
+
+static const struct of_device_id omap_mcspi_of_match[] = {
+ {
+ .compatible = "ti,omap2-mcspi",
+ .data = &omap2_pdata,
+ },
+ {
+ .compatible = "ti,omap4-mcspi",
+ .data = &omap4_pdata,
+ },
+ {
+ .compatible = "ti,am654-mcspi",
+ .data = &am654_pdata,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, omap_mcspi_of_match);
+
+static int omap2_mcspi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ const struct omap2_mcspi_platform_config *pdata;
+ struct omap2_mcspi *mcspi;
+ struct resource *r;
+ int status = 0, i;
+ u32 regs_offset = 0;
+ struct device_node *node = pdev->dev.of_node;
+ const struct of_device_id *match;
+
+ if (of_property_read_bool(node, "spi-slave"))
+ master = spi_alloc_slave(&pdev->dev, sizeof(*mcspi));
+ else
+ master = spi_alloc_master(&pdev->dev, sizeof(*mcspi));
+ if (!master)
+ return -ENOMEM;
+
+ /* the spi->mode bits understood by this driver: */
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
+ master->setup = omap2_mcspi_setup;
+ master->auto_runtime_pm = true;
+ master->prepare_message = omap2_mcspi_prepare_message;
+ master->can_dma = omap2_mcspi_can_dma;
+ master->transfer_one = omap2_mcspi_transfer_one;
+ master->set_cs = omap2_mcspi_set_cs;
+ master->cleanup = omap2_mcspi_cleanup;
+ master->slave_abort = omap2_mcspi_slave_abort;
+ master->dev.of_node = node;
+ master->max_speed_hz = OMAP2_MCSPI_MAX_FREQ;
+ master->min_speed_hz = OMAP2_MCSPI_MAX_FREQ >> 15;
+ master->use_gpio_descriptors = true;
+
+ platform_set_drvdata(pdev, master);
+
+ mcspi = spi_master_get_devdata(master);
+ mcspi->master = master;
+
+ match = of_match_device(omap_mcspi_of_match, &pdev->dev);
+ if (match) {
+ u32 num_cs = 1; /* default number of chipselect */
+ pdata = match->data;
+
+ of_property_read_u32(node, "ti,spi-num-cs", &num_cs);
+ master->num_chipselect = num_cs;
+ if (of_get_property(node, "ti,pindir-d0-out-d1-in", NULL))
+ mcspi->pin_dir = MCSPI_PINDIR_D0_OUT_D1_IN;
+ } else {
+ pdata = dev_get_platdata(&pdev->dev);
+ master->num_chipselect = pdata->num_cs;
+ mcspi->pin_dir = pdata->pin_dir;
+ }
+ regs_offset = pdata->regs_offset;
+ if (pdata->max_xfer_len) {
+ mcspi->max_xfer_len = pdata->max_xfer_len;
+ master->max_transfer_size = omap2_mcspi_max_xfer_size;
+ }
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mcspi->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(mcspi->base)) {
+ status = PTR_ERR(mcspi->base);
+ goto free_master;
+ }
+ mcspi->phys = r->start + regs_offset;
+ mcspi->base += regs_offset;
+
+ mcspi->dev = &pdev->dev;
+
+ INIT_LIST_HEAD(&mcspi->ctx.cs);
+
+ mcspi->dma_channels = devm_kcalloc(&pdev->dev, master->num_chipselect,
+ sizeof(struct omap2_mcspi_dma),
+ GFP_KERNEL);
+ if (mcspi->dma_channels == NULL) {
+ status = -ENOMEM;
+ goto free_master;
+ }
+
+ for (i = 0; i < master->num_chipselect; i++) {
+ sprintf(mcspi->dma_channels[i].dma_rx_ch_name, "rx%d", i);
+ sprintf(mcspi->dma_channels[i].dma_tx_ch_name, "tx%d", i);
+
+ status = omap2_mcspi_request_dma(mcspi,
+ &mcspi->dma_channels[i]);
+ if (status == -EPROBE_DEFER)
+ goto free_master;
+ }
+
+ status = platform_get_irq(pdev, 0);
+ if (status < 0) {
+ dev_err_probe(&pdev->dev, status, "no irq resource found\n");
+ goto free_master;
+ }
+ init_completion(&mcspi->txdone);
+ status = devm_request_irq(&pdev->dev, status,
+ omap2_mcspi_irq_handler, 0, pdev->name,
+ mcspi);
+ if (status) {
+ dev_err(&pdev->dev, "Cannot request IRQ");
+ goto free_master;
+ }
+
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
+ pm_runtime_enable(&pdev->dev);
+
+ status = omap2_mcspi_controller_setup(mcspi);
+ if (status < 0)
+ goto disable_pm;
+
+ status = devm_spi_register_controller(&pdev->dev, master);
+ if (status < 0)
+ goto disable_pm;
+
+ return status;
+
+disable_pm:
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+free_master:
+ omap2_mcspi_release_dma(master);
+ spi_master_put(master);
+ return status;
+}
+
+static int omap2_mcspi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
+
+ omap2_mcspi_release_dma(master);
+
+ pm_runtime_dont_use_autosuspend(mcspi->dev);
+ pm_runtime_put_sync(mcspi->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+/* work with hotplug and coldplug */
+MODULE_ALIAS("platform:omap2_mcspi");
+
+static int __maybe_unused omap2_mcspi_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
+ int error;
+
+ error = pinctrl_pm_select_sleep_state(dev);
+ if (error)
+ dev_warn(mcspi->dev, "%s: failed to set pins: %i\n",
+ __func__, error);
+
+ error = spi_master_suspend(master);
+ if (error)
+ dev_warn(mcspi->dev, "%s: master suspend failed: %i\n",
+ __func__, error);
+
+ return pm_runtime_force_suspend(dev);
+}
+
+static int __maybe_unused omap2_mcspi_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
+ int error;
+
+ error = spi_master_resume(master);
+ if (error)
+ dev_warn(mcspi->dev, "%s: master resume failed: %i\n",
+ __func__, error);
+
+ return pm_runtime_force_resume(dev);
+}
+
+static const struct dev_pm_ops omap2_mcspi_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(omap2_mcspi_suspend,
+ omap2_mcspi_resume)
+ .runtime_suspend = omap_mcspi_runtime_suspend,
+ .runtime_resume = omap_mcspi_runtime_resume,
+};
+
+static struct platform_driver omap2_mcspi_driver = {
+ .driver = {
+ .name = "omap2_mcspi",
+ .pm = &omap2_mcspi_pm_ops,
+ .of_match_table = omap_mcspi_of_match,
+ },
+ .probe = omap2_mcspi_probe,
+ .remove = omap2_mcspi_remove,
+};
+
+module_platform_driver(omap2_mcspi_driver);
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
new file mode 100644
index 000000000..565cd4c48
--- /dev/null
+++ b/drivers/spi/spi-orion.c
@@ -0,0 +1,867 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Marvell Orion SPI controller driver
+ *
+ * Author: Shadi Ammouri <shadi@marvell.com>
+ * Copyright (C) 2007-2008 Marvell Ltd.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/spi/spi.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/clk.h>
+#include <linux/sizes.h>
+#include <asm/unaligned.h>
+
+#define DRIVER_NAME "orion_spi"
+
+/* Runtime PM autosuspend timeout: PM is fairly light on this driver */
+#define SPI_AUTOSUSPEND_TIMEOUT 200
+
+/* Some SoCs using this driver support up to 8 chip selects.
+ * It is up to the implementer to only use the chip selects
+ * that are available.
+ */
+#define ORION_NUM_CHIPSELECTS 8
+
+#define ORION_SPI_WAIT_RDY_MAX_LOOP 2000 /* in usec */
+
+#define ORION_SPI_IF_CTRL_REG 0x00
+#define ORION_SPI_IF_CONFIG_REG 0x04
+#define ORION_SPI_IF_RXLSBF BIT(14)
+#define ORION_SPI_IF_TXLSBF BIT(13)
+#define ORION_SPI_DATA_OUT_REG 0x08
+#define ORION_SPI_DATA_IN_REG 0x0c
+#define ORION_SPI_INT_CAUSE_REG 0x10
+#define ORION_SPI_TIMING_PARAMS_REG 0x18
+
+/* Register for the "Direct Mode" */
+#define SPI_DIRECT_WRITE_CONFIG_REG 0x20
+
+#define ORION_SPI_TMISO_SAMPLE_MASK (0x3 << 6)
+#define ORION_SPI_TMISO_SAMPLE_1 (1 << 6)
+#define ORION_SPI_TMISO_SAMPLE_2 (2 << 6)
+
+#define ORION_SPI_MODE_CPOL (1 << 11)
+#define ORION_SPI_MODE_CPHA (1 << 12)
+#define ORION_SPI_IF_8_16_BIT_MODE (1 << 5)
+#define ORION_SPI_CLK_PRESCALE_MASK 0x1F
+#define ARMADA_SPI_CLK_PRESCALE_MASK 0xDF
+#define ORION_SPI_MODE_MASK (ORION_SPI_MODE_CPOL | \
+ ORION_SPI_MODE_CPHA)
+#define ORION_SPI_CS_MASK 0x1C
+#define ORION_SPI_CS_SHIFT 2
+#define ORION_SPI_CS(cs) ((cs << ORION_SPI_CS_SHIFT) & \
+ ORION_SPI_CS_MASK)
+
+enum orion_spi_type {
+ ORION_SPI,
+ ARMADA_SPI,
+};
+
+struct orion_spi_dev {
+ enum orion_spi_type typ;
+ /*
+	 * min_divisor and max_hz should be mutually exclusive; the only
+	 * case where both are set is to handle the armada-370-spi case
+	 * with an old device tree
+ */
+ unsigned long max_hz;
+ unsigned int min_divisor;
+ unsigned int max_divisor;
+ u32 prescale_mask;
+ bool is_errata_50mhz_ac;
+};
+
+struct orion_direct_acc {
+ void __iomem *vaddr;
+ u32 size;
+};
+
+struct orion_child_options {
+ struct orion_direct_acc direct_access;
+};
+
+struct orion_spi {
+ struct spi_master *master;
+ void __iomem *base;
+ struct clk *clk;
+ struct clk *axi_clk;
+ const struct orion_spi_dev *devdata;
+ struct device *dev;
+
+ struct orion_child_options child[ORION_NUM_CHIPSELECTS];
+};
+
+#ifdef CONFIG_PM
+static int orion_spi_runtime_suspend(struct device *dev);
+static int orion_spi_runtime_resume(struct device *dev);
+#endif
+
+static inline void __iomem *spi_reg(struct orion_spi *orion_spi, u32 reg)
+{
+ return orion_spi->base + reg;
+}
+
+static inline void
+orion_spi_setbits(struct orion_spi *orion_spi, u32 reg, u32 mask)
+{
+ void __iomem *reg_addr = spi_reg(orion_spi, reg);
+ u32 val;
+
+ val = readl(reg_addr);
+ val |= mask;
+ writel(val, reg_addr);
+}
+
+static inline void
+orion_spi_clrbits(struct orion_spi *orion_spi, u32 reg, u32 mask)
+{
+ void __iomem *reg_addr = spi_reg(orion_spi, reg);
+ u32 val;
+
+ val = readl(reg_addr);
+ val &= ~mask;
+ writel(val, reg_addr);
+}
+
+static int orion_spi_baudrate_set(struct spi_device *spi, unsigned int speed)
+{
+ u32 tclk_hz;
+ u32 rate;
+ u32 prescale;
+ u32 reg;
+ struct orion_spi *orion_spi;
+ const struct orion_spi_dev *devdata;
+
+ orion_spi = spi_master_get_devdata(spi->master);
+ devdata = orion_spi->devdata;
+
+ tclk_hz = clk_get_rate(orion_spi->clk);
+
+ if (devdata->typ == ARMADA_SPI) {
+ /*
+ * Given the core_clk (tclk_hz) and the target rate (speed) we
+ * determine the best values for SPR (in [0 .. 15]) and SPPR (in
+ * [0..7]) such that
+ *
+ * core_clk / (SPR * 2 ** SPPR)
+ *
+ * is as big as possible but not bigger than speed.
+ */
+
+ /* best integer divider: */
+ unsigned divider = DIV_ROUND_UP(tclk_hz, speed);
+ unsigned spr, sppr;
+
+ if (divider < 16) {
+ /* This is the easy case, divider is less than 16 */
+ spr = divider;
+ sppr = 0;
+
+ } else {
+ unsigned two_pow_sppr;
+ /*
+ * Find the highest bit set in divider. This and the
+ * three next bits define SPR (apart from rounding).
+ * SPPR is then the number of zero bits that must be
+ * appended:
+ */
+ sppr = fls(divider) - 4;
+
+ /*
+ * As SPR only has 4 bits, we have to round divider up
+ * to the next multiple of 2 ** sppr.
+ */
+ two_pow_sppr = 1 << sppr;
+ divider = (divider + two_pow_sppr - 1) & -two_pow_sppr;
+
+ /*
+ * recalculate sppr as rounding up divider might have
+ * increased it enough to change the position of the
+ * highest set bit. In this case the bit that now
+ * doesn't make it into SPR is 0, so there is no need to
+ * round again.
+ */
+ sppr = fls(divider) - 4;
+ spr = divider >> sppr;
+
+ /*
+ * Now do range checking. SPR is constructed to have a
+ * width of 4 bits, so this is fine for sure. So we
+ * still need to check for sppr to fit into 3 bits:
+ */
+ if (sppr > 7)
+ return -EINVAL;
+ }
+
+ prescale = ((sppr & 0x6) << 5) | ((sppr & 0x1) << 4) | spr;
+ } else {
+ /*
+ * the supported rates are: 4,6,8...30
+		 * round up, as we look for a rate equal to or below the requested speed
+ */
+ rate = DIV_ROUND_UP(tclk_hz, speed);
+ rate = roundup(rate, 2);
+
+ /* check if requested speed is too small */
+ if (rate > 30)
+ return -EINVAL;
+
+ if (rate < 4)
+ rate = 4;
+
+ /* Convert the rate to SPI clock divisor value. */
+ prescale = 0x10 + rate/2;
+ }
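+	/*
+	 * Illustrative example (hypothetical tclk): with tclk = 166 MHz
+	 * and speed = 20 MHz, rate rounds up to 10 and the programmed
+	 * value is 0x10 + 10/2 = 0x15, presumably a divide-by-10
+	 * setting, i.e. an SPI clock of roughly 16.6 MHz.
+	 */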
+
+ reg = readl(spi_reg(orion_spi, ORION_SPI_IF_CONFIG_REG));
+ reg = ((reg & ~devdata->prescale_mask) | prescale);
+ writel(reg, spi_reg(orion_spi, ORION_SPI_IF_CONFIG_REG));
+
+ return 0;
+}
+
+static void
+orion_spi_mode_set(struct spi_device *spi)
+{
+ u32 reg;
+ struct orion_spi *orion_spi;
+
+ orion_spi = spi_master_get_devdata(spi->master);
+
+ reg = readl(spi_reg(orion_spi, ORION_SPI_IF_CONFIG_REG));
+ reg &= ~ORION_SPI_MODE_MASK;
+ if (spi->mode & SPI_CPOL)
+ reg |= ORION_SPI_MODE_CPOL;
+ if (spi->mode & SPI_CPHA)
+ reg |= ORION_SPI_MODE_CPHA;
+ if (spi->mode & SPI_LSB_FIRST)
+ reg |= ORION_SPI_IF_RXLSBF | ORION_SPI_IF_TXLSBF;
+ else
+ reg &= ~(ORION_SPI_IF_RXLSBF | ORION_SPI_IF_TXLSBF);
+
+ writel(reg, spi_reg(orion_spi, ORION_SPI_IF_CONFIG_REG));
+}
+
+static void
+orion_spi_50mhz_ac_timing_erratum(struct spi_device *spi, unsigned int speed)
+{
+ u32 reg;
+ struct orion_spi *orion_spi;
+
+ orion_spi = spi_master_get_devdata(spi->master);
+
+ /*
+ * Erratum description: (Erratum NO. FE-9144572) The device
+ * SPI interface supports frequencies of up to 50 MHz.
+ * However, due to this erratum, when the device core clock is
+	 * 250 MHz and the SPI interface is configured for 50 MHz SPI
+	 * clock and CPOL=CPHA=1, data corruption might occur on
+	 * reads from the SPI device.
+ * Erratum Workaround:
+ * Work in one of the following configurations:
+ * 1. Set CPOL=CPHA=0 in "SPI Interface Configuration
+ * Register".
+ * 2. Set TMISO_SAMPLE value to 0x2 in "SPI Timing Parameters 1
+ * Register" before setting the interface.
+ */
+ reg = readl(spi_reg(orion_spi, ORION_SPI_TIMING_PARAMS_REG));
+ reg &= ~ORION_SPI_TMISO_SAMPLE_MASK;
+
+ if (clk_get_rate(orion_spi->clk) == 250000000 &&
+ speed == 50000000 && spi->mode & SPI_CPOL &&
+ spi->mode & SPI_CPHA)
+ reg |= ORION_SPI_TMISO_SAMPLE_2;
+ else
+ reg |= ORION_SPI_TMISO_SAMPLE_1; /* This is the default value */
+
+ writel(reg, spi_reg(orion_spi, ORION_SPI_TIMING_PARAMS_REG));
+}
+
+/*
+ * called only when no transfer is active on the bus
+ */
+static int
+orion_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct orion_spi *orion_spi;
+ unsigned int speed = spi->max_speed_hz;
+ unsigned int bits_per_word = spi->bits_per_word;
+ int rc;
+
+ orion_spi = spi_master_get_devdata(spi->master);
+
+ if ((t != NULL) && t->speed_hz)
+ speed = t->speed_hz;
+
+ if ((t != NULL) && t->bits_per_word)
+ bits_per_word = t->bits_per_word;
+
+ orion_spi_mode_set(spi);
+
+ if (orion_spi->devdata->is_errata_50mhz_ac)
+ orion_spi_50mhz_ac_timing_erratum(spi, speed);
+
+ rc = orion_spi_baudrate_set(spi, speed);
+ if (rc)
+ return rc;
+
+ if (bits_per_word == 16)
+ orion_spi_setbits(orion_spi, ORION_SPI_IF_CONFIG_REG,
+ ORION_SPI_IF_8_16_BIT_MODE);
+ else
+ orion_spi_clrbits(orion_spi, ORION_SPI_IF_CONFIG_REG,
+ ORION_SPI_IF_8_16_BIT_MODE);
+
+ return 0;
+}
+
+static void orion_spi_set_cs(struct spi_device *spi, bool enable)
+{
+ struct orion_spi *orion_spi;
+ void __iomem *ctrl_reg;
+ u32 val;
+
+ orion_spi = spi_master_get_devdata(spi->master);
+ ctrl_reg = spi_reg(orion_spi, ORION_SPI_IF_CTRL_REG);
+
+ val = readl(ctrl_reg);
+
+ /* Clear existing chip-select and assertion state */
+ val &= ~(ORION_SPI_CS_MASK | 0x1);
+
+ /*
+ * If this line is using a GPIO to control chip select, this internal
+ * .set_cs() function will still be called, so we clear any previous
+	 * chip select. The CS we activate will not have any electrical effect,
+ * as it is handled by a GPIO, but that doesn't matter. What we need
+ * is to deassert the old chip select and assert some other chip select.
+ */
+ val |= ORION_SPI_CS(spi->chip_select);
+
+ /*
+ * Chip select logic is inverted from spi_set_cs(). For lines using a
+ * GPIO to do chip select SPI_CS_HIGH is enforced and inversion happens
+ * in the GPIO library, but we don't care about that, because in those
+ * cases we are dealing with an unused native CS anyways so the polarity
+ * doesn't matter.
+ */
+ if (!enable)
+ val |= 0x1;
+
+ /*
+	 * To avoid toggling unwanted chip selects, update the register
+ * with a single write.
+ */
+ writel(val, ctrl_reg);
+}
+
+static inline int orion_spi_wait_till_ready(struct orion_spi *orion_spi)
+{
+ int i;
+
+ for (i = 0; i < ORION_SPI_WAIT_RDY_MAX_LOOP; i++) {
+ if (readl(spi_reg(orion_spi, ORION_SPI_INT_CAUSE_REG)))
+ return 1;
+
+ udelay(1);
+ }
+
+ return -1;
+}
+
+static inline int
+orion_spi_write_read_8bit(struct spi_device *spi,
+ const u8 **tx_buf, u8 **rx_buf)
+{
+ void __iomem *tx_reg, *rx_reg, *int_reg;
+ struct orion_spi *orion_spi;
+ bool cs_single_byte;
+
+ cs_single_byte = spi->mode & SPI_CS_WORD;
+
+ orion_spi = spi_master_get_devdata(spi->master);
+
+ if (cs_single_byte)
+ orion_spi_set_cs(spi, 0);
+
+ tx_reg = spi_reg(orion_spi, ORION_SPI_DATA_OUT_REG);
+ rx_reg = spi_reg(orion_spi, ORION_SPI_DATA_IN_REG);
+ int_reg = spi_reg(orion_spi, ORION_SPI_INT_CAUSE_REG);
+
+ /* clear the interrupt cause register */
+ writel(0x0, int_reg);
+
+ if (tx_buf && *tx_buf)
+ writel(*(*tx_buf)++, tx_reg);
+ else
+ writel(0, tx_reg);
+
+ if (orion_spi_wait_till_ready(orion_spi) < 0) {
+ if (cs_single_byte) {
+ orion_spi_set_cs(spi, 1);
+			/* Satisfy some SLIC devices' requirements */
+ udelay(4);
+ }
+ dev_err(&spi->dev, "TXS timed out\n");
+ return -1;
+ }
+
+ if (rx_buf && *rx_buf)
+ *(*rx_buf)++ = readl(rx_reg);
+
+ if (cs_single_byte) {
+ orion_spi_set_cs(spi, 1);
+		/* Satisfy some SLIC devices' requirements */
+ udelay(4);
+ }
+
+ return 1;
+}
+
+static inline int
+orion_spi_write_read_16bit(struct spi_device *spi,
+ const u16 **tx_buf, u16 **rx_buf)
+{
+ void __iomem *tx_reg, *rx_reg, *int_reg;
+ struct orion_spi *orion_spi;
+
+ if (spi->mode & SPI_CS_WORD) {
+		dev_err(&spi->dev, "SPI_CS_WORD is only supported for 8-bit words\n");
+ return -1;
+ }
+
+ orion_spi = spi_master_get_devdata(spi->master);
+ tx_reg = spi_reg(orion_spi, ORION_SPI_DATA_OUT_REG);
+ rx_reg = spi_reg(orion_spi, ORION_SPI_DATA_IN_REG);
+ int_reg = spi_reg(orion_spi, ORION_SPI_INT_CAUSE_REG);
+
+ /* clear the interrupt cause register */
+ writel(0x0, int_reg);
+
+ if (tx_buf && *tx_buf)
+ writel(__cpu_to_le16(get_unaligned((*tx_buf)++)), tx_reg);
+ else
+ writel(0, tx_reg);
+
+ if (orion_spi_wait_till_ready(orion_spi) < 0) {
+ dev_err(&spi->dev, "TXS timed out\n");
+ return -1;
+ }
+
+ if (rx_buf && *rx_buf)
+ put_unaligned(__le16_to_cpu(readl(rx_reg)), (*rx_buf)++);
+
+ return 1;
+}
+
+static unsigned int
+orion_spi_write_read(struct spi_device *spi, struct spi_transfer *xfer)
+{
+ unsigned int count;
+ int word_len;
+ struct orion_spi *orion_spi;
+ int cs = spi->chip_select;
+ void __iomem *vaddr;
+
+ word_len = spi->bits_per_word;
+ count = xfer->len;
+
+ orion_spi = spi_master_get_devdata(spi->master);
+
+ /*
+ * Use SPI direct write mode if base address is available
+ * and SPI_CS_WORD flag is not set.
+ * Otherwise fall back to PIO mode for this transfer.
+ */
+ vaddr = orion_spi->child[cs].direct_access.vaddr;
+
+ if (vaddr && xfer->tx_buf && word_len == 8 && (spi->mode & SPI_CS_WORD) == 0) {
+ unsigned int cnt = count / 4;
+ unsigned int rem = count % 4;
+
+ /*
+ * Send the TX-data to the SPI device via the direct
+ * mapped address window
+ */
+ iowrite32_rep(vaddr, xfer->tx_buf, cnt);
+ if (rem) {
+ u32 *buf = (u32 *)xfer->tx_buf;
+
+ iowrite8_rep(vaddr, &buf[cnt], rem);
+ }
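+		/*
+		 * For example, a 10-byte TX buffer is pushed out as two
+		 * 32-bit writes (cnt = 2) followed by two 8-bit writes
+		 * (rem = 2), all targeting the same direct-mapped window.
+		 */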
+
+ return count;
+ }
+
+ if (word_len == 8) {
+ const u8 *tx = xfer->tx_buf;
+ u8 *rx = xfer->rx_buf;
+
+ do {
+ if (orion_spi_write_read_8bit(spi, &tx, &rx) < 0)
+ goto out;
+ count--;
+ spi_delay_exec(&xfer->word_delay, xfer);
+ } while (count);
+ } else if (word_len == 16) {
+ const u16 *tx = xfer->tx_buf;
+ u16 *rx = xfer->rx_buf;
+
+ do {
+ if (orion_spi_write_read_16bit(spi, &tx, &rx) < 0)
+ goto out;
+ count -= 2;
+ spi_delay_exec(&xfer->word_delay, xfer);
+ } while (count);
+ }
+
+out:
+ return xfer->len - count;
+}
+
+static int orion_spi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ int status = 0;
+
+ status = orion_spi_setup_transfer(spi, t);
+ if (status < 0)
+ return status;
+
+ if (t->len)
+ orion_spi_write_read(spi, t);
+
+ return status;
+}
+
+static int orion_spi_setup(struct spi_device *spi)
+{
+ int ret;
+#ifdef CONFIG_PM
+ struct orion_spi *orion_spi = spi_master_get_devdata(spi->master);
+ struct device *dev = orion_spi->dev;
+
+ orion_spi_runtime_resume(dev);
+#endif
+
+ ret = orion_spi_setup_transfer(spi, NULL);
+
+#ifdef CONFIG_PM
+ orion_spi_runtime_suspend(dev);
+#endif
+
+ return ret;
+}
+
+static int orion_spi_reset(struct orion_spi *orion_spi)
+{
+ /* Verify that the CS is deasserted */
+ orion_spi_clrbits(orion_spi, ORION_SPI_IF_CTRL_REG, 0x1);
+
+ /* Don't deassert CS between the direct mapped SPI transfers */
+ writel(0, spi_reg(orion_spi, SPI_DIRECT_WRITE_CONFIG_REG));
+
+ return 0;
+}
+
+static const struct orion_spi_dev orion_spi_dev_data = {
+ .typ = ORION_SPI,
+ .min_divisor = 4,
+ .max_divisor = 30,
+ .prescale_mask = ORION_SPI_CLK_PRESCALE_MASK,
+};
+
+static const struct orion_spi_dev armada_370_spi_dev_data = {
+ .typ = ARMADA_SPI,
+ .min_divisor = 4,
+ .max_divisor = 1920,
+ .max_hz = 50000000,
+ .prescale_mask = ARMADA_SPI_CLK_PRESCALE_MASK,
+};
+
+static const struct orion_spi_dev armada_xp_spi_dev_data = {
+ .typ = ARMADA_SPI,
+ .max_hz = 50000000,
+ .max_divisor = 1920,
+ .prescale_mask = ARMADA_SPI_CLK_PRESCALE_MASK,
+};
+
+static const struct orion_spi_dev armada_375_spi_dev_data = {
+ .typ = ARMADA_SPI,
+ .min_divisor = 15,
+ .max_divisor = 1920,
+ .prescale_mask = ARMADA_SPI_CLK_PRESCALE_MASK,
+};
+
+static const struct orion_spi_dev armada_380_spi_dev_data = {
+ .typ = ARMADA_SPI,
+ .max_hz = 50000000,
+ .max_divisor = 1920,
+ .prescale_mask = ARMADA_SPI_CLK_PRESCALE_MASK,
+ .is_errata_50mhz_ac = true,
+};
+
+static const struct of_device_id orion_spi_of_match_table[] = {
+ {
+ .compatible = "marvell,orion-spi",
+ .data = &orion_spi_dev_data,
+ },
+ {
+ .compatible = "marvell,armada-370-spi",
+ .data = &armada_370_spi_dev_data,
+ },
+ {
+ .compatible = "marvell,armada-375-spi",
+ .data = &armada_375_spi_dev_data,
+ },
+ {
+ .compatible = "marvell,armada-380-spi",
+ .data = &armada_380_spi_dev_data,
+ },
+ {
+ .compatible = "marvell,armada-390-spi",
+ .data = &armada_xp_spi_dev_data,
+ },
+ {
+ .compatible = "marvell,armada-xp-spi",
+ .data = &armada_xp_spi_dev_data,
+ },
+
+ {}
+};
+MODULE_DEVICE_TABLE(of, orion_spi_of_match_table);
+
+static int orion_spi_probe(struct platform_device *pdev)
+{
+ const struct orion_spi_dev *devdata;
+ struct spi_master *master;
+ struct orion_spi *spi;
+ struct resource *r;
+ unsigned long tclk_hz;
+ int status = 0;
+ struct device_node *np;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*spi));
+ if (master == NULL) {
+ dev_dbg(&pdev->dev, "master allocation failed\n");
+ return -ENOMEM;
+ }
+
+ if (pdev->id != -1)
+ master->bus_num = pdev->id;
+ if (pdev->dev.of_node) {
+ u32 cell_index;
+
+ if (!of_property_read_u32(pdev->dev.of_node, "cell-index",
+ &cell_index))
+ master->bus_num = cell_index;
+ }
+
+ /* we support all 4 SPI modes and LSB first option */
+ master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_LSB_FIRST | SPI_CS_WORD;
+ master->set_cs = orion_spi_set_cs;
+ master->transfer_one = orion_spi_transfer_one;
+ master->num_chipselect = ORION_NUM_CHIPSELECTS;
+ master->setup = orion_spi_setup;
+ master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
+ master->auto_runtime_pm = true;
+ master->use_gpio_descriptors = true;
+ master->flags = SPI_MASTER_GPIO_SS;
+
+ platform_set_drvdata(pdev, master);
+
+ spi = spi_master_get_devdata(master);
+ spi->master = master;
+ spi->dev = &pdev->dev;
+
+ devdata = device_get_match_data(&pdev->dev);
+ devdata = devdata ? devdata : &orion_spi_dev_data;
+ spi->devdata = devdata;
+
+ spi->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(spi->clk)) {
+ status = PTR_ERR(spi->clk);
+ goto out;
+ }
+
+ status = clk_prepare_enable(spi->clk);
+ if (status)
+ goto out;
+
+ /* The following clock is only used by some SoCs */
+ spi->axi_clk = devm_clk_get(&pdev->dev, "axi");
+ if (PTR_ERR(spi->axi_clk) == -EPROBE_DEFER) {
+ status = -EPROBE_DEFER;
+ goto out_rel_clk;
+ }
+ if (!IS_ERR(spi->axi_clk))
+ clk_prepare_enable(spi->axi_clk);
+
+ tclk_hz = clk_get_rate(spi->clk);
+
+ /*
+ * With old device tree, armada-370-spi could be used with
+ * Armada XP, however for this SoC the maximum frequency is
+ * 50MHz instead of tclk/4. On Armada 370, tclk cannot be
+ * higher than 200MHz. So, in order to be able to handle both
+ * SoCs, we can take the minimum of 50MHz and tclk/4.
+ */
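+	/*
+	 * Illustrative numbers (hypothetical tclk values): with
+	 * tclk = 250 MHz, tclk/4 = 62.5 MHz, so the 50 MHz cap applies;
+	 * with tclk ~= 166.7 MHz the limit becomes tclk/4 ~= 41.7 MHz.
+	 */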
+ if (of_device_is_compatible(pdev->dev.of_node,
+ "marvell,armada-370-spi"))
+ master->max_speed_hz = min(devdata->max_hz,
+ DIV_ROUND_UP(tclk_hz, devdata->min_divisor));
+ else if (devdata->min_divisor)
+ master->max_speed_hz =
+ DIV_ROUND_UP(tclk_hz, devdata->min_divisor);
+ else
+ master->max_speed_hz = devdata->max_hz;
+ master->min_speed_hz = DIV_ROUND_UP(tclk_hz, devdata->max_divisor);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ spi->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(spi->base)) {
+ status = PTR_ERR(spi->base);
+ goto out_rel_axi_clk;
+ }
+
+ for_each_available_child_of_node(pdev->dev.of_node, np) {
+ struct orion_direct_acc *dir_acc;
+ u32 cs;
+
+ /* Get chip-select number from the "reg" property */
+ status = of_property_read_u32(np, "reg", &cs);
+ if (status) {
+ dev_err(&pdev->dev,
+ "%pOF has no valid 'reg' property (%d)\n",
+ np, status);
+ continue;
+ }
+
+ /*
+ * Check if an address is configured for this SPI device. If
+ * not, the MBus mapping via the 'ranges' property in the 'soc'
+ * node is not configured and this device should not use the
+ * direct mode. In this case, just continue with the next
+ * device.
+ */
+ status = of_address_to_resource(pdev->dev.of_node, cs + 1, r);
+ if (status)
+ continue;
+
+ /*
+ * Only map one page for direct access. This is enough for the
+ * simple TX transfer which only writes to the first word.
+ * This needs to get extended for the direct SPI NOR / SPI NAND
+ * support, once this gets implemented.
+ */
+ dir_acc = &spi->child[cs].direct_access;
+ dir_acc->vaddr = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE);
+ if (!dir_acc->vaddr) {
+ status = -ENOMEM;
+ of_node_put(np);
+ goto out_rel_axi_clk;
+ }
+ dir_acc->size = PAGE_SIZE;
+
+ dev_info(&pdev->dev, "CS%d configured for direct access\n", cs);
+ }
+
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
+ pm_runtime_enable(&pdev->dev);
+
+ status = orion_spi_reset(spi);
+ if (status < 0)
+ goto out_rel_pm;
+
+ master->dev.of_node = pdev->dev.of_node;
+ status = spi_register_master(master);
+ if (status < 0)
+ goto out_rel_pm;
+
+ return status;
+
+out_rel_pm:
+ pm_runtime_disable(&pdev->dev);
+out_rel_axi_clk:
+ clk_disable_unprepare(spi->axi_clk);
+out_rel_clk:
+ clk_disable_unprepare(spi->clk);
+out:
+ spi_master_put(master);
+ return status;
+}
+
+
+static int orion_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct orion_spi *spi = spi_master_get_devdata(master);
+
+ pm_runtime_get_sync(&pdev->dev);
+ clk_disable_unprepare(spi->axi_clk);
+ clk_disable_unprepare(spi->clk);
+
+ spi_unregister_master(master);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+MODULE_ALIAS("platform:" DRIVER_NAME);
+
+#ifdef CONFIG_PM
+static int orion_spi_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct orion_spi *spi = spi_master_get_devdata(master);
+
+ clk_disable_unprepare(spi->axi_clk);
+ clk_disable_unprepare(spi->clk);
+ return 0;
+}
+
+static int orion_spi_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct orion_spi *spi = spi_master_get_devdata(master);
+
+ if (!IS_ERR(spi->axi_clk))
+ clk_prepare_enable(spi->axi_clk);
+ return clk_prepare_enable(spi->clk);
+}
+#endif
+
+static const struct dev_pm_ops orion_spi_pm_ops = {
+ SET_RUNTIME_PM_OPS(orion_spi_runtime_suspend,
+ orion_spi_runtime_resume,
+ NULL)
+};
+
+static struct platform_driver orion_spi_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .pm = &orion_spi_pm_ops,
+ .of_match_table = of_match_ptr(orion_spi_of_match_table),
+ },
+ .probe = orion_spi_probe,
+ .remove = orion_spi_remove,
+};
+
+module_platform_driver(orion_spi_driver);
+
+MODULE_DESCRIPTION("Orion SPI driver");
+MODULE_AUTHOR("Shadi Ammouri <shadi@marvell.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-pic32-sqi.c b/drivers/spi/spi-pic32-sqi.c
new file mode 100644
index 000000000..86ad17597
--- /dev/null
+++ b/drivers/spi/spi-pic32-sqi.c
@@ -0,0 +1,715 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PIC32 Quad SPI controller driver.
+ *
+ * Purna Chandra Mandal <purna.mandal@microchip.com>
+ * Copyright (c) 2016, Microchip Technology Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+
+/* SQI registers */
+#define PESQI_XIP_CONF1_REG 0x00
+#define PESQI_XIP_CONF2_REG 0x04
+#define PESQI_CONF_REG 0x08
+#define PESQI_CTRL_REG 0x0C
+#define PESQI_CLK_CTRL_REG 0x10
+#define PESQI_CMD_THRES_REG 0x14
+#define PESQI_INT_THRES_REG 0x18
+#define PESQI_INT_ENABLE_REG 0x1C
+#define PESQI_INT_STAT_REG 0x20
+#define PESQI_TX_DATA_REG 0x24
+#define PESQI_RX_DATA_REG 0x28
+#define PESQI_STAT1_REG 0x2C
+#define PESQI_STAT2_REG 0x30
+#define PESQI_BD_CTRL_REG 0x34
+#define PESQI_BD_CUR_ADDR_REG 0x38
+#define PESQI_BD_BASE_ADDR_REG 0x40
+#define PESQI_BD_STAT_REG 0x44
+#define PESQI_BD_POLL_CTRL_REG 0x48
+#define PESQI_BD_TX_DMA_STAT_REG 0x4C
+#define PESQI_BD_RX_DMA_STAT_REG 0x50
+#define PESQI_THRES_REG 0x54
+#define PESQI_INT_SIGEN_REG 0x58
+
+/* PESQI_CONF_REG fields */
+#define PESQI_MODE 0x7
+#define PESQI_MODE_BOOT 0
+#define PESQI_MODE_PIO 1
+#define PESQI_MODE_DMA 2
+#define PESQI_MODE_XIP 3
+#define PESQI_MODE_SHIFT 0
+#define PESQI_CPHA BIT(3)
+#define PESQI_CPOL BIT(4)
+#define PESQI_LSBF BIT(5)
+#define PESQI_RXLATCH BIT(7)
+#define PESQI_SERMODE BIT(8)
+#define PESQI_WP_EN BIT(9)
+#define PESQI_HOLD_EN BIT(10)
+#define PESQI_BURST_EN BIT(12)
+#define PESQI_CS_CTRL_HW BIT(15)
+#define PESQI_SOFT_RESET BIT(16)
+#define PESQI_LANES_SHIFT 20
+#define PESQI_SINGLE_LANE 0
+#define PESQI_DUAL_LANE 1
+#define PESQI_QUAD_LANE 2
+#define PESQI_CSEN_SHIFT 24
+#define PESQI_EN BIT(23)
+
+/* PESQI_CLK_CTRL_REG fields */
+#define PESQI_CLK_EN BIT(0)
+#define PESQI_CLK_STABLE BIT(1)
+#define PESQI_CLKDIV_SHIFT 8
+#define PESQI_CLKDIV 0xff
+
+/* PESQI_INT_THR/CMD_THR_REG */
+#define PESQI_TXTHR_MASK 0x1f
+#define PESQI_TXTHR_SHIFT 8
+#define PESQI_RXTHR_MASK 0x1f
+#define PESQI_RXTHR_SHIFT 0
+
+/* PESQI_INT_EN/INT_STAT/INT_SIG_EN_REG */
+#define PESQI_TXEMPTY BIT(0)
+#define PESQI_TXFULL BIT(1)
+#define PESQI_TXTHR BIT(2)
+#define PESQI_RXEMPTY BIT(3)
+#define PESQI_RXFULL BIT(4)
+#define PESQI_RXTHR BIT(5)
+#define PESQI_BDDONE BIT(9) /* BD processing complete */
+#define PESQI_PKTCOMP BIT(10) /* packet processing complete */
+#define PESQI_DMAERR BIT(11) /* error */
+
+/* PESQI_BD_CTRL_REG */
+#define PESQI_DMA_EN BIT(0) /* enable DMA engine */
+#define PESQI_POLL_EN BIT(1) /* enable polling */
+#define PESQI_BDP_START BIT(2) /* start BD processor */
+
+/* PESQI controller buffer descriptor */
+struct buf_desc {
+ u32 bd_ctrl; /* control */
+ u32 bd_status; /* reserved */
+ u32 bd_addr; /* DMA buffer addr */
+ u32 bd_nextp; /* next item in chain */
+};
+
+/* bd_ctrl */
+#define BD_BUFLEN 0x1ff
+#define BD_CBD_INT_EN BIT(16) /* Current BD is processed */
+#define BD_PKT_INT_EN BIT(17) /* All BDs of PKT processed */
+#define BD_LIFM BIT(18) /* last data of pkt */
+#define BD_LAST BIT(19) /* end of list */
+#define BD_DATA_RECV BIT(20) /* receive data */
+#define BD_DDR BIT(21) /* DDR mode */
+#define BD_DUAL BIT(22) /* Dual SPI */
+#define BD_QUAD BIT(23) /* Quad SPI */
+#define BD_LSBF BIT(25) /* LSB First */
+#define BD_STAT_CHECK BIT(27) /* Status poll */
+#define BD_DEVSEL_SHIFT 28 /* CS */
+#define BD_CS_DEASSERT BIT(30) /* de-assert CS after current BD */
+#define BD_EN BIT(31) /* BD owned by H/W */
+
+/**
+ * struct ring_desc - Representation of SQI ring descriptor
+ * @list: list element to add to free or used list.
+ * @bd: PESQI controller buffer descriptor
+ * @bd_dma: DMA address of PESQI controller buffer descriptor
+ * @xfer_len: transfer length
+ */
+struct ring_desc {
+ struct list_head list;
+ struct buf_desc *bd;
+ dma_addr_t bd_dma;
+ u32 xfer_len;
+};
+
+/* Global constants */
+#define PESQI_BD_BUF_LEN_MAX 256
+#define PESQI_BD_COUNT 256 /* max 64KB data per spi message */
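+/* 256 descriptors x 256 bytes per buffer = 64 KiB per message */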
+
+struct pic32_sqi {
+ void __iomem *regs;
+ struct clk *sys_clk;
+ struct clk *base_clk; /* drives spi clock */
+ struct spi_master *master;
+ int irq;
+ struct completion xfer_done;
+ struct ring_desc *ring;
+ void *bd;
+ dma_addr_t bd_dma;
+ struct list_head bd_list_free; /* free */
+ struct list_head bd_list_used; /* allocated */
+ struct spi_device *cur_spi;
+ u32 cur_speed;
+ u8 cur_mode;
+};
+
+static inline void pic32_setbits(void __iomem *reg, u32 set)
+{
+ writel(readl(reg) | set, reg);
+}
+
+static inline void pic32_clrbits(void __iomem *reg, u32 clr)
+{
+ writel(readl(reg) & ~clr, reg);
+}
+
+static int pic32_sqi_set_clk_rate(struct pic32_sqi *sqi, u32 sck)
+{
+ u32 val, div;
+
+ /* div = base_clk / (2 * spi_clk) */
+ div = clk_get_rate(sqi->base_clk) / (2 * sck);
+ div &= PESQI_CLKDIV;
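+	/*
+	 * Example (hypothetical base_clk): with base_clk = 200 MHz and
+	 * sck = 25 MHz, div = 4 and the generated clock is
+	 * 200 MHz / (2 * 4) = 25 MHz. Note the integer division
+	 * truncates, so the resulting clock can end up slightly above
+	 * the requested rate when the ratio is not exact.
+	 */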
+
+ val = readl(sqi->regs + PESQI_CLK_CTRL_REG);
+ /* apply new divider */
+ val &= ~(PESQI_CLK_STABLE | (PESQI_CLKDIV << PESQI_CLKDIV_SHIFT));
+ val |= div << PESQI_CLKDIV_SHIFT;
+ writel(val, sqi->regs + PESQI_CLK_CTRL_REG);
+
+ /* wait for stability */
+ return readl_poll_timeout(sqi->regs + PESQI_CLK_CTRL_REG, val,
+ val & PESQI_CLK_STABLE, 1, 5000);
+}
+
+static inline void pic32_sqi_enable_int(struct pic32_sqi *sqi)
+{
+ u32 mask = PESQI_DMAERR | PESQI_BDDONE | PESQI_PKTCOMP;
+
+ writel(mask, sqi->regs + PESQI_INT_ENABLE_REG);
+ /* INT_SIGEN works as interrupt-gate to INTR line */
+ writel(mask, sqi->regs + PESQI_INT_SIGEN_REG);
+}
+
+static inline void pic32_sqi_disable_int(struct pic32_sqi *sqi)
+{
+ writel(0, sqi->regs + PESQI_INT_ENABLE_REG);
+ writel(0, sqi->regs + PESQI_INT_SIGEN_REG);
+}
+
+static irqreturn_t pic32_sqi_isr(int irq, void *dev_id)
+{
+ struct pic32_sqi *sqi = dev_id;
+ u32 enable, status;
+
+ enable = readl(sqi->regs + PESQI_INT_ENABLE_REG);
+ status = readl(sqi->regs + PESQI_INT_STAT_REG);
+
+	/* check for spurious interrupt */
+ if (!status)
+ return IRQ_NONE;
+
+ if (status & PESQI_DMAERR) {
+ enable = 0;
+ goto irq_done;
+ }
+
+ if (status & PESQI_TXTHR)
+ enable &= ~(PESQI_TXTHR | PESQI_TXFULL | PESQI_TXEMPTY);
+
+ if (status & PESQI_RXTHR)
+ enable &= ~(PESQI_RXTHR | PESQI_RXFULL | PESQI_RXEMPTY);
+
+ if (status & PESQI_BDDONE)
+ enable &= ~PESQI_BDDONE;
+
+ /* packet processing completed */
+ if (status & PESQI_PKTCOMP) {
+ /* mask all interrupts */
+ enable = 0;
+		/* complete transaction */
+ complete(&sqi->xfer_done);
+ }
+
+irq_done:
+ /* interrupts are sticky, so mask when handled */
+ writel(enable, sqi->regs + PESQI_INT_ENABLE_REG);
+
+ return IRQ_HANDLED;
+}
+
+static struct ring_desc *ring_desc_get(struct pic32_sqi *sqi)
+{
+ struct ring_desc *rdesc;
+
+ if (list_empty(&sqi->bd_list_free))
+ return NULL;
+
+ rdesc = list_first_entry(&sqi->bd_list_free, struct ring_desc, list);
+ list_move_tail(&rdesc->list, &sqi->bd_list_used);
+ return rdesc;
+}
+
+static void ring_desc_put(struct pic32_sqi *sqi, struct ring_desc *rdesc)
+{
+ list_move(&rdesc->list, &sqi->bd_list_free);
+}
+
+static int pic32_sqi_one_transfer(struct pic32_sqi *sqi,
+ struct spi_message *mesg,
+ struct spi_transfer *xfer)
+{
+ struct spi_device *spi = mesg->spi;
+ struct scatterlist *sg, *sgl;
+ struct ring_desc *rdesc;
+ struct buf_desc *bd;
+ int nents, i;
+ u32 bd_ctrl;
+ u32 nbits;
+
+ /* Device selection */
+ bd_ctrl = spi->chip_select << BD_DEVSEL_SHIFT;
+
+ /* half-duplex: select transfer buffer, direction and lane */
+ if (xfer->rx_buf) {
+ bd_ctrl |= BD_DATA_RECV;
+ nbits = xfer->rx_nbits;
+ sgl = xfer->rx_sg.sgl;
+ nents = xfer->rx_sg.nents;
+ } else {
+ nbits = xfer->tx_nbits;
+ sgl = xfer->tx_sg.sgl;
+ nents = xfer->tx_sg.nents;
+ }
+
+ if (nbits & SPI_NBITS_QUAD)
+ bd_ctrl |= BD_QUAD;
+ else if (nbits & SPI_NBITS_DUAL)
+ bd_ctrl |= BD_DUAL;
+
+ /* LSB first */
+ if (spi->mode & SPI_LSB_FIRST)
+ bd_ctrl |= BD_LSBF;
+
+ /* ownership to hardware */
+ bd_ctrl |= BD_EN;
+
+ for_each_sg(sgl, sg, nents, i) {
+ /* get ring descriptor */
+ rdesc = ring_desc_get(sqi);
+ if (!rdesc)
+ break;
+
+ bd = rdesc->bd;
+
+ /* BD CTRL: length */
+ rdesc->xfer_len = sg_dma_len(sg);
+ bd->bd_ctrl = bd_ctrl;
+ bd->bd_ctrl |= rdesc->xfer_len;
+
+ /* BD STAT */
+ bd->bd_status = 0;
+
+ /* BD BUFFER ADDRESS */
+ bd->bd_addr = sg->dma_address;
+ }
+
+ return 0;
+}
+
+static int pic32_sqi_prepare_hardware(struct spi_master *master)
+{
+ struct pic32_sqi *sqi = spi_master_get_devdata(master);
+
+ /* enable spi interface */
+ pic32_setbits(sqi->regs + PESQI_CONF_REG, PESQI_EN);
+ /* enable spi clk */
+ pic32_setbits(sqi->regs + PESQI_CLK_CTRL_REG, PESQI_CLK_EN);
+
+ return 0;
+}
+
+static bool pic32_sqi_can_dma(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *x)
+{
+ /* Do DMA irrespective of transfer size */
+ return true;
+}
+
+static int pic32_sqi_one_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct spi_device *spi = msg->spi;
+ struct ring_desc *rdesc, *next;
+ struct spi_transfer *xfer;
+ struct pic32_sqi *sqi;
+ int ret = 0, mode;
+ unsigned long timeout;
+ u32 val;
+
+ sqi = spi_master_get_devdata(master);
+
+ reinit_completion(&sqi->xfer_done);
+ msg->actual_length = 0;
+
+ /* We can't handle spi_transfer specific "speed_hz", "bits_per_word"
+	 * and "delay_usecs". However, spi_device specific speed and mode
+	 * changes are best handled when switching the chip-select.
+ */
+ if (sqi->cur_spi != spi) {
+ /* set spi speed */
+ if (sqi->cur_speed != spi->max_speed_hz) {
+ sqi->cur_speed = spi->max_speed_hz;
+ ret = pic32_sqi_set_clk_rate(sqi, spi->max_speed_hz);
+ if (ret)
+ dev_warn(&spi->dev, "set_clk, %d\n", ret);
+ }
+
+ /* set spi mode */
+ mode = spi->mode & (SPI_MODE_3 | SPI_LSB_FIRST);
+ if (sqi->cur_mode != mode) {
+ val = readl(sqi->regs + PESQI_CONF_REG);
+ val &= ~(PESQI_CPOL | PESQI_CPHA | PESQI_LSBF);
+ if (mode & SPI_CPOL)
+ val |= PESQI_CPOL;
+ if (mode & SPI_LSB_FIRST)
+ val |= PESQI_LSBF;
+ val |= PESQI_CPHA;
+ writel(val, sqi->regs + PESQI_CONF_REG);
+
+ sqi->cur_mode = mode;
+ }
+ sqi->cur_spi = spi;
+ }
+
+ /* prepare hardware desc-list(BD) for transfer(s) */
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ ret = pic32_sqi_one_transfer(sqi, msg, xfer);
+ if (ret) {
+ dev_err(&spi->dev, "xfer %p err\n", xfer);
+ goto xfer_out;
+ }
+ }
+
+ /* BDs are prepared and chained. Now mark LAST_BD, CS_DEASSERT at last
+ * element of the list.
+ */
+ rdesc = list_last_entry(&sqi->bd_list_used, struct ring_desc, list);
+ rdesc->bd->bd_ctrl |= BD_LAST | BD_CS_DEASSERT |
+ BD_LIFM | BD_PKT_INT_EN;
+
+ /* set base address BD list for DMA engine */
+ rdesc = list_first_entry(&sqi->bd_list_used, struct ring_desc, list);
+ writel(rdesc->bd_dma, sqi->regs + PESQI_BD_BASE_ADDR_REG);
+
+ /* enable interrupt */
+ pic32_sqi_enable_int(sqi);
+
+ /* enable DMA engine */
+ val = PESQI_DMA_EN | PESQI_POLL_EN | PESQI_BDP_START;
+ writel(val, sqi->regs + PESQI_BD_CTRL_REG);
+
+ /* wait for xfer completion */
+ timeout = wait_for_completion_timeout(&sqi->xfer_done, 5 * HZ);
+ if (timeout == 0) {
+		dev_err(&sqi->master->dev, "wait timed out/interrupted\n");
+ ret = -ETIMEDOUT;
+ msg->status = ret;
+ } else {
+ /* success */
+ msg->status = 0;
+ ret = 0;
+ }
+
+ /* disable DMA */
+ writel(0, sqi->regs + PESQI_BD_CTRL_REG);
+
+ pic32_sqi_disable_int(sqi);
+
+xfer_out:
+ list_for_each_entry_safe_reverse(rdesc, next,
+ &sqi->bd_list_used, list) {
+ /* Update total byte transferred */
+ msg->actual_length += rdesc->xfer_len;
+ /* release ring descr */
+ ring_desc_put(sqi, rdesc);
+ }
+ spi_finalize_current_message(spi->master);
+
+ return ret;
+}
+
+static int pic32_sqi_unprepare_hardware(struct spi_master *master)
+{
+ struct pic32_sqi *sqi = spi_master_get_devdata(master);
+
+ /* disable clk */
+ pic32_clrbits(sqi->regs + PESQI_CLK_CTRL_REG, PESQI_CLK_EN);
+ /* disable spi */
+ pic32_clrbits(sqi->regs + PESQI_CONF_REG, PESQI_EN);
+
+ return 0;
+}
+
+static int ring_desc_ring_alloc(struct pic32_sqi *sqi)
+{
+ struct ring_desc *rdesc;
+ struct buf_desc *bd;
+ int i;
+
+ /* allocate coherent DMAable memory for hardware buffer descriptors. */
+ sqi->bd = dma_alloc_coherent(&sqi->master->dev,
+ sizeof(*bd) * PESQI_BD_COUNT,
+ &sqi->bd_dma, GFP_KERNEL);
+ if (!sqi->bd) {
+ dev_err(&sqi->master->dev, "failed allocating dma buffer\n");
+ return -ENOMEM;
+ }
+
+ /* allocate software ring descriptors */
+ sqi->ring = kcalloc(PESQI_BD_COUNT, sizeof(*rdesc), GFP_KERNEL);
+ if (!sqi->ring) {
+ dma_free_coherent(&sqi->master->dev,
+ sizeof(*bd) * PESQI_BD_COUNT,
+ sqi->bd, sqi->bd_dma);
+ return -ENOMEM;
+ }
+
+ bd = (struct buf_desc *)sqi->bd;
+
+ INIT_LIST_HEAD(&sqi->bd_list_free);
+ INIT_LIST_HEAD(&sqi->bd_list_used);
+
+ /* initialize ring-desc */
+ for (i = 0, rdesc = sqi->ring; i < PESQI_BD_COUNT; i++, rdesc++) {
+ INIT_LIST_HEAD(&rdesc->list);
+ rdesc->bd = &bd[i];
+ rdesc->bd_dma = sqi->bd_dma + (void *)&bd[i] - (void *)bd;
+ list_add_tail(&rdesc->list, &sqi->bd_list_free);
+ }
+
+ /* Prepare BD: chain to next BD(s) */
+ for (i = 0, rdesc = sqi->ring; i < PESQI_BD_COUNT - 1; i++)
+ bd[i].bd_nextp = rdesc[i + 1].bd_dma;
+ bd[PESQI_BD_COUNT - 1].bd_nextp = 0;
+
+ return 0;
+}
+
+static void ring_desc_ring_free(struct pic32_sqi *sqi)
+{
+ dma_free_coherent(&sqi->master->dev,
+ sizeof(struct buf_desc) * PESQI_BD_COUNT,
+ sqi->bd, sqi->bd_dma);
+ kfree(sqi->ring);
+}
+
+static void pic32_sqi_hw_init(struct pic32_sqi *sqi)
+{
+ unsigned long flags;
+ u32 val;
+
+	/* A soft-reset of the PESQI controller triggers interrupts.
+	 * We are not yet ready to handle them, so disable CPU
+	 * interrupts for the time being.
+ */
+ local_irq_save(flags);
+
+ /* assert soft-reset */
+ writel(PESQI_SOFT_RESET, sqi->regs + PESQI_CONF_REG);
+
+ /* wait until clear */
+ readl_poll_timeout_atomic(sqi->regs + PESQI_CONF_REG, val,
+ !(val & PESQI_SOFT_RESET), 1, 5000);
+
+ /* disable all interrupts */
+ pic32_sqi_disable_int(sqi);
+
+	/* Now it is safe to re-enable CPU interrupts */
+ local_irq_restore(flags);
+
+ /* tx and rx fifo interrupt threshold */
+ val = readl(sqi->regs + PESQI_CMD_THRES_REG);
+ val &= ~(PESQI_TXTHR_MASK << PESQI_TXTHR_SHIFT);
+ val &= ~(PESQI_RXTHR_MASK << PESQI_RXTHR_SHIFT);
+ val |= (1U << PESQI_TXTHR_SHIFT) | (1U << PESQI_RXTHR_SHIFT);
+ writel(val, sqi->regs + PESQI_CMD_THRES_REG);
+
+ val = readl(sqi->regs + PESQI_INT_THRES_REG);
+ val &= ~(PESQI_TXTHR_MASK << PESQI_TXTHR_SHIFT);
+ val &= ~(PESQI_RXTHR_MASK << PESQI_RXTHR_SHIFT);
+ val |= (1U << PESQI_TXTHR_SHIFT) | (1U << PESQI_RXTHR_SHIFT);
+ writel(val, sqi->regs + PESQI_INT_THRES_REG);
+
+ /* default configuration */
+ val = readl(sqi->regs + PESQI_CONF_REG);
+
+ /* set mode: DMA */
+ val &= ~PESQI_MODE;
+ val |= PESQI_MODE_DMA << PESQI_MODE_SHIFT;
+ writel(val, sqi->regs + PESQI_CONF_REG);
+
+ /* DATAEN - SQIID0-ID3 */
+ val |= PESQI_QUAD_LANE << PESQI_LANES_SHIFT;
+
+ /* burst/INCR4 enable */
+ val |= PESQI_BURST_EN;
+
+ /* CSEN - all CS */
+ val |= 3U << PESQI_CSEN_SHIFT;
+ writel(val, sqi->regs + PESQI_CONF_REG);
+
+ /* write poll count */
+ writel(0, sqi->regs + PESQI_BD_POLL_CTRL_REG);
+
+ sqi->cur_speed = 0;
+ sqi->cur_mode = -1;
+}
+
+static int pic32_sqi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct pic32_sqi *sqi;
+ int ret;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*sqi));
+ if (!master)
+ return -ENOMEM;
+
+ sqi = spi_master_get_devdata(master);
+ sqi->master = master;
+
+ sqi->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(sqi->regs)) {
+ ret = PTR_ERR(sqi->regs);
+ goto err_free_master;
+ }
+
+ /* irq */
+ sqi->irq = platform_get_irq(pdev, 0);
+ if (sqi->irq < 0) {
+ ret = sqi->irq;
+ goto err_free_master;
+ }
+
+ /* clocks */
+ sqi->sys_clk = devm_clk_get(&pdev->dev, "reg_ck");
+ if (IS_ERR(sqi->sys_clk)) {
+ ret = PTR_ERR(sqi->sys_clk);
+		dev_err(&pdev->dev, "no sys_clk?\n");
+ goto err_free_master;
+ }
+
+ sqi->base_clk = devm_clk_get(&pdev->dev, "spi_ck");
+ if (IS_ERR(sqi->base_clk)) {
+ ret = PTR_ERR(sqi->base_clk);
+		dev_err(&pdev->dev, "no base clk?\n");
+ goto err_free_master;
+ }
+
+ ret = clk_prepare_enable(sqi->sys_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "sys clk enable failed\n");
+ goto err_free_master;
+ }
+
+ ret = clk_prepare_enable(sqi->base_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "base clk enable failed\n");
+ clk_disable_unprepare(sqi->sys_clk);
+ goto err_free_master;
+ }
+
+ init_completion(&sqi->xfer_done);
+
+ /* initialize hardware */
+ pic32_sqi_hw_init(sqi);
+
+ /* allocate buffers & descriptors */
+ ret = ring_desc_ring_alloc(sqi);
+ if (ret) {
+ dev_err(&pdev->dev, "ring alloc failed\n");
+ goto err_disable_clk;
+ }
+
+ /* install irq handlers */
+ ret = request_irq(sqi->irq, pic32_sqi_isr, 0,
+ dev_name(&pdev->dev), sqi);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "request_irq(%d), failed\n", sqi->irq);
+ goto err_free_ring;
+ }
+
+ /* register master */
+ master->num_chipselect = 2;
+ master->max_speed_hz = clk_get_rate(sqi->base_clk);
+ master->dma_alignment = 32;
+ master->max_dma_len = PESQI_BD_BUF_LEN_MAX;
+ master->dev.of_node = pdev->dev.of_node;
+ master->mode_bits = SPI_MODE_3 | SPI_MODE_0 | SPI_TX_DUAL |
+ SPI_RX_DUAL | SPI_TX_QUAD | SPI_RX_QUAD;
+ master->flags = SPI_MASTER_HALF_DUPLEX;
+ master->can_dma = pic32_sqi_can_dma;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
+ master->transfer_one_message = pic32_sqi_one_message;
+ master->prepare_transfer_hardware = pic32_sqi_prepare_hardware;
+ master->unprepare_transfer_hardware = pic32_sqi_unprepare_hardware;
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret) {
+ dev_err(&master->dev, "failed registering spi master\n");
+ free_irq(sqi->irq, sqi);
+ goto err_free_ring;
+ }
+
+ platform_set_drvdata(pdev, sqi);
+
+ return 0;
+
+err_free_ring:
+ ring_desc_ring_free(sqi);
+
+err_disable_clk:
+ clk_disable_unprepare(sqi->base_clk);
+ clk_disable_unprepare(sqi->sys_clk);
+
+err_free_master:
+ spi_master_put(master);
+ return ret;
+}
+
+static int pic32_sqi_remove(struct platform_device *pdev)
+{
+ struct pic32_sqi *sqi = platform_get_drvdata(pdev);
+
+ /* release resources */
+ free_irq(sqi->irq, sqi);
+ ring_desc_ring_free(sqi);
+
+ /* disable clk */
+ clk_disable_unprepare(sqi->base_clk);
+ clk_disable_unprepare(sqi->sys_clk);
+
+ return 0;
+}
+
+static const struct of_device_id pic32_sqi_of_ids[] = {
+ {.compatible = "microchip,pic32mzda-sqi",},
+ {},
+};
+MODULE_DEVICE_TABLE(of, pic32_sqi_of_ids);
+
+static struct platform_driver pic32_sqi_driver = {
+ .driver = {
+ .name = "sqi-pic32",
+ .of_match_table = of_match_ptr(pic32_sqi_of_ids),
+ },
+ .probe = pic32_sqi_probe,
+ .remove = pic32_sqi_remove,
+};
+
+module_platform_driver(pic32_sqi_driver);
+
+MODULE_AUTHOR("Purna Chandra Mandal <purna.mandal@microchip.com>");
+MODULE_DESCRIPTION("Microchip SPI driver for PIC32 SQI controller.");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-pic32.c b/drivers/spi/spi-pic32.c
new file mode 100644
index 000000000..7e5c09a7d
--- /dev/null
+++ b/drivers/spi/spi-pic32.c
@@ -0,0 +1,878 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Microchip PIC32 SPI controller driver.
+ *
+ * Purna Chandra Mandal <purna.mandal@microchip.com>
+ * Copyright (c) 2016, Microchip Technology Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/highmem.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+
+/* SPI controller registers */
+struct pic32_spi_regs {
+ u32 ctrl;
+ u32 ctrl_clr;
+ u32 ctrl_set;
+ u32 ctrl_inv;
+ u32 status;
+ u32 status_clr;
+ u32 status_set;
+ u32 status_inv;
+ u32 buf;
+ u32 dontuse[3];
+ u32 baud;
+ u32 dontuse2[3];
+ u32 ctrl2;
+ u32 ctrl2_clr;
+ u32 ctrl2_set;
+ u32 ctrl2_inv;
+};
+
+/* Bit fields of SPI Control Register */
+#define CTRL_RX_INT_SHIFT 0 /* Rx interrupt generation */
+#define RX_FIFO_EMPTY 0
+#define RX_FIFO_NOT_EMPTY 1 /* not empty */
+#define RX_FIFO_HALF_FULL 2 /* full by half or more */
+#define RX_FIFO_FULL 3 /* completely full */
+
+#define CTRL_TX_INT_SHIFT 2 /* TX interrupt generation */
+#define TX_FIFO_ALL_EMPTY 0 /* completely empty */
+#define TX_FIFO_EMPTY 1 /* empty */
+#define TX_FIFO_HALF_EMPTY 2 /* empty by half or more */
+#define TX_FIFO_NOT_FULL 3 /* at least one empty */
+
+#define CTRL_MSTEN BIT(5) /* enable master mode */
+#define CTRL_CKP BIT(6) /* active low */
+#define CTRL_CKE BIT(8) /* Tx on falling edge */
+#define CTRL_SMP BIT(9) /* Rx at middle or end of tx */
+#define CTRL_BPW_MASK 0x03 /* bits per word/sample */
+#define CTRL_BPW_SHIFT 10
+#define PIC32_BPW_8 0
+#define PIC32_BPW_16 1
+#define PIC32_BPW_32 2
+#define CTRL_SIDL BIT(13) /* sleep when idle */
+#define CTRL_ON BIT(15) /* enable macro */
+#define CTRL_ENHBUF BIT(16) /* enable enhanced buffering */
+#define CTRL_MCLKSEL BIT(23) /* select clock source */
+#define CTRL_MSSEN BIT(28) /* macro driven /SS */
+#define CTRL_FRMEN BIT(31) /* enable framing mode */
+
+/* Bit fields of SPI Status Register */
+#define STAT_RF_EMPTY BIT(5) /* RX Fifo empty */
+#define STAT_RX_OV BIT(6) /* err, s/w needs to clear */
+#define STAT_TX_UR BIT(8) /* UR in Framed SPI modes */
+#define STAT_FRM_ERR BIT(12) /* Multiple Frame Sync pulse */
+#define STAT_TF_LVL_MASK 0x1F
+#define STAT_TF_LVL_SHIFT 16
+#define STAT_RF_LVL_MASK 0x1F
+#define STAT_RF_LVL_SHIFT 24
+
+/* Bit fields of SPI Baud Register */
+#define BAUD_MASK 0x1ff
+
+/* Bit fields of SPI Control2 Register */
+#define CTRL2_TX_UR_EN BIT(10) /* Enable int on Tx under-run */
+#define CTRL2_RX_OV_EN BIT(11) /* Enable int on Rx over-run */
+#define CTRL2_FRM_ERR_EN BIT(12) /* Enable frame err int */
+
+/* Minimum DMA transfer size */
+#define PIC32_DMA_LEN_MIN 64
+
+struct pic32_spi {
+ dma_addr_t dma_base;
+ struct pic32_spi_regs __iomem *regs;
+ int fault_irq;
+ int rx_irq;
+ int tx_irq;
+ u32 fifo_n_byte; /* FIFO depth in bytes */
+ struct clk *clk;
+ struct spi_master *master;
+ /* Current controller setting */
+ u32 speed_hz; /* spi-clk rate */
+ u32 mode;
+ u32 bits_per_word;
+ u32 fifo_n_elm; /* FIFO depth in words */
+#define PIC32F_DMA_PREP 0 /* DMA channels configured */
+ unsigned long flags;
+ /* Current transfer state */
+ struct completion xfer_done;
+ /* PIO transfer specific */
+ const void *tx;
+ const void *tx_end;
+ const void *rx;
+ const void *rx_end;
+ int len;
+ void (*rx_fifo)(struct pic32_spi *);
+ void (*tx_fifo)(struct pic32_spi *);
+};
+
+static inline void pic32_spi_enable(struct pic32_spi *pic32s)
+{
+ writel(CTRL_ON | CTRL_SIDL, &pic32s->regs->ctrl_set);
+}
+
+static inline void pic32_spi_disable(struct pic32_spi *pic32s)
+{
+ writel(CTRL_ON | CTRL_SIDL, &pic32s->regs->ctrl_clr);
+
+ /* avoid SPI registers read/write at immediate next CPU clock */
+ ndelay(20);
+}
+
+static void pic32_spi_set_clk_rate(struct pic32_spi *pic32s, u32 spi_ck)
+{
+ u32 div;
+
+	/* div = clk_in / (2 * spi_ck) - 1 */
+ div = DIV_ROUND_CLOSEST(clk_get_rate(pic32s->clk), 2 * spi_ck) - 1;
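+	/*
+	 * Example (hypothetical clk_in): with clk_in = 100 MHz and
+	 * spi_ck = 10 MHz, div = 100 MHz / 20 MHz - 1 = 4, and the
+	 * generated clock is clk_in / (2 * (div + 1)) = 10 MHz.
+	 */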
+
+ writel(div & BAUD_MASK, &pic32s->regs->baud);
+}
+
+static inline u32 pic32_rx_fifo_level(struct pic32_spi *pic32s)
+{
+ u32 sr = readl(&pic32s->regs->status);
+
+ return (sr >> STAT_RF_LVL_SHIFT) & STAT_RF_LVL_MASK;
+}
+
+static inline u32 pic32_tx_fifo_level(struct pic32_spi *pic32s)
+{
+ u32 sr = readl(&pic32s->regs->status);
+
+ return (sr >> STAT_TF_LVL_SHIFT) & STAT_TF_LVL_MASK;
+}
+
+/* Return the max entries we can fill into tx fifo */
+static u32 pic32_tx_max(struct pic32_spi *pic32s, int n_bytes)
+{
+ u32 tx_left, tx_room, rxtx_gap;
+
+ tx_left = (pic32s->tx_end - pic32s->tx) / n_bytes;
+ tx_room = pic32s->fifo_n_elm - pic32_tx_fifo_level(pic32s);
+
+ /*
+	 * Another concern is the tx/rx mismatch: we considered using
+	 * (pic32s->fifo_n_byte - rxfl - txfl) as the maximum value for
+	 * tx, but it doesn't cover the data that has already left the
+	 * tx/rx fifos and is sitting in the shift registers. So the
+	 * limit is enforced from the software point of view instead.
+ */
+ rxtx_gap = ((pic32s->rx_end - pic32s->rx) -
+ (pic32s->tx_end - pic32s->tx)) / n_bytes;
+ return min3(tx_left, tx_room, (u32)(pic32s->fifo_n_elm - rxtx_gap));
+}
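+
+/*
+ * For instance (made-up numbers): with a 16-entry FIFO, 100 words left
+ * to send (tx_left), 12 free TX FIFO slots (tx_room) and 10 words
+ * already in flight (rxtx_gap), only min3(100, 12, 16 - 10) = 6 new
+ * words may be queued, so the RX side can never overflow.
+ */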
+
+/* Return the max entries we should read out of rx fifo */
+static u32 pic32_rx_max(struct pic32_spi *pic32s, int n_bytes)
+{
+ u32 rx_left = (pic32s->rx_end - pic32s->rx) / n_bytes;
+
+ return min_t(u32, rx_left, pic32_rx_fifo_level(pic32s));
+}
+
+#define BUILD_SPI_FIFO_RW(__name, __type, __bwl) \
+static void pic32_spi_rx_##__name(struct pic32_spi *pic32s) \
+{ \
+ __type v; \
+ u32 mx = pic32_rx_max(pic32s, sizeof(__type)); \
+ for (; mx; mx--) { \
+ v = read##__bwl(&pic32s->regs->buf); \
+ if (pic32s->rx_end - pic32s->len) \
+ *(__type *)(pic32s->rx) = v; \
+ pic32s->rx += sizeof(__type); \
+ } \
+} \
+ \
+static void pic32_spi_tx_##__name(struct pic32_spi *pic32s) \
+{ \
+ __type v; \
+ u32 mx = pic32_tx_max(pic32s, sizeof(__type)); \
+ for (; mx ; mx--) { \
+ v = (__type)~0U; \
+ if (pic32s->tx_end - pic32s->len) \
+ v = *(__type *)(pic32s->tx); \
+ write##__bwl(v, &pic32s->regs->buf); \
+ pic32s->tx += sizeof(__type); \
+ } \
+}
+
+BUILD_SPI_FIFO_RW(byte, u8, b);
+BUILD_SPI_FIFO_RW(word, u16, w);
+BUILD_SPI_FIFO_RW(dword, u32, l);
+
+static void pic32_err_stop(struct pic32_spi *pic32s, const char *msg)
+{
+ /* disable all interrupts */
+ disable_irq_nosync(pic32s->fault_irq);
+ disable_irq_nosync(pic32s->rx_irq);
+ disable_irq_nosync(pic32s->tx_irq);
+
+ /* Show err message and abort xfer with err */
+ dev_err(&pic32s->master->dev, "%s\n", msg);
+ if (pic32s->master->cur_msg)
+ pic32s->master->cur_msg->status = -EIO;
+ complete(&pic32s->xfer_done);
+}
+
+static irqreturn_t pic32_spi_fault_irq(int irq, void *dev_id)
+{
+ struct pic32_spi *pic32s = dev_id;
+ u32 status;
+
+ status = readl(&pic32s->regs->status);
+
+ /* Error handling */
+ if (status & (STAT_RX_OV | STAT_TX_UR)) {
+ writel(STAT_RX_OV, &pic32s->regs->status_clr);
+ writel(STAT_TX_UR, &pic32s->regs->status_clr);
+ pic32_err_stop(pic32s, "err_irq: fifo ov/ur-run\n");
+ return IRQ_HANDLED;
+ }
+
+ if (status & STAT_FRM_ERR) {
+ pic32_err_stop(pic32s, "err_irq: frame error");
+ return IRQ_HANDLED;
+ }
+
+ if (!pic32s->master->cur_msg) {
+ pic32_err_stop(pic32s, "err_irq: no mesg");
+ return IRQ_NONE;
+ }
+
+ return IRQ_NONE;
+}
+
+static irqreturn_t pic32_spi_rx_irq(int irq, void *dev_id)
+{
+ struct pic32_spi *pic32s = dev_id;
+
+ pic32s->rx_fifo(pic32s);
+
+ /* rx complete ? */
+ if (pic32s->rx_end == pic32s->rx) {
+ /* disable all interrupts */
+ disable_irq_nosync(pic32s->fault_irq);
+ disable_irq_nosync(pic32s->rx_irq);
+
+ /* complete current xfer */
+ complete(&pic32s->xfer_done);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t pic32_spi_tx_irq(int irq, void *dev_id)
+{
+ struct pic32_spi *pic32s = dev_id;
+
+ pic32s->tx_fifo(pic32s);
+
+ /* tx complete? disable tx interrupt */
+ if (pic32s->tx_end == pic32s->tx)
+ disable_irq_nosync(pic32s->tx_irq);
+
+ return IRQ_HANDLED;
+}
+
+static void pic32_spi_dma_rx_notify(void *data)
+{
+ struct pic32_spi *pic32s = data;
+
+ complete(&pic32s->xfer_done);
+}
+
+static int pic32_spi_dma_transfer(struct pic32_spi *pic32s,
+ struct spi_transfer *xfer)
+{
+ struct spi_master *master = pic32s->master;
+ struct dma_async_tx_descriptor *desc_rx;
+ struct dma_async_tx_descriptor *desc_tx;
+ dma_cookie_t cookie;
+ int ret;
+
+ if (!master->dma_rx || !master->dma_tx)
+ return -ENODEV;
+
+ desc_rx = dmaengine_prep_slave_sg(master->dma_rx,
+ xfer->rx_sg.sgl,
+ xfer->rx_sg.nents,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc_rx) {
+ ret = -EINVAL;
+ goto err_dma;
+ }
+
+ desc_tx = dmaengine_prep_slave_sg(master->dma_tx,
+ xfer->tx_sg.sgl,
+ xfer->tx_sg.nents,
+ DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc_tx) {
+ ret = -EINVAL;
+ goto err_dma;
+ }
+
+ /* Put callback on the RX transfer, that should finish last */
+ desc_rx->callback = pic32_spi_dma_rx_notify;
+ desc_rx->callback_param = pic32s;
+
+ cookie = dmaengine_submit(desc_rx);
+ ret = dma_submit_error(cookie);
+ if (ret)
+ goto err_dma;
+
+ cookie = dmaengine_submit(desc_tx);
+ ret = dma_submit_error(cookie);
+ if (ret)
+ goto err_dma_tx;
+
+ dma_async_issue_pending(master->dma_rx);
+ dma_async_issue_pending(master->dma_tx);
+
+ return 0;
+
+err_dma_tx:
+ dmaengine_terminate_all(master->dma_rx);
+err_dma:
+ return ret;
+}
+
+static int pic32_spi_dma_config(struct pic32_spi *pic32s, u32 dma_width)
+{
+ int buf_offset = offsetof(struct pic32_spi_regs, buf);
+ struct spi_master *master = pic32s->master;
+ struct dma_slave_config cfg;
+ int ret;
+
+ memset(&cfg, 0, sizeof(cfg));
+ cfg.device_fc = true;
+ cfg.src_addr = pic32s->dma_base + buf_offset;
+ cfg.dst_addr = pic32s->dma_base + buf_offset;
+ cfg.src_maxburst = pic32s->fifo_n_elm / 2; /* fill one-half */
+ cfg.dst_maxburst = pic32s->fifo_n_elm / 2; /* drain one-half */
+ cfg.src_addr_width = dma_width;
+ cfg.dst_addr_width = dma_width;
+ /* tx channel */
+ cfg.direction = DMA_MEM_TO_DEV;
+ ret = dmaengine_slave_config(master->dma_tx, &cfg);
+ if (ret) {
+ dev_err(&master->dev, "tx channel setup failed\n");
+ return ret;
+ }
+ /* rx channel */
+ cfg.direction = DMA_DEV_TO_MEM;
+ ret = dmaengine_slave_config(master->dma_rx, &cfg);
+ if (ret)
+ dev_err(&master->dev, "rx channel setup failed\n");
+
+ return ret;
+}
+
+static int pic32_spi_set_word_size(struct pic32_spi *pic32s, u8 bits_per_word)
+{
+ enum dma_slave_buswidth dmawidth;
+ u32 buswidth, v;
+
+ switch (bits_per_word) {
+ case 8:
+ pic32s->rx_fifo = pic32_spi_rx_byte;
+ pic32s->tx_fifo = pic32_spi_tx_byte;
+ buswidth = PIC32_BPW_8;
+ dmawidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ break;
+ case 16:
+ pic32s->rx_fifo = pic32_spi_rx_word;
+ pic32s->tx_fifo = pic32_spi_tx_word;
+ buswidth = PIC32_BPW_16;
+ dmawidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ break;
+ case 32:
+ pic32s->rx_fifo = pic32_spi_rx_dword;
+ pic32s->tx_fifo = pic32_spi_tx_dword;
+ buswidth = PIC32_BPW_32;
+ dmawidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ break;
+ default:
+ /* not supported */
+ return -EINVAL;
+ }
+
+ /* calculate maximum number of words fifos can hold */
+ pic32s->fifo_n_elm = DIV_ROUND_UP(pic32s->fifo_n_byte,
+ bits_per_word / 8);
+ /* set word size */
+ v = readl(&pic32s->regs->ctrl);
+ v &= ~(CTRL_BPW_MASK << CTRL_BPW_SHIFT);
+ v |= buswidth << CTRL_BPW_SHIFT;
+ writel(v, &pic32s->regs->ctrl);
+
+ /* re-configure dma width, if required */
+ if (test_bit(PIC32F_DMA_PREP, &pic32s->flags))
+ pic32_spi_dma_config(pic32s, dmawidth);
+
+ return 0;
+}
+
+static int pic32_spi_prepare_hardware(struct spi_master *master)
+{
+ struct pic32_spi *pic32s = spi_master_get_devdata(master);
+
+ pic32_spi_enable(pic32s);
+
+ return 0;
+}
+
+static int pic32_spi_prepare_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct pic32_spi *pic32s = spi_master_get_devdata(master);
+ struct spi_device *spi = msg->spi;
+ u32 val;
+
+ /* set device specific bits_per_word */
+ if (pic32s->bits_per_word != spi->bits_per_word) {
+ pic32_spi_set_word_size(pic32s, spi->bits_per_word);
+ pic32s->bits_per_word = spi->bits_per_word;
+ }
+
+ /* device specific speed change */
+ if (pic32s->speed_hz != spi->max_speed_hz) {
+ pic32_spi_set_clk_rate(pic32s, spi->max_speed_hz);
+ pic32s->speed_hz = spi->max_speed_hz;
+ }
+
+ /* device specific mode change */
+ if (pic32s->mode != spi->mode) {
+ val = readl(&pic32s->regs->ctrl);
+ /* active low */
+ if (spi->mode & SPI_CPOL)
+ val |= CTRL_CKP;
+ else
+ val &= ~CTRL_CKP;
+ /* tx on rising edge */
+ if (spi->mode & SPI_CPHA)
+ val &= ~CTRL_CKE;
+ else
+ val |= CTRL_CKE;
+
+ /* rx at end of tx */
+ val |= CTRL_SMP;
+ writel(val, &pic32s->regs->ctrl);
+ pic32s->mode = spi->mode;
+ }
+
+ return 0;
+}
+
+static bool pic32_spi_can_dma(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct pic32_spi *pic32s = spi_master_get_devdata(master);
+
+	/* skip using DMA on small transfers to avoid overhead. */
+ return (xfer->len >= PIC32_DMA_LEN_MIN) &&
+ test_bit(PIC32F_DMA_PREP, &pic32s->flags);
+}
+
+static int pic32_spi_one_transfer(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *transfer)
+{
+ struct pic32_spi *pic32s;
+ bool dma_issued = false;
+ unsigned long timeout;
+ int ret;
+
+ pic32s = spi_master_get_devdata(master);
+
+ /* handle transfer specific word size change */
+ if (transfer->bits_per_word &&
+ (transfer->bits_per_word != pic32s->bits_per_word)) {
+ ret = pic32_spi_set_word_size(pic32s, transfer->bits_per_word);
+ if (ret)
+ return ret;
+ pic32s->bits_per_word = transfer->bits_per_word;
+ }
+
+ /* handle transfer specific speed change */
+ if (transfer->speed_hz && (transfer->speed_hz != pic32s->speed_hz)) {
+ pic32_spi_set_clk_rate(pic32s, transfer->speed_hz);
+ pic32s->speed_hz = transfer->speed_hz;
+ }
+
+ reinit_completion(&pic32s->xfer_done);
+
+ /* transact by DMA mode */
+ if (transfer->rx_sg.nents && transfer->tx_sg.nents) {
+ ret = pic32_spi_dma_transfer(pic32s, transfer);
+ if (ret) {
+ dev_err(&spi->dev, "dma submit error\n");
+ return ret;
+ }
+
+ /* DMA issued */
+ dma_issued = true;
+ } else {
+ /* set current transfer information */
+ pic32s->tx = (const void *)transfer->tx_buf;
+ pic32s->rx = (const void *)transfer->rx_buf;
+ pic32s->tx_end = pic32s->tx + transfer->len;
+ pic32s->rx_end = pic32s->rx + transfer->len;
+ pic32s->len = transfer->len;
+
+ /* transact by interrupt driven PIO */
+ enable_irq(pic32s->fault_irq);
+ enable_irq(pic32s->rx_irq);
+ enable_irq(pic32s->tx_irq);
+ }
+
+ /* wait for completion */
+ timeout = wait_for_completion_timeout(&pic32s->xfer_done, 2 * HZ);
+ if (timeout == 0) {
+ dev_err(&spi->dev, "wait error/timedout\n");
+ if (dma_issued) {
+ dmaengine_terminate_all(master->dma_rx);
+ dmaengine_terminate_all(master->dma_tx);
+ }
+ ret = -ETIMEDOUT;
+ } else {
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static int pic32_spi_unprepare_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ /* nothing to do */
+ return 0;
+}
+
+static int pic32_spi_unprepare_hardware(struct spi_master *master)
+{
+ struct pic32_spi *pic32s = spi_master_get_devdata(master);
+
+ pic32_spi_disable(pic32s);
+
+ return 0;
+}
+
+/* This may be called multiple times by the same spi device */
+static int pic32_spi_setup(struct spi_device *spi)
+{
+ if (!spi->max_speed_hz) {
+ dev_err(&spi->dev, "No max speed HZ parameter\n");
+ return -EINVAL;
+ }
+
+	/* The PIC32 spi controller can drive /CS during a transfer depending
+	 * on the tx fifo fill-level. /CS stays asserted as long as the TX
+	 * fifo is non-empty and is deasserted otherwise, indicating
+	 * completion of the ongoing transfer. This might result in
+	 * unreliable/erroneous SPI transactions.
+	 * To avoid that we always handle /CS by toggling a GPIO.
+ */
+ if (!spi->cs_gpiod)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void pic32_spi_cleanup(struct spi_device *spi)
+{
+ /* de-activate cs-gpio, gpiolib will handle inversion */
+ gpiod_direction_output(spi->cs_gpiod, 0);
+}
+
+static int pic32_spi_dma_prep(struct pic32_spi *pic32s, struct device *dev)
+{
+ struct spi_master *master = pic32s->master;
+ int ret = 0;
+
+ master->dma_rx = dma_request_chan(dev, "spi-rx");
+ if (IS_ERR(master->dma_rx)) {
+ if (PTR_ERR(master->dma_rx) == -EPROBE_DEFER)
+ ret = -EPROBE_DEFER;
+ else
+ dev_warn(dev, "RX channel not found.\n");
+
+ master->dma_rx = NULL;
+ goto out_err;
+ }
+
+ master->dma_tx = dma_request_chan(dev, "spi-tx");
+ if (IS_ERR(master->dma_tx)) {
+ if (PTR_ERR(master->dma_tx) == -EPROBE_DEFER)
+ ret = -EPROBE_DEFER;
+ else
+ dev_warn(dev, "TX channel not found.\n");
+
+ master->dma_tx = NULL;
+ goto out_err;
+ }
+
+ if (pic32_spi_dma_config(pic32s, DMA_SLAVE_BUSWIDTH_1_BYTE))
+ goto out_err;
+
+	/* DMA channels allocated and prepared */
+ set_bit(PIC32F_DMA_PREP, &pic32s->flags);
+
+ return 0;
+
+out_err:
+ if (master->dma_rx) {
+ dma_release_channel(master->dma_rx);
+ master->dma_rx = NULL;
+ }
+
+ if (master->dma_tx) {
+ dma_release_channel(master->dma_tx);
+ master->dma_tx = NULL;
+ }
+
+ return ret;
+}
+
+static void pic32_spi_dma_unprep(struct pic32_spi *pic32s)
+{
+ if (!test_bit(PIC32F_DMA_PREP, &pic32s->flags))
+ return;
+
+ clear_bit(PIC32F_DMA_PREP, &pic32s->flags);
+ if (pic32s->master->dma_rx)
+ dma_release_channel(pic32s->master->dma_rx);
+
+ if (pic32s->master->dma_tx)
+ dma_release_channel(pic32s->master->dma_tx);
+}
+
+static void pic32_spi_hw_init(struct pic32_spi *pic32s)
+{
+ u32 ctrl;
+
+ /* disable hardware */
+ pic32_spi_disable(pic32s);
+
+ ctrl = readl(&pic32s->regs->ctrl);
+	/* enable the enhanced fifo, 128 bits deep */
+ ctrl |= CTRL_ENHBUF;
+ pic32s->fifo_n_byte = 16;
+
+ /* disable framing mode */
+ ctrl &= ~CTRL_FRMEN;
+
+ /* enable master mode while disabled */
+ ctrl |= CTRL_MSTEN;
+
+ /* set tx fifo threshold interrupt */
+ ctrl &= ~(0x3 << CTRL_TX_INT_SHIFT);
+ ctrl |= (TX_FIFO_HALF_EMPTY << CTRL_TX_INT_SHIFT);
+
+ /* set rx fifo threshold interrupt */
+ ctrl &= ~(0x3 << CTRL_RX_INT_SHIFT);
+ ctrl |= (RX_FIFO_NOT_EMPTY << CTRL_RX_INT_SHIFT);
+
+ /* select clk source */
+ ctrl &= ~CTRL_MCLKSEL;
+
+ /* set manual /CS mode */
+ ctrl &= ~CTRL_MSSEN;
+
+ writel(ctrl, &pic32s->regs->ctrl);
+
+ /* enable error reporting */
+ ctrl = CTRL2_TX_UR_EN | CTRL2_RX_OV_EN | CTRL2_FRM_ERR_EN;
+ writel(ctrl, &pic32s->regs->ctrl2_set);
+}
+
+static int pic32_spi_hw_probe(struct platform_device *pdev,
+ struct pic32_spi *pic32s)
+{
+ struct resource *mem;
+ int ret;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pic32s->regs = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(pic32s->regs))
+ return PTR_ERR(pic32s->regs);
+
+ pic32s->dma_base = mem->start;
+
+ /* get irq resources: err-irq, rx-irq, tx-irq */
+ pic32s->fault_irq = platform_get_irq_byname(pdev, "fault");
+ if (pic32s->fault_irq < 0)
+ return pic32s->fault_irq;
+
+ pic32s->rx_irq = platform_get_irq_byname(pdev, "rx");
+ if (pic32s->rx_irq < 0)
+ return pic32s->rx_irq;
+
+ pic32s->tx_irq = platform_get_irq_byname(pdev, "tx");
+ if (pic32s->tx_irq < 0)
+ return pic32s->tx_irq;
+
+ /* get clock */
+ pic32s->clk = devm_clk_get(&pdev->dev, "mck0");
+ if (IS_ERR(pic32s->clk)) {
+ dev_err(&pdev->dev, "clk not found\n");
+ ret = PTR_ERR(pic32s->clk);
+ goto err_unmap_mem;
+ }
+
+ ret = clk_prepare_enable(pic32s->clk);
+ if (ret)
+ goto err_unmap_mem;
+
+ pic32_spi_hw_init(pic32s);
+
+ return 0;
+
+err_unmap_mem:
+ dev_err(&pdev->dev, "%s failed, err %d\n", __func__, ret);
+ return ret;
+}
+
+static int pic32_spi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct pic32_spi *pic32s;
+ int ret;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*pic32s));
+ if (!master)
+ return -ENOMEM;
+
+ pic32s = spi_master_get_devdata(master);
+ pic32s->master = master;
+
+ ret = pic32_spi_hw_probe(pdev, pic32s);
+ if (ret)
+ goto err_master;
+
+ master->dev.of_node = pdev->dev.of_node;
+ master->mode_bits = SPI_MODE_3 | SPI_MODE_0 | SPI_CS_HIGH;
+ master->num_chipselect = 1; /* single chip-select */
+ master->max_speed_hz = clk_get_rate(pic32s->clk);
+ master->setup = pic32_spi_setup;
+ master->cleanup = pic32_spi_cleanup;
+ master->flags = SPI_MASTER_MUST_TX | SPI_MASTER_MUST_RX;
+ master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) |
+ SPI_BPW_MASK(32);
+ master->transfer_one = pic32_spi_one_transfer;
+ master->prepare_message = pic32_spi_prepare_message;
+ master->unprepare_message = pic32_spi_unprepare_message;
+ master->prepare_transfer_hardware = pic32_spi_prepare_hardware;
+ master->unprepare_transfer_hardware = pic32_spi_unprepare_hardware;
+ master->use_gpio_descriptors = true;
+
+ /* optional DMA support */
+ ret = pic32_spi_dma_prep(pic32s, &pdev->dev);
+ if (ret)
+ goto err_bailout;
+
+ if (test_bit(PIC32F_DMA_PREP, &pic32s->flags))
+ master->can_dma = pic32_spi_can_dma;
+
+ init_completion(&pic32s->xfer_done);
+ pic32s->mode = -1;
+
+ /* install irq handlers (with irq-disabled) */
+ irq_set_status_flags(pic32s->fault_irq, IRQ_NOAUTOEN);
+ ret = devm_request_irq(&pdev->dev, pic32s->fault_irq,
+ pic32_spi_fault_irq, IRQF_NO_THREAD,
+ dev_name(&pdev->dev), pic32s);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "request fault-irq %d\n", pic32s->fault_irq);
+ goto err_bailout;
+ }
+
+ /* receive interrupt handler */
+ irq_set_status_flags(pic32s->rx_irq, IRQ_NOAUTOEN);
+ ret = devm_request_irq(&pdev->dev, pic32s->rx_irq,
+ pic32_spi_rx_irq, IRQF_NO_THREAD,
+ dev_name(&pdev->dev), pic32s);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "request rx-irq %d\n", pic32s->rx_irq);
+ goto err_bailout;
+ }
+
+ /* transmit interrupt handler */
+ irq_set_status_flags(pic32s->tx_irq, IRQ_NOAUTOEN);
+ ret = devm_request_irq(&pdev->dev, pic32s->tx_irq,
+ pic32_spi_tx_irq, IRQF_NO_THREAD,
+ dev_name(&pdev->dev), pic32s);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "request tx-irq %d\n", pic32s->tx_irq);
+ goto err_bailout;
+ }
+
+ /* register master */
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret) {
+ dev_err(&master->dev, "failed registering spi master\n");
+ goto err_bailout;
+ }
+
+ platform_set_drvdata(pdev, pic32s);
+
+ return 0;
+
+err_bailout:
+ pic32_spi_dma_unprep(pic32s);
+ clk_disable_unprepare(pic32s->clk);
+err_master:
+ spi_master_put(master);
+ return ret;
+}
+
+static int pic32_spi_remove(struct platform_device *pdev)
+{
+ struct pic32_spi *pic32s;
+
+ pic32s = platform_get_drvdata(pdev);
+ pic32_spi_disable(pic32s);
+ clk_disable_unprepare(pic32s->clk);
+ pic32_spi_dma_unprep(pic32s);
+
+ return 0;
+}
+
+static const struct of_device_id pic32_spi_of_match[] = {
+ {.compatible = "microchip,pic32mzda-spi",},
+ {},
+};
+MODULE_DEVICE_TABLE(of, pic32_spi_of_match);
+
+static struct platform_driver pic32_spi_driver = {
+ .driver = {
+ .name = "spi-pic32",
+ .of_match_table = of_match_ptr(pic32_spi_of_match),
+ },
+ .probe = pic32_spi_probe,
+ .remove = pic32_spi_remove,
+};
+
+module_platform_driver(pic32_spi_driver);
+
+MODULE_AUTHOR("Purna Chandra Mandal <purna.mandal@microchip.com>");
+MODULE_DESCRIPTION("Microchip SPI driver for PIC32 SPI controller.");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
new file mode 100644
index 000000000..e4484ace5
--- /dev/null
+++ b/drivers/spi/spi-pl022.c
@@ -0,0 +1,2453 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * A driver for the ARM PL022 PrimeCell SSP/SPI bus master.
+ *
+ * Copyright (C) 2008-2012 ST-Ericsson AB
+ * Copyright (C) 2006 STMicroelectronics Pvt. Ltd.
+ *
+ * Author: Linus Walleij <linus.walleij@stericsson.com>
+ *
+ * Initial version inspired by:
+ * linux-2.6.17-rc3-mm1/drivers/spi/pxa2xx_spi.c
+ * Initial adoption to PL022 by:
+ * Sachin Verma <sachin.verma@st.com>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/ioport.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/spi/spi.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/amba/bus.h>
+#include <linux/amba/pl022.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/pinctrl/consumer.h>
+
+/*
+ * This macro is used to define some register default values.
+ * reg is masked with mask, then OR:ed with an (again masked)
+ * val shifted sb steps to the left.
+ */
+#define SSP_WRITE_BITS(reg, val, mask, sb) \
+ ((reg) = (((reg) & ~(mask)) | (((val)<<(sb)) & (mask))))
+
+/*
+ * This macro is also used to define some default values.
+ * It will just shift val by sb steps to the left and mask
+ * the result with mask.
+ */
+#define GEN_MASK_BITS(val, mask, sb) \
+ (((val)<<(sb)) & (mask))
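+
+/*
+ * Illustrative example (values chosen arbitrarily, not taken from the
+ * hardware documentation): with a field mask of (0xFFUL << 8),
+ *   GEN_MASK_BITS(0x2, 0xFF00, 8)        evaluates to 0x0200, and
+ *   SSP_WRITE_BITS(reg, 0x2, 0xFF00, 8)  clears bits 15:8 of reg and
+ *                                        then ORs in 0x0200.
+ */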
+
+#define DRIVE_TX 0
+#define DO_NOT_DRIVE_TX 1
+
+#define DO_NOT_QUEUE_DMA 0
+#define QUEUE_DMA 1
+
+#define RX_TRANSFER 1
+#define TX_TRANSFER 2
+
+/*
+ * Macros to access SSP Registers with their offsets
+ */
+#define SSP_CR0(r) (r + 0x000)
+#define SSP_CR1(r) (r + 0x004)
+#define SSP_DR(r) (r + 0x008)
+#define SSP_SR(r) (r + 0x00C)
+#define SSP_CPSR(r) (r + 0x010)
+#define SSP_IMSC(r) (r + 0x014)
+#define SSP_RIS(r) (r + 0x018)
+#define SSP_MIS(r) (r + 0x01C)
+#define SSP_ICR(r) (r + 0x020)
+#define SSP_DMACR(r) (r + 0x024)
+#define SSP_CSR(r) (r + 0x030) /* vendor extension */
+#define SSP_ITCR(r) (r + 0x080)
+#define SSP_ITIP(r) (r + 0x084)
+#define SSP_ITOP(r) (r + 0x088)
+#define SSP_TDR(r) (r + 0x08C)
+
+#define SSP_PID0(r) (r + 0xFE0)
+#define SSP_PID1(r) (r + 0xFE4)
+#define SSP_PID2(r) (r + 0xFE8)
+#define SSP_PID3(r) (r + 0xFEC)
+
+#define SSP_CID0(r) (r + 0xFF0)
+#define SSP_CID1(r) (r + 0xFF4)
+#define SSP_CID2(r) (r + 0xFF8)
+#define SSP_CID3(r) (r + 0xFFC)
+
+/*
+ * SSP Control Register 0 - SSP_CR0
+ */
+#define SSP_CR0_MASK_DSS (0x0FUL << 0)
+#define SSP_CR0_MASK_FRF (0x3UL << 4)
+#define SSP_CR0_MASK_SPO (0x1UL << 6)
+#define SSP_CR0_MASK_SPH (0x1UL << 7)
+#define SSP_CR0_MASK_SCR (0xFFUL << 8)
+
+/*
+ * The ST version of this block moves some bits
+ * in SSP_CR0 and extends it to 32 bits
+ */
+#define SSP_CR0_MASK_DSS_ST (0x1FUL << 0)
+#define SSP_CR0_MASK_HALFDUP_ST (0x1UL << 5)
+#define SSP_CR0_MASK_CSS_ST (0x1FUL << 16)
+#define SSP_CR0_MASK_FRF_ST (0x3UL << 21)
+
+/*
+ * SSP Control Register 1 - SSP_CR1
+ */
+#define SSP_CR1_MASK_LBM (0x1UL << 0)
+#define SSP_CR1_MASK_SSE (0x1UL << 1)
+#define SSP_CR1_MASK_MS (0x1UL << 2)
+#define SSP_CR1_MASK_SOD (0x1UL << 3)
+
+/*
+ * The ST version of this block adds some bits
+ * in SSP_CR1
+ */
+#define SSP_CR1_MASK_RENDN_ST (0x1UL << 4)
+#define SSP_CR1_MASK_TENDN_ST (0x1UL << 5)
+#define SSP_CR1_MASK_MWAIT_ST (0x1UL << 6)
+#define SSP_CR1_MASK_RXIFLSEL_ST (0x7UL << 7)
+#define SSP_CR1_MASK_TXIFLSEL_ST (0x7UL << 10)
+/* This one is only in the PL023 variant */
+#define SSP_CR1_MASK_FBCLKDEL_ST (0x7UL << 13)
+
+/*
+ * SSP Status Register - SSP_SR
+ */
+#define SSP_SR_MASK_TFE (0x1UL << 0) /* Transmit FIFO empty */
+#define SSP_SR_MASK_TNF (0x1UL << 1) /* Transmit FIFO not full */
+#define SSP_SR_MASK_RNE (0x1UL << 2) /* Receive FIFO not empty */
+#define SSP_SR_MASK_RFF (0x1UL << 3) /* Receive FIFO full */
+#define SSP_SR_MASK_BSY (0x1UL << 4) /* Busy Flag */
+
+/*
+ * SSP Clock Prescale Register - SSP_CPSR
+ */
+#define SSP_CPSR_MASK_CPSDVSR (0xFFUL << 0)
+
+/*
+ * SSP Interrupt Mask Set/Clear Register - SSP_IMSC
+ */
+#define SSP_IMSC_MASK_RORIM (0x1UL << 0) /* Receive Overrun Interrupt mask */
+#define SSP_IMSC_MASK_RTIM (0x1UL << 1) /* Receive timeout Interrupt mask */
+#define SSP_IMSC_MASK_RXIM (0x1UL << 2) /* Receive FIFO Interrupt mask */
+#define SSP_IMSC_MASK_TXIM (0x1UL << 3) /* Transmit FIFO Interrupt mask */
+
+/*
+ * SSP Raw Interrupt Status Register - SSP_RIS
+ */
+/* Receive Overrun Raw Interrupt status */
+#define SSP_RIS_MASK_RORRIS (0x1UL << 0)
+/* Receive Timeout Raw Interrupt status */
+#define SSP_RIS_MASK_RTRIS (0x1UL << 1)
+/* Receive FIFO Raw Interrupt status */
+#define SSP_RIS_MASK_RXRIS (0x1UL << 2)
+/* Transmit FIFO Raw Interrupt status */
+#define SSP_RIS_MASK_TXRIS (0x1UL << 3)
+
+/*
+ * SSP Masked Interrupt Status Register - SSP_MIS
+ */
+/* Receive Overrun Masked Interrupt status */
+#define SSP_MIS_MASK_RORMIS (0x1UL << 0)
+/* Receive Timeout Masked Interrupt status */
+#define SSP_MIS_MASK_RTMIS (0x1UL << 1)
+/* Receive FIFO Masked Interrupt status */
+#define SSP_MIS_MASK_RXMIS (0x1UL << 2)
+/* Transmit FIFO Masked Interrupt status */
+#define SSP_MIS_MASK_TXMIS (0x1UL << 3)
+
+/*
+ * SSP Interrupt Clear Register - SSP_ICR
+ */
+/* Receive Overrun Raw Clear Interrupt bit */
+#define SSP_ICR_MASK_RORIC (0x1UL << 0)
+/* Receive Timeout Clear Interrupt bit */
+#define SSP_ICR_MASK_RTIC (0x1UL << 1)
+
+/*
+ * SSP DMA Control Register - SSP_DMACR
+ */
+/* Receive DMA Enable bit */
+#define SSP_DMACR_MASK_RXDMAE (0x1UL << 0)
+/* Transmit DMA Enable bit */
+#define SSP_DMACR_MASK_TXDMAE (0x1UL << 1)
+
+/*
+ * SSP Chip Select Control Register - SSP_CSR
+ * (vendor extension)
+ */
+#define SSP_CSR_CSVALUE_MASK (0x1FUL << 0)
+
+/*
+ * SSP Integration Test control Register - SSP_ITCR
+ */
+#define SSP_ITCR_MASK_ITEN (0x1UL << 0)
+#define SSP_ITCR_MASK_TESTFIFO (0x1UL << 1)
+
+/*
+ * SSP Integration Test Input Register - SSP_ITIP
+ */
+#define ITIP_MASK_SSPRXD (0x1UL << 0)
+#define ITIP_MASK_SSPFSSIN (0x1UL << 1)
+#define ITIP_MASK_SSPCLKIN (0x1UL << 2)
+#define ITIP_MASK_RXDMAC (0x1UL << 3)
+#define ITIP_MASK_TXDMAC (0x1UL << 4)
+#define ITIP_MASK_SSPTXDIN (0x1UL << 5)
+
+/*
+ * SSP Integration Test output Register - SSP_ITOP
+ */
+#define ITOP_MASK_SSPTXD (0x1UL << 0)
+#define ITOP_MASK_SSPFSSOUT (0x1UL << 1)
+#define ITOP_MASK_SSPCLKOUT (0x1UL << 2)
+#define ITOP_MASK_SSPOEn (0x1UL << 3)
+#define ITOP_MASK_SSPCTLOEn (0x1UL << 4)
+#define ITOP_MASK_RORINTR (0x1UL << 5)
+#define ITOP_MASK_RTINTR (0x1UL << 6)
+#define ITOP_MASK_RXINTR (0x1UL << 7)
+#define ITOP_MASK_TXINTR (0x1UL << 8)
+#define ITOP_MASK_INTR (0x1UL << 9)
+#define ITOP_MASK_RXDMABREQ (0x1UL << 10)
+#define ITOP_MASK_RXDMASREQ (0x1UL << 11)
+#define ITOP_MASK_TXDMABREQ (0x1UL << 12)
+#define ITOP_MASK_TXDMASREQ (0x1UL << 13)
+
+/*
+ * SSP Test Data Register - SSP_TDR
+ */
+#define TDR_MASK_TESTDATA (0xFFFFFFFF)
+
+/*
+ * Message State
+ * we use the spi_message.state (void *) pointer to
+ * hold a single state value, that's why all this
+ * (void *) casting is done here.
+ */
+#define STATE_START ((void *) 0)
+#define STATE_RUNNING ((void *) 1)
+#define STATE_DONE ((void *) 2)
+#define STATE_ERROR ((void *) -1)
+#define STATE_TIMEOUT ((void *) -2)
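+
+/*
+ * These values are only ever assigned to and compared against the state
+ * pointer, never dereferenced; a typical (illustrative) use later in this
+ * file is msg->state = STATE_RUNNING; ... if (msg->state == STATE_ERROR).
+ */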
+
+/*
+ * SSP State - Whether Enabled or Disabled
+ */
+#define SSP_DISABLED (0)
+#define SSP_ENABLED (1)
+
+/*
+ * SSP DMA State - Whether DMA Enabled or Disabled
+ */
+#define SSP_DMA_DISABLED (0)
+#define SSP_DMA_ENABLED (1)
+
+/*
+ * SSP Clock Defaults
+ */
+#define SSP_DEFAULT_CLKRATE 0x2
+#define SSP_DEFAULT_PRESCALE 0x40
+
+/*
+ * SSP Clock Parameter ranges
+ */
+#define CPSDVR_MIN 0x02
+#define CPSDVR_MAX 0xFE
+#define SCR_MIN 0x00
+#define SCR_MAX 0xFF
+
+/*
+ * SSP Interrupt related Macros
+ */
+#define DEFAULT_SSP_REG_IMSC 0x0UL
+#define DISABLE_ALL_INTERRUPTS DEFAULT_SSP_REG_IMSC
+#define ENABLE_ALL_INTERRUPTS ( \
+ SSP_IMSC_MASK_RORIM | \
+ SSP_IMSC_MASK_RTIM | \
+ SSP_IMSC_MASK_RXIM | \
+ SSP_IMSC_MASK_TXIM \
+)
+
+#define CLEAR_ALL_INTERRUPTS 0x3
+
+#define SPI_POLLING_TIMEOUT 1000
+
+/*
+ * The type of reading going on this chip
+ */
+enum ssp_reading {
+ READING_NULL,
+ READING_U8,
+ READING_U16,
+ READING_U32
+};
+
+/*
+ * The type of writing going on this chip
+ */
+enum ssp_writing {
+ WRITING_NULL,
+ WRITING_U8,
+ WRITING_U16,
+ WRITING_U32
+};
+
+/**
+ * struct vendor_data - vendor-specific config parameters
+ * for PL022 derivatives
+ * @fifodepth: depth of FIFOs (both)
+ * @max_bpw: maximum number of bits per word
+ * @unidir: supports unidirectional transfers
+ * @extended_cr: 32 bit wide control register 0 with extra
+ * features and extra bits in CR1 as found in the ST variants
+ * @pl023: supports a subset of the ST extensions called "PL023"
+ * @loopback: supports loopback mode
+ * @internal_cs_ctrl: supports chip select control register
+ */
+struct vendor_data {
+ int fifodepth;
+ int max_bpw;
+ bool unidir;
+ bool extended_cr;
+ bool pl023;
+ bool loopback;
+ bool internal_cs_ctrl;
+};
+
+/**
+ * struct pl022 - This is the private SSP driver data structure
+ * @adev: AMBA device model hookup
+ * @vendor: vendor data for the IP block
+ * @phybase: the physical memory where the SSP device resides
+ * @virtbase: the virtual memory where the SSP is mapped
+ * @clk: outgoing clock "SPICLK" for the SPI bus
+ * @master: SPI framework hookup
+ * @master_info: controller-specific data from machine setup
+ * @pump_transfers: Tasklet used in Interrupt Transfer mode
+ * @cur_msg: Pointer to current spi_message being processed
+ * @cur_transfer: Pointer to current spi_transfer
+ * @cur_chip: pointer to current client's chip (assigned from controller_state)
+ * @next_msg_cs_active: the next message in the queue has been examined
+ * and it was found that it uses the same chip select as the previous
+ * message, so we left it active after the previous transfer, and it's
+ * active already.
+ * @tx: current position in TX buffer to be read
+ * @tx_end: end position in TX buffer to be read
+ * @rx: current position in RX buffer to be written
+ * @rx_end: end position in RX buffer to be written
+ * @read: the type of read currently going on
+ * @write: the type of write currently going on
+ * @exp_fifo_level: expected FIFO level
+ * @rx_lev_trig: receive FIFO watermark level which triggers IRQ
+ * @tx_lev_trig: transmit FIFO watermark level which triggers IRQ
+ * @dma_rx_channel: optional channel for RX DMA
+ * @dma_tx_channel: optional channel for TX DMA
+ * @sgt_rx: scattertable for the RX transfer
+ * @sgt_tx: scattertable for the TX transfer
+ * @dummypage: a dummy page used for driving data on the bus with DMA
+ * @dma_running: indicates whether DMA is in operation
+ * @cur_cs: current chip select index
+ * @cur_gpiod: current chip select GPIO descriptor
+ */
+struct pl022 {
+ struct amba_device *adev;
+ struct vendor_data *vendor;
+ resource_size_t phybase;
+ void __iomem *virtbase;
+ struct clk *clk;
+ struct spi_master *master;
+ struct pl022_ssp_controller *master_info;
+ /* Message per-transfer pump */
+ struct tasklet_struct pump_transfers;
+ struct spi_message *cur_msg;
+ struct spi_transfer *cur_transfer;
+ struct chip_data *cur_chip;
+ bool next_msg_cs_active;
+ void *tx;
+ void *tx_end;
+ void *rx;
+ void *rx_end;
+ enum ssp_reading read;
+ enum ssp_writing write;
+ u32 exp_fifo_level;
+ enum ssp_rx_level_trig rx_lev_trig;
+ enum ssp_tx_level_trig tx_lev_trig;
+ /* DMA settings */
+#ifdef CONFIG_DMA_ENGINE
+ struct dma_chan *dma_rx_channel;
+ struct dma_chan *dma_tx_channel;
+ struct sg_table sgt_rx;
+ struct sg_table sgt_tx;
+ char *dummypage;
+ bool dma_running;
+#endif
+ int cur_cs;
+ struct gpio_desc *cur_gpiod;
+};
+
+/**
+ * struct chip_data - To maintain runtime state of SSP for each client chip
+ * @cr0: Value of control register CR0 of SSP - on later ST variants this
+ * register is 32 bits wide rather than just 16
+ * @cr1: Value of control register CR1 of SSP
+ * @dmacr: Value of DMA control Register of SSP
+ * @cpsr: Value of Clock prescale register
+ * @n_bytes: how many bytes (a power of 2) are required for a given client data width
+ * @enable_dma: Whether to enable DMA or not
+ * @read: function ptr to be used to read when doing xfer for this chip
+ * @write: function ptr to be used to write when doing xfer for this chip
+ * @xfer_type: polling/interrupt/DMA
+ *
+ * Runtime state of the SSP controller, maintained per chip.
+ * This is set according to the current message being served.
+ */
+struct chip_data {
+ u32 cr0;
+ u16 cr1;
+ u16 dmacr;
+ u16 cpsr;
+ u8 n_bytes;
+ bool enable_dma;
+ enum ssp_reading read;
+ enum ssp_writing write;
+ int xfer_type;
+};
+
+/**
+ * internal_cs_control - Control chip select signals via SSP_CSR.
+ * @pl022: SSP driver private data structure
+ * @command: select/deselect the chip
+ *
+ * Used on controller with internal chip select control via SSP_CSR register
+ * (vendor extension). Each of the 5 LSB in the register controls one chip
+ * select signal.
+ */
+static void internal_cs_control(struct pl022 *pl022, u32 command)
+{
+ u32 tmp;
+
+ tmp = readw(SSP_CSR(pl022->virtbase));
+ if (command == SSP_CHIP_SELECT)
+ tmp &= ~BIT(pl022->cur_cs);
+ else
+ tmp |= BIT(pl022->cur_cs);
+ writew(tmp, SSP_CSR(pl022->virtbase));
+}
+
+static void pl022_cs_control(struct pl022 *pl022, u32 command)
+{
+ if (pl022->vendor->internal_cs_ctrl)
+ internal_cs_control(pl022, command);
+ else if (pl022->cur_gpiod)
+ /*
+ * This needs to be inverted since with GPIOLIB in
+ * control, the inversion will be handled by
+ * GPIOLIB's active low handling. The "command"
+ * passed into this function will be SSP_CHIP_SELECT
+ * which is enum:ed to 0, so we need the inverse
+ * (1) to activate chip select.
+ */
+ gpiod_set_value(pl022->cur_gpiod, !command);
+}
+
+/**
+ * giveback - current spi_message is over, schedule next message and call
+ * callback of this message. Assumes that caller already
+ * set message->status; dma and pio irqs are blocked
+ * @pl022: SSP driver private data structure
+ */
+static void giveback(struct pl022 *pl022)
+{
+ struct spi_transfer *last_transfer;
+ pl022->next_msg_cs_active = false;
+
+ last_transfer = list_last_entry(&pl022->cur_msg->transfers,
+ struct spi_transfer, transfer_list);
+
+ /* Delay if requested before any change in chip select */
+ /*
+ * FIXME: This runs in interrupt context.
+ * Is this really smart?
+ */
+ spi_transfer_delay_exec(last_transfer);
+
+ if (!last_transfer->cs_change) {
+ struct spi_message *next_msg;
+
+ /*
+ * cs_change was not set. We can keep the chip select
+ * enabled if there is a message in the queue and it is
+ * for the same spi device.
+ *
+ * We cannot postpone this until pump_messages, because
+ * after calling msg->complete (below) the driver that
+ * sent the current message could be unloaded, which
+ * could invalidate the cs_control() callback...
+ */
+ /* get a pointer to the next message, if any */
+ next_msg = spi_get_next_queued_message(pl022->master);
+
+ /*
+ * see if the next and current messages point
+ * to the same spi device.
+ */
+ if (next_msg && next_msg->spi != pl022->cur_msg->spi)
+ next_msg = NULL;
+ if (!next_msg || pl022->cur_msg->state == STATE_ERROR)
+ pl022_cs_control(pl022, SSP_CHIP_DESELECT);
+ else
+ pl022->next_msg_cs_active = true;
+
+ }
+
+ pl022->cur_msg = NULL;
+ pl022->cur_transfer = NULL;
+ pl022->cur_chip = NULL;
+
+ /* disable the SPI/SSP operation */
+ writew((readw(SSP_CR1(pl022->virtbase)) &
+ (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
+
+ spi_finalize_current_message(pl022->master);
+}
+
+/**
+ * flush - flush the FIFO to reach a clean state
+ * @pl022: SSP driver private data structure
+ */
+static int flush(struct pl022 *pl022)
+{
+ unsigned long limit = loops_per_jiffy << 1;
+
+ dev_dbg(&pl022->adev->dev, "flush\n");
+ do {
+ while (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE)
+ readw(SSP_DR(pl022->virtbase));
+ } while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_BSY) && limit--);
+
+ pl022->exp_fifo_level = 0;
+
+ return limit;
+}
+
+/**
+ * restore_state - Load configuration of current chip
+ * @pl022: SSP driver private data structure
+ */
+static void restore_state(struct pl022 *pl022)
+{
+ struct chip_data *chip = pl022->cur_chip;
+
+ if (pl022->vendor->extended_cr)
+ writel(chip->cr0, SSP_CR0(pl022->virtbase));
+ else
+ writew(chip->cr0, SSP_CR0(pl022->virtbase));
+ writew(chip->cr1, SSP_CR1(pl022->virtbase));
+ writew(chip->dmacr, SSP_DMACR(pl022->virtbase));
+ writew(chip->cpsr, SSP_CPSR(pl022->virtbase));
+ writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
+ writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
+}
+
+/*
+ * Default SSP Register Values
+ */
+#define DEFAULT_SSP_REG_CR0 ( \
+ GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS, 0) | \
+ GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF, 4) | \
+ GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \
+ GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \
+ GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) \
+)
+
+/* ST versions have slightly different bit layout */
+#define DEFAULT_SSP_REG_CR0_ST ( \
+ GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS_ST, 0) | \
+ GEN_MASK_BITS(SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, SSP_CR0_MASK_HALFDUP_ST, 5) | \
+ GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \
+ GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \
+ GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) | \
+ GEN_MASK_BITS(SSP_BITS_8, SSP_CR0_MASK_CSS_ST, 16) | \
+ GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF_ST, 21) \
+)
+
+/* The PL023 version is slightly different again */
+#define DEFAULT_SSP_REG_CR0_ST_PL023 ( \
+ GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS_ST, 0) | \
+ GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \
+ GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \
+ GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) \
+)
+
+#define DEFAULT_SSP_REG_CR1 ( \
+ GEN_MASK_BITS(LOOPBACK_DISABLED, SSP_CR1_MASK_LBM, 0) | \
+ GEN_MASK_BITS(SSP_DISABLED, SSP_CR1_MASK_SSE, 1) | \
+ GEN_MASK_BITS(SSP_MASTER, SSP_CR1_MASK_MS, 2) | \
+ GEN_MASK_BITS(DO_NOT_DRIVE_TX, SSP_CR1_MASK_SOD, 3) \
+)
+
+/* ST versions extend this register to use all 16 bits */
+#define DEFAULT_SSP_REG_CR1_ST ( \
+ DEFAULT_SSP_REG_CR1 | \
+ GEN_MASK_BITS(SSP_RX_MSB, SSP_CR1_MASK_RENDN_ST, 4) | \
+ GEN_MASK_BITS(SSP_TX_MSB, SSP_CR1_MASK_TENDN_ST, 5) | \
+ GEN_MASK_BITS(SSP_MWIRE_WAIT_ZERO, SSP_CR1_MASK_MWAIT_ST, 6) |\
+ GEN_MASK_BITS(SSP_RX_1_OR_MORE_ELEM, SSP_CR1_MASK_RXIFLSEL_ST, 7) | \
+ GEN_MASK_BITS(SSP_TX_1_OR_MORE_EMPTY_LOC, SSP_CR1_MASK_TXIFLSEL_ST, 10) \
+)
+
+/*
+ * The PL023 variant has further differences: no loopback mode, no microwire
+ * support, and a new clock feedback delay setting.
+ */
+#define DEFAULT_SSP_REG_CR1_ST_PL023 ( \
+ GEN_MASK_BITS(SSP_DISABLED, SSP_CR1_MASK_SSE, 1) | \
+ GEN_MASK_BITS(SSP_MASTER, SSP_CR1_MASK_MS, 2) | \
+ GEN_MASK_BITS(DO_NOT_DRIVE_TX, SSP_CR1_MASK_SOD, 3) | \
+ GEN_MASK_BITS(SSP_RX_MSB, SSP_CR1_MASK_RENDN_ST, 4) | \
+ GEN_MASK_BITS(SSP_TX_MSB, SSP_CR1_MASK_TENDN_ST, 5) | \
+ GEN_MASK_BITS(SSP_RX_1_OR_MORE_ELEM, SSP_CR1_MASK_RXIFLSEL_ST, 7) | \
+ GEN_MASK_BITS(SSP_TX_1_OR_MORE_EMPTY_LOC, SSP_CR1_MASK_TXIFLSEL_ST, 10) | \
+ GEN_MASK_BITS(SSP_FEEDBACK_CLK_DELAY_NONE, SSP_CR1_MASK_FBCLKDEL_ST, 13) \
+)
+
+#define DEFAULT_SSP_REG_CPSR ( \
+ GEN_MASK_BITS(SSP_DEFAULT_PRESCALE, SSP_CPSR_MASK_CPSDVSR, 0) \
+)
+
+#define DEFAULT_SSP_REG_DMACR (\
+ GEN_MASK_BITS(SSP_DMA_DISABLED, SSP_DMACR_MASK_RXDMAE, 0) | \
+ GEN_MASK_BITS(SSP_DMA_DISABLED, SSP_DMACR_MASK_TXDMAE, 1) \
+)
+
+/**
+ * load_ssp_default_config - Load default configuration for SSP
+ * @pl022: SSP driver private data structure
+ */
+static void load_ssp_default_config(struct pl022 *pl022)
+{
+ if (pl022->vendor->pl023) {
+ writel(DEFAULT_SSP_REG_CR0_ST_PL023, SSP_CR0(pl022->virtbase));
+ writew(DEFAULT_SSP_REG_CR1_ST_PL023, SSP_CR1(pl022->virtbase));
+ } else if (pl022->vendor->extended_cr) {
+ writel(DEFAULT_SSP_REG_CR0_ST, SSP_CR0(pl022->virtbase));
+ writew(DEFAULT_SSP_REG_CR1_ST, SSP_CR1(pl022->virtbase));
+ } else {
+ writew(DEFAULT_SSP_REG_CR0, SSP_CR0(pl022->virtbase));
+ writew(DEFAULT_SSP_REG_CR1, SSP_CR1(pl022->virtbase));
+ }
+ writew(DEFAULT_SSP_REG_DMACR, SSP_DMACR(pl022->virtbase));
+ writew(DEFAULT_SSP_REG_CPSR, SSP_CPSR(pl022->virtbase));
+ writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
+ writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
+}
+
+/*
+ * This will write to TX and read from RX according to the parameters
+ * set in pl022.
+ */
+static void readwriter(struct pl022 *pl022)
+{
+
+ /*
+ * The FIFO depth is different between primecell variants.
+ * I believe filling in too much in the FIFO might cause
+ * errons in 8bit wide transfers on ARM variants (just 8 words
+ * FIFO, means only 8x8 = 64 bits in FIFO) at least.
+ *
+ * To prevent this issue, the TX FIFO is only filled to the
+ * unused RX FIFO fill length, regardless of what the TX
+ * FIFO status flag indicates.
+ */
+ dev_dbg(&pl022->adev->dev,
+ "%s, rx: %p, rxend: %p, tx: %p, txend: %p\n",
+ __func__, pl022->rx, pl022->rx_end, pl022->tx, pl022->tx_end);
+
+ /* Read as much as you can */
+ while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE)
+ && (pl022->rx < pl022->rx_end)) {
+ switch (pl022->read) {
+ case READING_NULL:
+ readw(SSP_DR(pl022->virtbase));
+ break;
+ case READING_U8:
+ *(u8 *) (pl022->rx) =
+ readw(SSP_DR(pl022->virtbase)) & 0xFFU;
+ break;
+ case READING_U16:
+ *(u16 *) (pl022->rx) =
+ (u16) readw(SSP_DR(pl022->virtbase));
+ break;
+ case READING_U32:
+ *(u32 *) (pl022->rx) =
+ readl(SSP_DR(pl022->virtbase));
+ break;
+ }
+ pl022->rx += (pl022->cur_chip->n_bytes);
+ pl022->exp_fifo_level--;
+ }
+ /*
+ * Write as much as possible up to the RX FIFO size
+ */
+ while ((pl022->exp_fifo_level < pl022->vendor->fifodepth)
+ && (pl022->tx < pl022->tx_end)) {
+ switch (pl022->write) {
+ case WRITING_NULL:
+ writew(0x0, SSP_DR(pl022->virtbase));
+ break;
+ case WRITING_U8:
+ writew(*(u8 *) (pl022->tx), SSP_DR(pl022->virtbase));
+ break;
+ case WRITING_U16:
+ writew((*(u16 *) (pl022->tx)), SSP_DR(pl022->virtbase));
+ break;
+ case WRITING_U32:
+ writel(*(u32 *) (pl022->tx), SSP_DR(pl022->virtbase));
+ break;
+ }
+ pl022->tx += (pl022->cur_chip->n_bytes);
+ pl022->exp_fifo_level++;
+ /*
+ * This inner reader takes care of things appearing in the RX
+ * FIFO as we're transmitting. This will happen a lot since the
+ * clock starts running when you put things into the TX FIFO,
+ * and then things are continuously clocked into the RX FIFO.
+ */
+ while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE)
+ && (pl022->rx < pl022->rx_end)) {
+ switch (pl022->read) {
+ case READING_NULL:
+ readw(SSP_DR(pl022->virtbase));
+ break;
+ case READING_U8:
+ *(u8 *) (pl022->rx) =
+ readw(SSP_DR(pl022->virtbase)) & 0xFFU;
+ break;
+ case READING_U16:
+ *(u16 *) (pl022->rx) =
+ (u16) readw(SSP_DR(pl022->virtbase));
+ break;
+ case READING_U32:
+ *(u32 *) (pl022->rx) =
+ readl(SSP_DR(pl022->virtbase));
+ break;
+ }
+ pl022->rx += (pl022->cur_chip->n_bytes);
+ pl022->exp_fifo_level--;
+ }
+ }
+ /*
+ * When we exit here the TX FIFO should be full and the RX FIFO
+ * should be empty
+ */
+}
+
+/**
+ * next_transfer - Move to the Next transfer in the current spi message
+ * @pl022: SSP driver private data structure
+ *
+ * This function moves through the linked list of spi transfers in the
+ * current spi message and returns with the state of the current spi
+ * message, i.e. whether its last transfer is done (STATE_DONE) or the
+ * next transfer is ready (STATE_RUNNING)
+ */
+static void *next_transfer(struct pl022 *pl022)
+{
+ struct spi_message *msg = pl022->cur_msg;
+ struct spi_transfer *trans = pl022->cur_transfer;
+
+ /* Move to next transfer */
+ if (trans->transfer_list.next != &msg->transfers) {
+ pl022->cur_transfer =
+ list_entry(trans->transfer_list.next,
+ struct spi_transfer, transfer_list);
+ return STATE_RUNNING;
+ }
+ return STATE_DONE;
+}
+
+/*
+ * This DMA functionality is only compiled in if we have
+ * access to the generic DMA devices/DMA engine.
+ */
+#ifdef CONFIG_DMA_ENGINE
+static void unmap_free_dma_scatter(struct pl022 *pl022)
+{
+ /* Unmap and free the SG tables */
+ dma_unmap_sg(pl022->dma_tx_channel->device->dev, pl022->sgt_tx.sgl,
+ pl022->sgt_tx.nents, DMA_TO_DEVICE);
+ dma_unmap_sg(pl022->dma_rx_channel->device->dev, pl022->sgt_rx.sgl,
+ pl022->sgt_rx.nents, DMA_FROM_DEVICE);
+ sg_free_table(&pl022->sgt_rx);
+ sg_free_table(&pl022->sgt_tx);
+}
+
+static void dma_callback(void *data)
+{
+ struct pl022 *pl022 = data;
+ struct spi_message *msg = pl022->cur_msg;
+
+ BUG_ON(!pl022->sgt_rx.sgl);
+
+#ifdef VERBOSE_DEBUG
+ /*
+ * Optionally dump out buffers to inspect contents; this is
+ * useful if you want to convince yourself that the loopback
+ * read/write contents are the same, when adapting to a new
+ * DMA engine.
+ */
+ {
+ struct scatterlist *sg;
+ unsigned int i;
+
+ dma_sync_sg_for_cpu(&pl022->adev->dev,
+ pl022->sgt_rx.sgl,
+ pl022->sgt_rx.nents,
+ DMA_FROM_DEVICE);
+
+ for_each_sg(pl022->sgt_rx.sgl, sg, pl022->sgt_rx.nents, i) {
+ dev_dbg(&pl022->adev->dev, "SPI RX SG ENTRY: %d", i);
+ print_hex_dump(KERN_ERR, "SPI RX: ",
+ DUMP_PREFIX_OFFSET,
+ 16,
+ 1,
+ sg_virt(sg),
+ sg_dma_len(sg),
+ 1);
+ }
+ for_each_sg(pl022->sgt_tx.sgl, sg, pl022->sgt_tx.nents, i) {
+ dev_dbg(&pl022->adev->dev, "SPI TX SG ENTRY: %d", i);
+ print_hex_dump(KERN_ERR, "SPI TX: ",
+ DUMP_PREFIX_OFFSET,
+ 16,
+ 1,
+ sg_virt(sg),
+ sg_dma_len(sg),
+ 1);
+ }
+ }
+#endif
+
+ unmap_free_dma_scatter(pl022);
+
+ /* Update total bytes transferred */
+ msg->actual_length += pl022->cur_transfer->len;
+ /* Move to next transfer */
+ msg->state = next_transfer(pl022);
+ if (msg->state != STATE_DONE && pl022->cur_transfer->cs_change)
+ pl022_cs_control(pl022, SSP_CHIP_DESELECT);
+ tasklet_schedule(&pl022->pump_transfers);
+}
+
+static void setup_dma_scatter(struct pl022 *pl022,
+ void *buffer,
+ unsigned int length,
+ struct sg_table *sgtab)
+{
+ struct scatterlist *sg;
+ int bytesleft = length;
+ void *bufp = buffer;
+ int mapbytes;
+ int i;
+
+ if (buffer) {
+ for_each_sg(sgtab->sgl, sg, sgtab->nents, i) {
+ /*
+ * If there are fewer bytes left than what fits
+ * in the current page (plus page alignment offset)
+ * we just feed in that much, else we stuff in as much
+ * as we can.
+ */
+ if (bytesleft < (PAGE_SIZE - offset_in_page(bufp)))
+ mapbytes = bytesleft;
+ else
+ mapbytes = PAGE_SIZE - offset_in_page(bufp);
+ sg_set_page(sg, virt_to_page(bufp),
+ mapbytes, offset_in_page(bufp));
+ bufp += mapbytes;
+ bytesleft -= mapbytes;
+ dev_dbg(&pl022->adev->dev,
+ "set RX/TX target page @ %p, %d bytes, %d left\n",
+ bufp, mapbytes, bytesleft);
+ }
+ } else {
+ /* Map the dummy buffer on every page */
+ for_each_sg(sgtab->sgl, sg, sgtab->nents, i) {
+ if (bytesleft < PAGE_SIZE)
+ mapbytes = bytesleft;
+ else
+ mapbytes = PAGE_SIZE;
+ sg_set_page(sg, virt_to_page(pl022->dummypage),
+ mapbytes, 0);
+ bytesleft -= mapbytes;
+ dev_dbg(&pl022->adev->dev,
+ "set RX/TX to dummy page %d bytes, %d left\n",
+ mapbytes, bytesleft);
+
+ }
+ }
+ BUG_ON(bytesleft);
+}
+
+/**
+ * configure_dma - configures the channels for the next transfer
+ * @pl022: SSP driver's private data structure
+ */
+static int configure_dma(struct pl022 *pl022)
+{
+ struct dma_slave_config rx_conf = {
+ .src_addr = SSP_DR(pl022->phybase),
+ .direction = DMA_DEV_TO_MEM,
+ .device_fc = false,
+ };
+ struct dma_slave_config tx_conf = {
+ .dst_addr = SSP_DR(pl022->phybase),
+ .direction = DMA_MEM_TO_DEV,
+ .device_fc = false,
+ };
+ unsigned int pages;
+ int ret;
+ int rx_sglen, tx_sglen;
+ struct dma_chan *rxchan = pl022->dma_rx_channel;
+ struct dma_chan *txchan = pl022->dma_tx_channel;
+ struct dma_async_tx_descriptor *rxdesc;
+ struct dma_async_tx_descriptor *txdesc;
+
+ /* Check that the channels are available */
+ if (!rxchan || !txchan)
+ return -ENODEV;
+
+ /*
+ * If supplied, the DMA burstsize should equal the FIFO trigger level.
+ * Notice that the DMA engine uses one-to-one mapping. Since we can
+ * not trigger on 2 elements this needs explicit mapping rather than
+ * calculation.
+ */
+ switch (pl022->rx_lev_trig) {
+ case SSP_RX_1_OR_MORE_ELEM:
+ rx_conf.src_maxburst = 1;
+ break;
+ case SSP_RX_4_OR_MORE_ELEM:
+ rx_conf.src_maxburst = 4;
+ break;
+ case SSP_RX_8_OR_MORE_ELEM:
+ rx_conf.src_maxburst = 8;
+ break;
+ case SSP_RX_16_OR_MORE_ELEM:
+ rx_conf.src_maxburst = 16;
+ break;
+ case SSP_RX_32_OR_MORE_ELEM:
+ rx_conf.src_maxburst = 32;
+ break;
+ default:
+ rx_conf.src_maxburst = pl022->vendor->fifodepth >> 1;
+ break;
+ }
+
+ switch (pl022->tx_lev_trig) {
+ case SSP_TX_1_OR_MORE_EMPTY_LOC:
+ tx_conf.dst_maxburst = 1;
+ break;
+ case SSP_TX_4_OR_MORE_EMPTY_LOC:
+ tx_conf.dst_maxburst = 4;
+ break;
+ case SSP_TX_8_OR_MORE_EMPTY_LOC:
+ tx_conf.dst_maxburst = 8;
+ break;
+ case SSP_TX_16_OR_MORE_EMPTY_LOC:
+ tx_conf.dst_maxburst = 16;
+ break;
+ case SSP_TX_32_OR_MORE_EMPTY_LOC:
+ tx_conf.dst_maxburst = 32;
+ break;
+ default:
+ tx_conf.dst_maxburst = pl022->vendor->fifodepth >> 1;
+ break;
+ }
+
+ switch (pl022->read) {
+ case READING_NULL:
+ /* Use the same as for writing */
+ rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
+ break;
+ case READING_U8:
+ rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ break;
+ case READING_U16:
+ rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ break;
+ case READING_U32:
+ rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ break;
+ }
+
+ switch (pl022->write) {
+ case WRITING_NULL:
+ /* Use the same as for reading */
+ tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
+ break;
+ case WRITING_U8:
+ tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ break;
+ case WRITING_U16:
+ tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ break;
+ case WRITING_U32:
+ tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ break;
+ }
+
+ /* SPI peculiarity: we need to read and write the same width */
+ if (rx_conf.src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
+ rx_conf.src_addr_width = tx_conf.dst_addr_width;
+ if (tx_conf.dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
+ tx_conf.dst_addr_width = rx_conf.src_addr_width;
+ BUG_ON(rx_conf.src_addr_width != tx_conf.dst_addr_width);
+
+ dmaengine_slave_config(rxchan, &rx_conf);
+ dmaengine_slave_config(txchan, &tx_conf);
+
+ /* Create sglists for the transfers */
+ pages = DIV_ROUND_UP(pl022->cur_transfer->len, PAGE_SIZE);
+ dev_dbg(&pl022->adev->dev, "using %d pages for transfer\n", pages);
+
+ ret = sg_alloc_table(&pl022->sgt_rx, pages, GFP_ATOMIC);
+ if (ret)
+ goto err_alloc_rx_sg;
+
+ ret = sg_alloc_table(&pl022->sgt_tx, pages, GFP_ATOMIC);
+ if (ret)
+ goto err_alloc_tx_sg;
+
+ /* Fill in the scatterlists for the RX+TX buffers */
+ setup_dma_scatter(pl022, pl022->rx,
+ pl022->cur_transfer->len, &pl022->sgt_rx);
+ setup_dma_scatter(pl022, pl022->tx,
+ pl022->cur_transfer->len, &pl022->sgt_tx);
+
+ /* Map DMA buffers */
+ rx_sglen = dma_map_sg(rxchan->device->dev, pl022->sgt_rx.sgl,
+ pl022->sgt_rx.nents, DMA_FROM_DEVICE);
+ if (!rx_sglen)
+ goto err_rx_sgmap;
+
+ tx_sglen = dma_map_sg(txchan->device->dev, pl022->sgt_tx.sgl,
+ pl022->sgt_tx.nents, DMA_TO_DEVICE);
+ if (!tx_sglen)
+ goto err_tx_sgmap;
+
+ /* Send both scatterlists */
+ rxdesc = dmaengine_prep_slave_sg(rxchan,
+ pl022->sgt_rx.sgl,
+ rx_sglen,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!rxdesc)
+ goto err_rxdesc;
+
+ txdesc = dmaengine_prep_slave_sg(txchan,
+ pl022->sgt_tx.sgl,
+ tx_sglen,
+ DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!txdesc)
+ goto err_txdesc;
+
+ /* Put the callback on the RX transfer only, that should finish last */
+ rxdesc->callback = dma_callback;
+ rxdesc->callback_param = pl022;
+
+ /* Submit and fire RX and TX with TX last so we're ready to read! */
+ dmaengine_submit(rxdesc);
+ dmaengine_submit(txdesc);
+ dma_async_issue_pending(rxchan);
+ dma_async_issue_pending(txchan);
+ pl022->dma_running = true;
+
+ return 0;
+
+err_txdesc:
+ dmaengine_terminate_all(txchan);
+err_rxdesc:
+ dmaengine_terminate_all(rxchan);
+ dma_unmap_sg(txchan->device->dev, pl022->sgt_tx.sgl,
+ pl022->sgt_tx.nents, DMA_TO_DEVICE);
+err_tx_sgmap:
+ dma_unmap_sg(rxchan->device->dev, pl022->sgt_rx.sgl,
+ pl022->sgt_rx.nents, DMA_FROM_DEVICE);
+err_rx_sgmap:
+ sg_free_table(&pl022->sgt_tx);
+err_alloc_tx_sg:
+ sg_free_table(&pl022->sgt_rx);
+err_alloc_rx_sg:
+ return -ENOMEM;
+}
+
+static int pl022_dma_probe(struct pl022 *pl022)
+{
+ dma_cap_mask_t mask;
+
+ /* Try to acquire a generic DMA engine slave channel */
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ /*
+ * We need both RX and TX channels to do DMA, else use neither
+ * of them.
+ */
+ pl022->dma_rx_channel = dma_request_channel(mask,
+ pl022->master_info->dma_filter,
+ pl022->master_info->dma_rx_param);
+ if (!pl022->dma_rx_channel) {
+ dev_dbg(&pl022->adev->dev, "no RX DMA channel!\n");
+ goto err_no_rxchan;
+ }
+
+ pl022->dma_tx_channel = dma_request_channel(mask,
+ pl022->master_info->dma_filter,
+ pl022->master_info->dma_tx_param);
+ if (!pl022->dma_tx_channel) {
+ dev_dbg(&pl022->adev->dev, "no TX DMA channel!\n");
+ goto err_no_txchan;
+ }
+
+ pl022->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!pl022->dummypage)
+ goto err_no_dummypage;
+
+ dev_info(&pl022->adev->dev, "setup for DMA on RX %s, TX %s\n",
+ dma_chan_name(pl022->dma_rx_channel),
+ dma_chan_name(pl022->dma_tx_channel));
+
+ return 0;
+
+err_no_dummypage:
+ dma_release_channel(pl022->dma_tx_channel);
+err_no_txchan:
+ dma_release_channel(pl022->dma_rx_channel);
+ pl022->dma_rx_channel = NULL;
+err_no_rxchan:
+ dev_err(&pl022->adev->dev,
+ "Failed to work in dma mode, work without dma!\n");
+ return -ENODEV;
+}
+
+static int pl022_dma_autoprobe(struct pl022 *pl022)
+{
+ struct device *dev = &pl022->adev->dev;
+ struct dma_chan *chan;
+ int err;
+
+ /* automatically configure DMA channels from platform, normally using DT */
+ chan = dma_request_chan(dev, "rx");
+ if (IS_ERR(chan)) {
+ err = PTR_ERR(chan);
+ goto err_no_rxchan;
+ }
+
+ pl022->dma_rx_channel = chan;
+
+ chan = dma_request_chan(dev, "tx");
+ if (IS_ERR(chan)) {
+ err = PTR_ERR(chan);
+ goto err_no_txchan;
+ }
+
+ pl022->dma_tx_channel = chan;
+
+ pl022->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!pl022->dummypage) {
+ err = -ENOMEM;
+ goto err_no_dummypage;
+ }
+
+ return 0;
+
+err_no_dummypage:
+ dma_release_channel(pl022->dma_tx_channel);
+ pl022->dma_tx_channel = NULL;
+err_no_txchan:
+ dma_release_channel(pl022->dma_rx_channel);
+ pl022->dma_rx_channel = NULL;
+err_no_rxchan:
+ return err;
+}
+
+static void terminate_dma(struct pl022 *pl022)
+{
+ struct dma_chan *rxchan = pl022->dma_rx_channel;
+ struct dma_chan *txchan = pl022->dma_tx_channel;
+
+ dmaengine_terminate_all(rxchan);
+ dmaengine_terminate_all(txchan);
+ unmap_free_dma_scatter(pl022);
+ pl022->dma_running = false;
+}
+
+static void pl022_dma_remove(struct pl022 *pl022)
+{
+ if (pl022->dma_running)
+ terminate_dma(pl022);
+ if (pl022->dma_tx_channel)
+ dma_release_channel(pl022->dma_tx_channel);
+ if (pl022->dma_rx_channel)
+ dma_release_channel(pl022->dma_rx_channel);
+ kfree(pl022->dummypage);
+}
+
+#else
+static inline int configure_dma(struct pl022 *pl022)
+{
+ return -ENODEV;
+}
+
+static inline int pl022_dma_autoprobe(struct pl022 *pl022)
+{
+ return 0;
+}
+
+static inline int pl022_dma_probe(struct pl022 *pl022)
+{
+ return 0;
+}
+
+static inline void pl022_dma_remove(struct pl022 *pl022)
+{
+}
+#endif
+
+/**
+ * pl022_interrupt_handler - Interrupt handler for SSP controller
+ * @irq: IRQ number
+ * @dev_id: Local device data
+ *
+ * This function handles interrupts generated for an interrupt based transfer.
+ * If a receive overrun (ROR) interrupt is there then we disable SSP, flag the
+ * current message's state as STATE_ERROR and schedule the tasklet
+ * pump_transfers which will do the postprocessing of the current message by
+ * calling giveback(). Otherwise it reads data from the RX FIFO until it is
+ * empty, and writes data into the TX FIFO until it is full. If we complete
+ * the transfer we move to the next transfer and schedule the tasklet.
+ */
+static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id)
+{
+ struct pl022 *pl022 = dev_id;
+ struct spi_message *msg = pl022->cur_msg;
+ u16 irq_status = 0;
+
+ if (unlikely(!msg)) {
+ dev_err(&pl022->adev->dev,
+ "bad message state in interrupt handler");
+ /* Never fail */
+ return IRQ_HANDLED;
+ }
+
+ /* Read the Interrupt Status Register */
+ irq_status = readw(SSP_MIS(pl022->virtbase));
+
+ if (unlikely(!irq_status))
+ return IRQ_NONE;
+
+ /*
+ * This handles the FIFO interrupts; the timeout
+ * interrupts are flatly ignored since they cannot be
+ * trusted.
+ */
+ if (unlikely(irq_status & SSP_MIS_MASK_RORMIS)) {
+ /*
+ * Overrun interrupt - bail out since our Data has been
+ * corrupted
+ */
+ dev_err(&pl022->adev->dev, "FIFO overrun\n");
+ if (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RFF)
+ dev_err(&pl022->adev->dev,
+ "RXFIFO is full\n");
+
+ /*
+ * Disable and clear interrupts, disable SSP,
+ * mark message with bad status so it can be
+ * retried.
+ */
+ writew(DISABLE_ALL_INTERRUPTS,
+ SSP_IMSC(pl022->virtbase));
+ writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
+ writew((readw(SSP_CR1(pl022->virtbase)) &
+ (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
+ msg->state = STATE_ERROR;
+
+ /* Schedule message queue handler */
+ tasklet_schedule(&pl022->pump_transfers);
+ return IRQ_HANDLED;
+ }
+
+ readwriter(pl022);
+
+ if (pl022->tx == pl022->tx_end) {
+ /* Disable Transmit interrupt, enable receive interrupt */
+ writew((readw(SSP_IMSC(pl022->virtbase)) &
+ ~SSP_IMSC_MASK_TXIM) | SSP_IMSC_MASK_RXIM,
+ SSP_IMSC(pl022->virtbase));
+ }
+
+ /*
+ * Since all transactions must write as much as shall be read,
+ * we can conclude the entire transaction once RX is complete.
+ * At this point, all TX will always be finished.
+ */
+ if (pl022->rx >= pl022->rx_end) {
+ writew(DISABLE_ALL_INTERRUPTS,
+ SSP_IMSC(pl022->virtbase));
+ writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
+ if (unlikely(pl022->rx > pl022->rx_end)) {
+ dev_warn(&pl022->adev->dev, "read %u surplus "
+ "bytes (did you request an odd "
+ "number of bytes on a 16bit bus?)\n",
+ (u32) (pl022->rx - pl022->rx_end));
+ }
+ /* Update total bytes transferred */
+ msg->actual_length += pl022->cur_transfer->len;
+ /* Move to next transfer */
+ msg->state = next_transfer(pl022);
+ if (msg->state != STATE_DONE && pl022->cur_transfer->cs_change)
+ pl022_cs_control(pl022, SSP_CHIP_DESELECT);
+ tasklet_schedule(&pl022->pump_transfers);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * This sets up the pointers to memory for the next transfer to
+ * send out on the SPI bus.
+ */
+static int set_up_next_transfer(struct pl022 *pl022,
+ struct spi_transfer *transfer)
+{
+ int residue;
+
+ /* Sanity check the message for this bus width */
+ residue = pl022->cur_transfer->len % pl022->cur_chip->n_bytes;
+ if (unlikely(residue != 0)) {
+ dev_err(&pl022->adev->dev,
+ "message of %u bytes to transmit but the current "
+ "chip bus has a data width of %u bytes!\n",
+ pl022->cur_transfer->len,
+ pl022->cur_chip->n_bytes);
+ dev_err(&pl022->adev->dev, "skipping this message\n");
+ return -EIO;
+ }
+ pl022->tx = (void *)transfer->tx_buf;
+ pl022->tx_end = pl022->tx + pl022->cur_transfer->len;
+ pl022->rx = (void *)transfer->rx_buf;
+ pl022->rx_end = pl022->rx + pl022->cur_transfer->len;
+ pl022->write =
+ pl022->tx ? pl022->cur_chip->write : WRITING_NULL;
+ pl022->read = pl022->rx ? pl022->cur_chip->read : READING_NULL;
+ return 0;
+}
+
+/**
+ * pump_transfers - Tasklet function which schedules next transfer
+ * when running in interrupt or DMA transfer mode.
+ * @data: SSP driver private data structure
+ *
+ */
+static void pump_transfers(unsigned long data)
+{
+ struct pl022 *pl022 = (struct pl022 *) data;
+ struct spi_message *message = NULL;
+ struct spi_transfer *transfer = NULL;
+ struct spi_transfer *previous = NULL;
+
+ /* Get current state information */
+ message = pl022->cur_msg;
+ transfer = pl022->cur_transfer;
+
+ /* Handle for abort */
+ if (message->state == STATE_ERROR) {
+ message->status = -EIO;
+ giveback(pl022);
+ return;
+ }
+
+ /* Handle end of message */
+ if (message->state == STATE_DONE) {
+ message->status = 0;
+ giveback(pl022);
+ return;
+ }
+
+ /* Delay if requested at end of transfer before CS change */
+ if (message->state == STATE_RUNNING) {
+ previous = list_entry(transfer->transfer_list.prev,
+ struct spi_transfer,
+ transfer_list);
+ /*
+ * FIXME: This runs in interrupt context.
+ * Is this really smart?
+ */
+ spi_transfer_delay_exec(previous);
+
+ /* Reselect chip select only if cs_change was requested */
+ if (previous->cs_change)
+ pl022_cs_control(pl022, SSP_CHIP_SELECT);
+ } else {
+ /* STATE_START */
+ message->state = STATE_RUNNING;
+ }
+
+ if (set_up_next_transfer(pl022, transfer)) {
+ message->state = STATE_ERROR;
+ message->status = -EIO;
+ giveback(pl022);
+ return;
+ }
+ /* Flush the FIFOs and let's go! */
+ flush(pl022);
+
+ if (pl022->cur_chip->enable_dma) {
+ if (configure_dma(pl022)) {
+ dev_dbg(&pl022->adev->dev,
+ "configuration of DMA failed, fall back to interrupt mode\n");
+ goto err_config_dma;
+ }
+ return;
+ }
+
+err_config_dma:
+ /* enable all interrupts except RX */
+ writew(ENABLE_ALL_INTERRUPTS & ~SSP_IMSC_MASK_RXIM, SSP_IMSC(pl022->virtbase));
+}
+
+static void do_interrupt_dma_transfer(struct pl022 *pl022)
+{
+ /*
+ * Default is to enable all interrupts except RX -
+ * this will be enabled once TX is complete
+ */
+ u32 irqflags = (u32)(ENABLE_ALL_INTERRUPTS & ~SSP_IMSC_MASK_RXIM);
+
+ /* Enable target chip, if not already active */
+ if (!pl022->next_msg_cs_active)
+ pl022_cs_control(pl022, SSP_CHIP_SELECT);
+
+ if (set_up_next_transfer(pl022, pl022->cur_transfer)) {
+ /* Error path */
+ pl022->cur_msg->state = STATE_ERROR;
+ pl022->cur_msg->status = -EIO;
+ giveback(pl022);
+ return;
+ }
+ /* If we're using DMA, set up DMA here */
+ if (pl022->cur_chip->enable_dma) {
+ /* Configure DMA transfer */
+ if (configure_dma(pl022)) {
+ dev_dbg(&pl022->adev->dev,
+ "configuration of DMA failed, fall back to interrupt mode\n");
+ goto err_config_dma;
+ }
+ /* Disable interrupts in DMA mode, IRQ from DMA controller */
+ irqflags = DISABLE_ALL_INTERRUPTS;
+ }
+err_config_dma:
+ /* Enable SSP, turn on interrupts */
+ writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
+ SSP_CR1(pl022->virtbase));
+ writew(irqflags, SSP_IMSC(pl022->virtbase));
+}
+
+static void print_current_status(struct pl022 *pl022)
+{
+ u32 read_cr0;
+ u16 read_cr1, read_dmacr, read_sr;
+
+ if (pl022->vendor->extended_cr)
+ read_cr0 = readl(SSP_CR0(pl022->virtbase));
+ else
+ read_cr0 = readw(SSP_CR0(pl022->virtbase));
+ read_cr1 = readw(SSP_CR1(pl022->virtbase));
+ read_dmacr = readw(SSP_DMACR(pl022->virtbase));
+ read_sr = readw(SSP_SR(pl022->virtbase));
+
+ dev_warn(&pl022->adev->dev, "spi-pl022 CR0: %x\n", read_cr0);
+ dev_warn(&pl022->adev->dev, "spi-pl022 CR1: %x\n", read_cr1);
+ dev_warn(&pl022->adev->dev, "spi-pl022 DMACR: %x\n", read_dmacr);
+ dev_warn(&pl022->adev->dev, "spi-pl022 SR: %x\n", read_sr);
+ dev_warn(&pl022->adev->dev,
+ "spi-pl022 exp_fifo_level/fifodepth: %u/%d\n",
+ pl022->exp_fifo_level,
+ pl022->vendor->fifodepth);
+
+}
+
+static void do_polling_transfer(struct pl022 *pl022)
+{
+ struct spi_message *message = NULL;
+ struct spi_transfer *transfer = NULL;
+ struct spi_transfer *previous = NULL;
+ unsigned long time, timeout;
+
+ message = pl022->cur_msg;
+
+ while (message->state != STATE_DONE) {
+ /* Handle for abort */
+ if (message->state == STATE_ERROR)
+ break;
+ transfer = pl022->cur_transfer;
+
+ /* Delay if requested at end of transfer */
+ if (message->state == STATE_RUNNING) {
+ previous =
+ list_entry(transfer->transfer_list.prev,
+ struct spi_transfer, transfer_list);
+ spi_transfer_delay_exec(previous);
+ if (previous->cs_change)
+ pl022_cs_control(pl022, SSP_CHIP_SELECT);
+ } else {
+ /* STATE_START */
+ message->state = STATE_RUNNING;
+ if (!pl022->next_msg_cs_active)
+ pl022_cs_control(pl022, SSP_CHIP_SELECT);
+ }
+
+ /* Configuration Changing Per Transfer */
+ if (set_up_next_transfer(pl022, transfer)) {
+ /* Error path */
+ message->state = STATE_ERROR;
+ break;
+ }
+ /* Flush FIFOs and enable SSP */
+ flush(pl022);
+ writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
+ SSP_CR1(pl022->virtbase));
+
+ dev_dbg(&pl022->adev->dev, "polling transfer ongoing ...\n");
+
+ timeout = jiffies + msecs_to_jiffies(SPI_POLLING_TIMEOUT);
+ while (pl022->tx < pl022->tx_end || pl022->rx < pl022->rx_end) {
+ time = jiffies;
+ readwriter(pl022);
+ if (time_after(time, timeout)) {
+ dev_warn(&pl022->adev->dev,
+ "%s: timeout!\n", __func__);
+ message->state = STATE_TIMEOUT;
+ print_current_status(pl022);
+ goto out;
+ }
+ cpu_relax();
+ }
+
+ /* Update total bytes transferred */
+ message->actual_length += pl022->cur_transfer->len;
+ /* Move to next transfer */
+ message->state = next_transfer(pl022);
+ if (message->state != STATE_DONE
+ && pl022->cur_transfer->cs_change)
+ pl022_cs_control(pl022, SSP_CHIP_DESELECT);
+ }
+out:
+ /* Handle end of message */
+ if (message->state == STATE_DONE)
+ message->status = 0;
+ else if (message->state == STATE_TIMEOUT)
+ message->status = -EAGAIN;
+ else
+ message->status = -EIO;
+
+ giveback(pl022);
+ return;
+}
+
+static int pl022_transfer_one_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct pl022 *pl022 = spi_master_get_devdata(master);
+
+ /* Initial message state */
+ pl022->cur_msg = msg;
+ msg->state = STATE_START;
+
+ pl022->cur_transfer = list_entry(msg->transfers.next,
+ struct spi_transfer, transfer_list);
+
+ /* Setup the SPI using the per chip configuration */
+ pl022->cur_chip = spi_get_ctldata(msg->spi);
+ pl022->cur_cs = msg->spi->chip_select;
+ /* This is always available but may be set to -ENOENT */
+ pl022->cur_gpiod = msg->spi->cs_gpiod;
+
+ restore_state(pl022);
+ flush(pl022);
+
+ if (pl022->cur_chip->xfer_type == POLLING_TRANSFER)
+ do_polling_transfer(pl022);
+ else
+ do_interrupt_dma_transfer(pl022);
+
+ return 0;
+}
+
+static int pl022_unprepare_transfer_hardware(struct spi_master *master)
+{
+ struct pl022 *pl022 = spi_master_get_devdata(master);
+
+ /* nothing more to do - disable spi/ssp and power off */
+ writew((readw(SSP_CR1(pl022->virtbase)) &
+ (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
+
+ return 0;
+}
+
+static int verify_controller_parameters(struct pl022 *pl022,
+ struct pl022_config_chip const *chip_info)
+{
+ if ((chip_info->iface < SSP_INTERFACE_MOTOROLA_SPI)
+ || (chip_info->iface > SSP_INTERFACE_UNIDIRECTIONAL)) {
+ dev_err(&pl022->adev->dev,
+ "interface is configured incorrectly\n");
+ return -EINVAL;
+ }
+ if ((chip_info->iface == SSP_INTERFACE_UNIDIRECTIONAL) &&
+ (!pl022->vendor->unidir)) {
+ dev_err(&pl022->adev->dev,
+ "unidirectional mode not supported in this "
+ "hardware version\n");
+ return -EINVAL;
+ }
+ if ((chip_info->hierarchy != SSP_MASTER)
+ && (chip_info->hierarchy != SSP_SLAVE)) {
+ dev_err(&pl022->adev->dev,
+ "hierarchy is configured incorrectly\n");
+ return -EINVAL;
+ }
+ if ((chip_info->com_mode != INTERRUPT_TRANSFER)
+ && (chip_info->com_mode != DMA_TRANSFER)
+ && (chip_info->com_mode != POLLING_TRANSFER)) {
+ dev_err(&pl022->adev->dev,
+ "Communication mode is configured incorrectly\n");
+ return -EINVAL;
+ }
+ switch (chip_info->rx_lev_trig) {
+ case SSP_RX_1_OR_MORE_ELEM:
+ case SSP_RX_4_OR_MORE_ELEM:
+ case SSP_RX_8_OR_MORE_ELEM:
+ /* These are always OK, all variants can handle this */
+ break;
+ case SSP_RX_16_OR_MORE_ELEM:
+ if (pl022->vendor->fifodepth < 16) {
+ dev_err(&pl022->adev->dev,
+ "RX FIFO Trigger Level is configured incorrectly\n");
+ return -EINVAL;
+ }
+ break;
+ case SSP_RX_32_OR_MORE_ELEM:
+ if (pl022->vendor->fifodepth < 32) {
+ dev_err(&pl022->adev->dev,
+ "RX FIFO Trigger Level is configured incorrectly\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ dev_err(&pl022->adev->dev,
+ "RX FIFO Trigger Level is configured incorrectly\n");
+ return -EINVAL;
+ }
+ switch (chip_info->tx_lev_trig) {
+ case SSP_TX_1_OR_MORE_EMPTY_LOC:
+ case SSP_TX_4_OR_MORE_EMPTY_LOC:
+ case SSP_TX_8_OR_MORE_EMPTY_LOC:
+ /* These are always OK, all variants can handle this */
+ break;
+ case SSP_TX_16_OR_MORE_EMPTY_LOC:
+ if (pl022->vendor->fifodepth < 16) {
+ dev_err(&pl022->adev->dev,
+ "TX FIFO Trigger Level is configured incorrectly\n");
+ return -EINVAL;
+ }
+ break;
+ case SSP_TX_32_OR_MORE_EMPTY_LOC:
+ if (pl022->vendor->fifodepth < 32) {
+ dev_err(&pl022->adev->dev,
+ "TX FIFO Trigger Level is configured incorrectly\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ dev_err(&pl022->adev->dev,
+ "TX FIFO Trigger Level is configured incorrectly\n");
+ return -EINVAL;
+ }
+ if (chip_info->iface == SSP_INTERFACE_NATIONAL_MICROWIRE) {
+ if ((chip_info->ctrl_len < SSP_BITS_4)
+ || (chip_info->ctrl_len > SSP_BITS_32)) {
+ dev_err(&pl022->adev->dev,
+ "CTRL LEN is configured incorrectly\n");
+ return -EINVAL;
+ }
+ if ((chip_info->wait_state != SSP_MWIRE_WAIT_ZERO)
+ && (chip_info->wait_state != SSP_MWIRE_WAIT_ONE)) {
+ dev_err(&pl022->adev->dev,
+ "Wait State is configured incorrectly\n");
+ return -EINVAL;
+ }
+ /* Half duplex is only available in the ST Micro version */
+ if (pl022->vendor->extended_cr) {
+ if ((chip_info->duplex !=
+ SSP_MICROWIRE_CHANNEL_FULL_DUPLEX)
+ && (chip_info->duplex !=
+ SSP_MICROWIRE_CHANNEL_HALF_DUPLEX)) {
+ dev_err(&pl022->adev->dev,
+ "Microwire duplex mode is configured incorrectly\n");
+ return -EINVAL;
+ }
+ } else {
+ if (chip_info->duplex != SSP_MICROWIRE_CHANNEL_FULL_DUPLEX) {
+ dev_err(&pl022->adev->dev,
+ "Microwire half duplex mode requested,"
+ " but this is only available in the"
+ " ST version of PL022\n");
+ return -EINVAL;
+ }
+ }
+ }
+ return 0;
+}
+
+static inline u32 spi_rate(u32 rate, u16 cpsdvsr, u16 scr)
+{
+ return rate / (cpsdvsr * (1 + scr));
+}
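+
+/*
+ * As a worked example (the 48 MHz clock rate is assumed purely for
+ * illustration): spi_rate(48000000, CPSDVR_MIN, SCR_MIN) gives the
+ * 24 MHz maximum, while spi_rate(48000000, CPSDVR_MAX, SCR_MAX) gives
+ * roughly 738 Hz (48000000 / (254 * 256)) as the minimum.
+ */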
+
+static int calculate_effective_freq(struct pl022 *pl022, int freq,
+ struct ssp_clock_params *clk_freq)
+{
+ /* Let's calculate the frequency parameters */
+ u16 cpsdvsr = CPSDVR_MIN, scr = SCR_MIN;
+ u32 rate, max_tclk, min_tclk, best_freq = 0, best_cpsdvsr = 0,
+ best_scr = 0, tmp, found = 0;
+
+ rate = clk_get_rate(pl022->clk);
+ /* cpsdvsr = 2 & scr = 0 */
+ max_tclk = spi_rate(rate, CPSDVR_MIN, SCR_MIN);
+ /* cpsdvsr = 254 & scr = 255 */
+ min_tclk = spi_rate(rate, CPSDVR_MAX, SCR_MAX);
+
+ if (freq > max_tclk)
+ dev_warn(&pl022->adev->dev,
+ "Max speed that can be programmed is %d Hz, you requested %d\n",
+ max_tclk, freq);
+
+ if (freq < min_tclk) {
+ dev_err(&pl022->adev->dev,
+ "Requested frequency: %d Hz is less than minimum possible %d Hz\n",
+ freq, min_tclk);
+ return -EINVAL;
+ }
+
+ /*
+ * best_freq will give closest possible available rate (<= requested
+ * freq) for all values of scr & cpsdvsr.
+ */
+ while ((cpsdvsr <= CPSDVR_MAX) && !found) {
+ while (scr <= SCR_MAX) {
+ tmp = spi_rate(rate, cpsdvsr, scr);
+
+ if (tmp > freq) {
+ /* we need lower freq */
+ scr++;
+ continue;
+ }
+
+ /*
+ * If we found the exact value, mark it found and break.
+ * If we found a closer value, update the best and break.
+ */
+ if (tmp > best_freq) {
+ best_freq = tmp;
+ best_cpsdvsr = cpsdvsr;
+ best_scr = scr;
+
+ if (tmp == freq)
+ found = 1;
+ }
+ /*
+ * increased scr will give lower rates, which are not
+ * required
+ */
+ break;
+ }
+ cpsdvsr += 2;
+ scr = SCR_MIN;
+ }
+
+ WARN(!best_freq, "pl022: Matching cpsdvsr and scr not found for %d Hz rate \n",
+ freq);
+
+ clk_freq->cpsdvsr = (u8) (best_cpsdvsr & 0xFF);
+ clk_freq->scr = (u8) (best_scr & 0xFF);
+ dev_dbg(&pl022->adev->dev,
+ "SSP Target Frequency is: %u, Effective Frequency is %u\n",
+ freq, best_freq);
+ dev_dbg(&pl022->adev->dev, "SSP cpsdvsr = %d, scr = %d\n",
+ clk_freq->cpsdvsr, clk_freq->scr);
+
+ return 0;
+}
+
+/*
+ * A piece of default chip info unless the platform
+ * supplies it.
+ */
+static const struct pl022_config_chip pl022_default_chip_info = {
+ .com_mode = INTERRUPT_TRANSFER,
+ .iface = SSP_INTERFACE_MOTOROLA_SPI,
+ .hierarchy = SSP_MASTER,
+ .slave_tx_disable = DO_NOT_DRIVE_TX,
+ .rx_lev_trig = SSP_RX_1_OR_MORE_ELEM,
+ .tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC,
+ .ctrl_len = SSP_BITS_8,
+ .wait_state = SSP_MWIRE_WAIT_ZERO,
+ .duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX,
+};
+
+/**
+ * pl022_setup - setup function registered to SPI master framework
+ * @spi: spi device which is requesting setup
+ *
+ * This function is registered to the SPI framework for this SPI master
+ * controller. If it is the first time when setup is called by this device,
+ * this function will initialize the runtime state for this chip and save
+ * the same in the device structure. Else it will update the runtime info
+ * with the updated chip info. Nothing is really written to the
+ * controller hardware here; that is not done until the actual transfer
+ * commences.
+ */
+static int pl022_setup(struct spi_device *spi)
+{
+ struct pl022_config_chip const *chip_info;
+ struct pl022_config_chip chip_info_dt;
+ struct chip_data *chip;
+ struct ssp_clock_params clk_freq = { .cpsdvsr = 0, .scr = 0};
+ int status = 0;
+ struct pl022 *pl022 = spi_master_get_devdata(spi->master);
+ unsigned int bits = spi->bits_per_word;
+ u32 tmp;
+ struct device_node *np = spi->dev.of_node;
+
+ if (!spi->max_speed_hz)
+ return -EINVAL;
+
+ /* Get controller_state if one is supplied */
+ chip = spi_get_ctldata(spi);
+
+ if (chip == NULL) {
+ chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+ dev_dbg(&spi->dev,
+ "allocated memory for controller's runtime state\n");
+ }
+
+ /* Get controller data if one is supplied */
+ chip_info = spi->controller_data;
+
+ if (chip_info == NULL) {
+ if (np) {
+ chip_info_dt = pl022_default_chip_info;
+
+ chip_info_dt.hierarchy = SSP_MASTER;
+ of_property_read_u32(np, "pl022,interface",
+ &chip_info_dt.iface);
+ of_property_read_u32(np, "pl022,com-mode",
+ &chip_info_dt.com_mode);
+ of_property_read_u32(np, "pl022,rx-level-trig",
+ &chip_info_dt.rx_lev_trig);
+ of_property_read_u32(np, "pl022,tx-level-trig",
+ &chip_info_dt.tx_lev_trig);
+ of_property_read_u32(np, "pl022,ctrl-len",
+ &chip_info_dt.ctrl_len);
+ of_property_read_u32(np, "pl022,wait-state",
+ &chip_info_dt.wait_state);
+ of_property_read_u32(np, "pl022,duplex",
+ &chip_info_dt.duplex);
+
+ chip_info = &chip_info_dt;
+ } else {
+ chip_info = &pl022_default_chip_info;
+			/* spi_board_info.controller_data is not supplied */
+ dev_dbg(&spi->dev,
+ "using default controller_data settings\n");
+ }
+ } else
+ dev_dbg(&spi->dev,
+ "using user supplied controller_data settings\n");
+
+ /*
+ * We can override with custom divisors, else we use the board
+ * frequency setting
+ */
+ if ((0 == chip_info->clk_freq.cpsdvsr)
+ && (0 == chip_info->clk_freq.scr)) {
+ status = calculate_effective_freq(pl022,
+ spi->max_speed_hz,
+ &clk_freq);
+ if (status < 0)
+ goto err_config_params;
+ } else {
+ memcpy(&clk_freq, &chip_info->clk_freq, sizeof(clk_freq));
+ if ((clk_freq.cpsdvsr % 2) != 0)
+ clk_freq.cpsdvsr =
+ clk_freq.cpsdvsr - 1;
+ }
+ if ((clk_freq.cpsdvsr < CPSDVR_MIN)
+ || (clk_freq.cpsdvsr > CPSDVR_MAX)) {
+ status = -EINVAL;
+ dev_err(&spi->dev,
+ "cpsdvsr is configured incorrectly\n");
+ goto err_config_params;
+ }
+
+ status = verify_controller_parameters(pl022, chip_info);
+ if (status) {
+ dev_err(&spi->dev, "controller data is incorrect");
+ goto err_config_params;
+ }
+
+ pl022->rx_lev_trig = chip_info->rx_lev_trig;
+ pl022->tx_lev_trig = chip_info->tx_lev_trig;
+
+ /* Now set controller state based on controller data */
+ chip->xfer_type = chip_info->com_mode;
+
+ /* Check bits per word with vendor specific range */
+ if ((bits <= 3) || (bits > pl022->vendor->max_bpw)) {
+ status = -ENOTSUPP;
+ dev_err(&spi->dev, "illegal data size for this controller!\n");
+ dev_err(&spi->dev, "This controller can only handle 4 <= n <= %d bit words\n",
+ pl022->vendor->max_bpw);
+ goto err_config_params;
+ } else if (bits <= 8) {
+		dev_dbg(&spi->dev, "4 <= n <= 8 bits per word\n");
+ chip->n_bytes = 1;
+ chip->read = READING_U8;
+ chip->write = WRITING_U8;
+ } else if (bits <= 16) {
+ dev_dbg(&spi->dev, "9 <= n <= 16 bits per word\n");
+ chip->n_bytes = 2;
+ chip->read = READING_U16;
+ chip->write = WRITING_U16;
+ } else {
+ dev_dbg(&spi->dev, "17 <= n <= 32 bits per word\n");
+ chip->n_bytes = 4;
+ chip->read = READING_U32;
+ chip->write = WRITING_U32;
+ }
+
+ /* Now Initialize all register settings required for this chip */
+ chip->cr0 = 0;
+ chip->cr1 = 0;
+ chip->dmacr = 0;
+ chip->cpsr = 0;
+ if ((chip_info->com_mode == DMA_TRANSFER)
+ && ((pl022->master_info)->enable_dma)) {
+ chip->enable_dma = true;
+ dev_dbg(&spi->dev, "DMA mode set in controller state\n");
+ SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED,
+ SSP_DMACR_MASK_RXDMAE, 0);
+ SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED,
+ SSP_DMACR_MASK_TXDMAE, 1);
+ } else {
+ chip->enable_dma = false;
+ dev_dbg(&spi->dev, "DMA mode NOT set in controller state\n");
+ SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED,
+ SSP_DMACR_MASK_RXDMAE, 0);
+ SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED,
+ SSP_DMACR_MASK_TXDMAE, 1);
+ }
+
+ chip->cpsr = clk_freq.cpsdvsr;
+
+ /* Special setup for the ST micro extended control registers */
+ if (pl022->vendor->extended_cr) {
+ u32 etx;
+
+ if (pl022->vendor->pl023) {
+ /* These bits are only in the PL023 */
+ SSP_WRITE_BITS(chip->cr1, chip_info->clkdelay,
+ SSP_CR1_MASK_FBCLKDEL_ST, 13);
+ } else {
+ /* These bits are in the PL022 but not PL023 */
+ SSP_WRITE_BITS(chip->cr0, chip_info->duplex,
+ SSP_CR0_MASK_HALFDUP_ST, 5);
+ SSP_WRITE_BITS(chip->cr0, chip_info->ctrl_len,
+ SSP_CR0_MASK_CSS_ST, 16);
+ SSP_WRITE_BITS(chip->cr0, chip_info->iface,
+ SSP_CR0_MASK_FRF_ST, 21);
+ SSP_WRITE_BITS(chip->cr1, chip_info->wait_state,
+ SSP_CR1_MASK_MWAIT_ST, 6);
+ }
+ SSP_WRITE_BITS(chip->cr0, bits - 1,
+ SSP_CR0_MASK_DSS_ST, 0);
+
+ if (spi->mode & SPI_LSB_FIRST) {
+ tmp = SSP_RX_LSB;
+ etx = SSP_TX_LSB;
+ } else {
+ tmp = SSP_RX_MSB;
+ etx = SSP_TX_MSB;
+ }
+ SSP_WRITE_BITS(chip->cr1, tmp, SSP_CR1_MASK_RENDN_ST, 4);
+ SSP_WRITE_BITS(chip->cr1, etx, SSP_CR1_MASK_TENDN_ST, 5);
+ SSP_WRITE_BITS(chip->cr1, chip_info->rx_lev_trig,
+ SSP_CR1_MASK_RXIFLSEL_ST, 7);
+ SSP_WRITE_BITS(chip->cr1, chip_info->tx_lev_trig,
+ SSP_CR1_MASK_TXIFLSEL_ST, 10);
+ } else {
+ SSP_WRITE_BITS(chip->cr0, bits - 1,
+ SSP_CR0_MASK_DSS, 0);
+ SSP_WRITE_BITS(chip->cr0, chip_info->iface,
+ SSP_CR0_MASK_FRF, 4);
+ }
+
+ /* Stuff that is common for all versions */
+ if (spi->mode & SPI_CPOL)
+ tmp = SSP_CLK_POL_IDLE_HIGH;
+ else
+ tmp = SSP_CLK_POL_IDLE_LOW;
+ SSP_WRITE_BITS(chip->cr0, tmp, SSP_CR0_MASK_SPO, 6);
+
+ if (spi->mode & SPI_CPHA)
+ tmp = SSP_CLK_SECOND_EDGE;
+ else
+ tmp = SSP_CLK_FIRST_EDGE;
+ SSP_WRITE_BITS(chip->cr0, tmp, SSP_CR0_MASK_SPH, 7);
+
+ SSP_WRITE_BITS(chip->cr0, clk_freq.scr, SSP_CR0_MASK_SCR, 8);
+ /* Loopback is available on all versions except PL023 */
+ if (pl022->vendor->loopback) {
+ if (spi->mode & SPI_LOOP)
+ tmp = LOOPBACK_ENABLED;
+ else
+ tmp = LOOPBACK_DISABLED;
+ SSP_WRITE_BITS(chip->cr1, tmp, SSP_CR1_MASK_LBM, 0);
+ }
+ SSP_WRITE_BITS(chip->cr1, SSP_DISABLED, SSP_CR1_MASK_SSE, 1);
+ SSP_WRITE_BITS(chip->cr1, chip_info->hierarchy, SSP_CR1_MASK_MS, 2);
+ SSP_WRITE_BITS(chip->cr1, chip_info->slave_tx_disable, SSP_CR1_MASK_SOD,
+ 3);
+
+ /* Save controller_state */
+ spi_set_ctldata(spi, chip);
+ return status;
+ err_config_params:
+ spi_set_ctldata(spi, NULL);
+ kfree(chip);
+ return status;
+}
+
+/**
+ * pl022_cleanup - cleanup function registered to SPI master framework
+ * @spi: spi device which is requesting cleanup
+ *
+ * This function is registered to the SPI framework for this SPI master
+ * controller. It will free the runtime state of chip.
+ */
+static void pl022_cleanup(struct spi_device *spi)
+{
+ struct chip_data *chip = spi_get_ctldata(spi);
+
+ spi_set_ctldata(spi, NULL);
+ kfree(chip);
+}
+
+static struct pl022_ssp_controller *
+pl022_platform_data_dt_get(struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ struct pl022_ssp_controller *pd;
+
+ if (!np) {
+ dev_err(dev, "no dt node defined\n");
+ return NULL;
+ }
+
+ pd = devm_kzalloc(dev, sizeof(struct pl022_ssp_controller), GFP_KERNEL);
+ if (!pd)
+ return NULL;
+
+ pd->bus_id = -1;
+ pd->enable_dma = 1;
+ of_property_read_u32(np, "pl022,autosuspend-delay",
+ &pd->autosuspend_delay);
+ pd->rt = of_property_read_bool(np, "pl022,rt");
+
+ return pd;
+}
+
+static int pl022_probe(struct amba_device *adev, const struct amba_id *id)
+{
+ struct device *dev = &adev->dev;
+ struct pl022_ssp_controller *platform_info =
+ dev_get_platdata(&adev->dev);
+ struct spi_master *master;
+ struct pl022 *pl022 = NULL; /*Data for this driver */
+ int status = 0;
+
+ dev_info(&adev->dev,
+ "ARM PL022 driver, device ID: 0x%08x\n", adev->periphid);
+ if (!platform_info && IS_ENABLED(CONFIG_OF))
+ platform_info = pl022_platform_data_dt_get(dev);
+
+ if (!platform_info) {
+ dev_err(dev, "probe: no platform data defined\n");
+ return -ENODEV;
+ }
+
+ /* Allocate master with space for data */
+ master = spi_alloc_master(dev, sizeof(struct pl022));
+ if (master == NULL) {
+ dev_err(&adev->dev, "probe - cannot alloc SPI master\n");
+ return -ENOMEM;
+ }
+
+ pl022 = spi_master_get_devdata(master);
+ pl022->master = master;
+ pl022->master_info = platform_info;
+ pl022->adev = adev;
+ pl022->vendor = id->data;
+
+ /*
+	 * Bus number which has been assigned to this SSP controller
+	 * on this board.
+ */
+ master->bus_num = platform_info->bus_id;
+ master->cleanup = pl022_cleanup;
+ master->setup = pl022_setup;
+ master->auto_runtime_pm = true;
+ master->transfer_one_message = pl022_transfer_one_message;
+ master->unprepare_transfer_hardware = pl022_unprepare_transfer_hardware;
+ master->rt = platform_info->rt;
+ master->dev.of_node = dev->of_node;
+ master->use_gpio_descriptors = true;
+
+ /*
+ * Supports mode 0-3, loopback, and active low CS. Transfers are
+ * always MS bit first on the original pl022.
+ */
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
+ if (pl022->vendor->extended_cr)
+ master->mode_bits |= SPI_LSB_FIRST;
+
+ dev_dbg(&adev->dev, "BUSNO: %d\n", master->bus_num);
+
+ status = amba_request_regions(adev, NULL);
+ if (status)
+ goto err_no_ioregion;
+
+ pl022->phybase = adev->res.start;
+ pl022->virtbase = devm_ioremap(dev, adev->res.start,
+ resource_size(&adev->res));
+ if (pl022->virtbase == NULL) {
+ status = -ENOMEM;
+ goto err_no_ioremap;
+ }
+ dev_info(&adev->dev, "mapped registers from %pa to %p\n",
+ &adev->res.start, pl022->virtbase);
+
+ pl022->clk = devm_clk_get(&adev->dev, NULL);
+ if (IS_ERR(pl022->clk)) {
+ status = PTR_ERR(pl022->clk);
+ dev_err(&adev->dev, "could not retrieve SSP/SPI bus clock\n");
+ goto err_no_clk;
+ }
+
+ status = clk_prepare_enable(pl022->clk);
+ if (status) {
+ dev_err(&adev->dev, "could not enable SSP/SPI bus clock\n");
+ goto err_no_clk_en;
+ }
+
+ /* Initialize transfer pump */
+ tasklet_init(&pl022->pump_transfers, pump_transfers,
+ (unsigned long)pl022);
+
+ /* Disable SSP */
+ writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)),
+ SSP_CR1(pl022->virtbase));
+ load_ssp_default_config(pl022);
+
+ status = devm_request_irq(dev, adev->irq[0], pl022_interrupt_handler,
+ 0, "pl022", pl022);
+ if (status < 0) {
+ dev_err(&adev->dev, "probe - cannot get IRQ (%d)\n", status);
+ goto err_no_irq;
+ }
+
+ /* Get DMA channels, try autoconfiguration first */
+ status = pl022_dma_autoprobe(pl022);
+ if (status == -EPROBE_DEFER) {
+ dev_dbg(dev, "deferring probe to get DMA channel\n");
+ goto err_no_irq;
+ }
+
+ /* If that failed, use channels from platform_info */
+ if (status == 0)
+ platform_info->enable_dma = 1;
+ else if (platform_info->enable_dma) {
+ status = pl022_dma_probe(pl022);
+ if (status != 0)
+ platform_info->enable_dma = 0;
+ }
+
+ /* Register with the SPI framework */
+ amba_set_drvdata(adev, pl022);
+ status = devm_spi_register_master(&adev->dev, master);
+ if (status != 0) {
+ dev_err(&adev->dev,
+ "probe - problem registering spi master\n");
+ goto err_spi_register;
+ }
+ dev_dbg(dev, "probe succeeded\n");
+
+	/* Let runtime PM put the device into suspend */
+ if (platform_info->autosuspend_delay > 0) {
+ dev_info(&adev->dev,
+ "will use autosuspend for runtime pm, delay %dms\n",
+ platform_info->autosuspend_delay);
+ pm_runtime_set_autosuspend_delay(dev,
+ platform_info->autosuspend_delay);
+ pm_runtime_use_autosuspend(dev);
+ }
+ pm_runtime_put(dev);
+
+ return 0;
+
+ err_spi_register:
+ if (platform_info->enable_dma)
+ pl022_dma_remove(pl022);
+ err_no_irq:
+ clk_disable_unprepare(pl022->clk);
+ err_no_clk_en:
+ err_no_clk:
+ err_no_ioremap:
+ amba_release_regions(adev);
+ err_no_ioregion:
+ spi_master_put(master);
+ return status;
+}
+
+static void
+pl022_remove(struct amba_device *adev)
+{
+ struct pl022 *pl022 = amba_get_drvdata(adev);
+
+ if (!pl022)
+ return;
+
+ /*
+	 * Undo the pm_runtime_put() from probe. We assume that we're not
+	 * accessing the primecell here.
+ */
+ pm_runtime_get_noresume(&adev->dev);
+
+ load_ssp_default_config(pl022);
+ if (pl022->master_info->enable_dma)
+ pl022_dma_remove(pl022);
+
+ clk_disable_unprepare(pl022->clk);
+ amba_release_regions(adev);
+ tasklet_disable(&pl022->pump_transfers);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int pl022_suspend(struct device *dev)
+{
+ struct pl022 *pl022 = dev_get_drvdata(dev);
+ int ret;
+
+ ret = spi_master_suspend(pl022->master);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_force_suspend(dev);
+ if (ret) {
+ spi_master_resume(pl022->master);
+ return ret;
+ }
+
+ pinctrl_pm_select_sleep_state(dev);
+
+ dev_dbg(dev, "suspended\n");
+ return 0;
+}
+
+static int pl022_resume(struct device *dev)
+{
+ struct pl022 *pl022 = dev_get_drvdata(dev);
+ int ret;
+
+ ret = pm_runtime_force_resume(dev);
+ if (ret)
+ dev_err(dev, "problem resuming\n");
+
+ /* Start the queue running */
+ ret = spi_master_resume(pl022->master);
+ if (!ret)
+ dev_dbg(dev, "resumed\n");
+
+ return ret;
+}
+#endif
+
+#ifdef CONFIG_PM
+static int pl022_runtime_suspend(struct device *dev)
+{
+ struct pl022 *pl022 = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(pl022->clk);
+ pinctrl_pm_select_idle_state(dev);
+
+ return 0;
+}
+
+static int pl022_runtime_resume(struct device *dev)
+{
+ struct pl022 *pl022 = dev_get_drvdata(dev);
+
+ pinctrl_pm_select_default_state(dev);
+ clk_prepare_enable(pl022->clk);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops pl022_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pl022_suspend, pl022_resume)
+ SET_RUNTIME_PM_OPS(pl022_runtime_suspend, pl022_runtime_resume, NULL)
+};
+
+static struct vendor_data vendor_arm = {
+ .fifodepth = 8,
+ .max_bpw = 16,
+ .unidir = false,
+ .extended_cr = false,
+ .pl023 = false,
+ .loopback = true,
+ .internal_cs_ctrl = false,
+};
+
+static struct vendor_data vendor_st = {
+ .fifodepth = 32,
+ .max_bpw = 32,
+ .unidir = false,
+ .extended_cr = true,
+ .pl023 = false,
+ .loopback = true,
+ .internal_cs_ctrl = false,
+};
+
+static struct vendor_data vendor_st_pl023 = {
+ .fifodepth = 32,
+ .max_bpw = 32,
+ .unidir = false,
+ .extended_cr = true,
+ .pl023 = true,
+ .loopback = false,
+ .internal_cs_ctrl = false,
+};
+
+static struct vendor_data vendor_lsi = {
+ .fifodepth = 8,
+ .max_bpw = 16,
+ .unidir = false,
+ .extended_cr = false,
+ .pl023 = false,
+ .loopback = true,
+ .internal_cs_ctrl = true,
+};
+
+static const struct amba_id pl022_ids[] = {
+ {
+ /*
+ * ARM PL022 variant, this has a 16bit wide
+ * and 8 locations deep TX/RX FIFO
+ */
+ .id = 0x00041022,
+ .mask = 0x000fffff,
+ .data = &vendor_arm,
+ },
+ {
+ /*
+ * ST Micro derivative, this has 32bit wide
+ * and 32 locations deep TX/RX FIFO
+ */
+ .id = 0x01080022,
+ .mask = 0xffffffff,
+ .data = &vendor_st,
+ },
+ {
+ /*
+ * ST-Ericsson derivative "PL023" (this is not
+ * an official ARM number), this is a PL022 SSP block
+ * stripped to SPI mode only, it has 32bit wide
+ * and 32 locations deep TX/RX FIFO but no extended
+ * CR0/CR1 register
+ */
+ .id = 0x00080023,
+ .mask = 0xffffffff,
+ .data = &vendor_st_pl023,
+ },
+ {
+ /*
+		 * PL022 variant that has a chip select control register which
+ * allows control of 5 output signals nCS[0:4].
+ */
+ .id = 0x000b6022,
+ .mask = 0x000fffff,
+ .data = &vendor_lsi,
+ },
+ { 0, 0 },
+};
+
+MODULE_DEVICE_TABLE(amba, pl022_ids);
+
+static struct amba_driver pl022_driver = {
+ .drv = {
+ .name = "ssp-pl022",
+ .pm = &pl022_dev_pm_ops,
+ },
+ .id_table = pl022_ids,
+ .probe = pl022_probe,
+ .remove = pl022_remove,
+};
+
+static int __init pl022_init(void)
+{
+ return amba_driver_register(&pl022_driver);
+}
+subsys_initcall(pl022_init);
+
+static void __exit pl022_exit(void)
+{
+ amba_driver_unregister(&pl022_driver);
+}
+module_exit(pl022_exit);
+
+MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>");
+MODULE_DESCRIPTION("PL022 SSP Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-ppc4xx.c b/drivers/spi/spi-ppc4xx.c
new file mode 100644
index 000000000..d65f047b6
--- /dev/null
+++ b/drivers/spi/spi-ppc4xx.c
@@ -0,0 +1,499 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * SPI_PPC4XX SPI controller driver.
+ *
+ * Copyright (C) 2007 Gary Jennejohn <garyj@denx.de>
+ * Copyright 2008 Stefan Roese <sr@denx.de>, DENX Software Engineering
+ * Copyright 2009 Harris Corporation, Steven A. Falco <sfalco@harris.com>
+ *
+ * Based in part on drivers/spi/spi_s3c24xx.c
+ *
+ * Copyright (c) 2006 Ben Dooks
+ * Copyright (c) 2006 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ */
+
+/*
+ * The PPC4xx SPI controller has no FIFO so each sent/received byte will
+ * generate an interrupt to the CPU. This can cause high CPU utilization.
+ * This driver allows platforms to reduce the interrupt load on the CPU
+ * during SPI transfers by setting max_speed_hz via the device tree.
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+
+#include <linux/io.h>
+#include <asm/dcr.h>
+#include <asm/dcr-regs.h>
+
+/* bits in mode register - bit 0 is MSb */
+
+/*
+ * SPI_PPC4XX_MODE_SCP = 0 means "data latched on trailing edge of clock"
+ * SPI_PPC4XX_MODE_SCP = 1 means "data latched on leading edge of clock"
+ * Note: This is the inverse of CPHA.
+ */
+#define SPI_PPC4XX_MODE_SCP (0x80 >> 3)
+
+/* SPI_PPC4XX_MODE_SPE = 1 means "port enabled" */
+#define SPI_PPC4XX_MODE_SPE (0x80 >> 4)
+
+/*
+ * SPI_PPC4XX_MODE_RD = 0 means "MSB first" - this is the normal mode
+ * SPI_PPC4XX_MODE_RD = 1 means "LSB first" - this is bit-reversed mode
+ * Note: This is identical to SPI_LSB_FIRST.
+ */
+#define SPI_PPC4XX_MODE_RD (0x80 >> 5)
+
+/*
+ * SPI_PPC4XX_MODE_CI = 0 means "clock idles low"
+ * SPI_PPC4XX_MODE_CI = 1 means "clock idles high"
+ * Note: This is identical to CPOL.
+ */
+#define SPI_PPC4XX_MODE_CI (0x80 >> 6)
+
+/*
+ * SPI_PPC4XX_MODE_IL = 0 means "loopback disable"
+ * SPI_PPC4XX_MODE_IL = 1 means "loopback enable"
+ */
+#define SPI_PPC4XX_MODE_IL (0x80 >> 7)
+
+/* bits in control register */
+/* starts a transfer when set */
+#define SPI_PPC4XX_CR_STR (0x80 >> 7)
+
+/* bits in status register */
+/* port is busy with a transfer */
+#define SPI_PPC4XX_SR_BSY (0x80 >> 6)
+/* RxD ready */
+#define SPI_PPC4XX_SR_RBR (0x80 >> 7)
+
+/* clock settings (SCP and CI) for various SPI modes */
+#define SPI_CLK_MODE0 (SPI_PPC4XX_MODE_SCP | 0)
+#define SPI_CLK_MODE1 (0 | 0)
+#define SPI_CLK_MODE2 (SPI_PPC4XX_MODE_SCP | SPI_PPC4XX_MODE_CI)
+#define SPI_CLK_MODE3 (0 | SPI_PPC4XX_MODE_CI)
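+
+/*
+ * Summary of the mapping above (SCP is the inverse of CPHA, CI equals CPOL):
+ *
+ *	SPI mode   CPOL  CPHA   SCP  CI
+ *	   0        0     0      1    0
+ *	   1        0     1      0    0
+ *	   2        1     0      1    1
+ *	   3        1     1      0    1
+ */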
+
+#define DRIVER_NAME "spi_ppc4xx_of"
+
+struct spi_ppc4xx_regs {
+ u8 mode;
+ u8 rxd;
+ u8 txd;
+ u8 cr;
+ u8 sr;
+ u8 dummy;
+ /*
+ * Clock divisor modulus register
+ * This uses the following formula:
+ * SCPClkOut = OPBCLK/(4(CDM + 1))
+ * or
+	 *    CDM = OPBCLK / (4 * SCPClkOut) - 1
+ * bit 0 is the MSb!
+ */
+ u8 cdm;
+};
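+
+/*
+ * Worked example for the CDM formula above (figures are illustrative only):
+ * with OPBCLK = 100 MHz and a desired SCPClkOut of 1 MHz,
+ *
+ *	CDM = 100000000 / (4 * 1000000) - 1 = 24
+ *
+ * which gives back exactly 100000000 / (4 * (24 + 1)) = 1 MHz.
+ */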
+
+/* SPI Controller driver's private data. */
+struct ppc4xx_spi {
+ /* bitbang has to be first */
+ struct spi_bitbang bitbang;
+ struct completion done;
+
+ u64 mapbase;
+ u64 mapsize;
+ int irqnum;
+ /* need this to set the SPI clock */
+ unsigned int opb_freq;
+
+ /* for transfers */
+ int len;
+ int count;
+ /* data buffers */
+ const unsigned char *tx;
+ unsigned char *rx;
+
+ struct spi_ppc4xx_regs __iomem *regs; /* pointer to the registers */
+ struct spi_master *master;
+ struct device *dev;
+};
+
+/* need this so we can set the clock in the chipselect routine */
+struct spi_ppc4xx_cs {
+ u8 mode;
+};
+
+static int spi_ppc4xx_txrx(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct ppc4xx_spi *hw;
+ u8 data;
+
+ dev_dbg(&spi->dev, "txrx: tx %p, rx %p, len %d\n",
+ t->tx_buf, t->rx_buf, t->len);
+
+ hw = spi_master_get_devdata(spi->master);
+
+ hw->tx = t->tx_buf;
+ hw->rx = t->rx_buf;
+ hw->len = t->len;
+ hw->count = 0;
+
+ /* send the first byte */
+ data = hw->tx ? hw->tx[0] : 0;
+ out_8(&hw->regs->txd, data);
+ out_8(&hw->regs->cr, SPI_PPC4XX_CR_STR);
+ wait_for_completion(&hw->done);
+
+ return hw->count;
+}
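+
+/*
+ * Note (illustrative): because the controller has no FIFO (see the comment at
+ * the top of this file), spi_ppc4xx_txrx() only primes the first byte; every
+ * subsequent byte is moved by the interrupt handler, which advances hw->count
+ * until it reaches hw->len. A 512-byte transfer therefore completes only
+ * after roughly 512 interrupts.
+ */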
+
+static int spi_ppc4xx_setupxfer(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct ppc4xx_spi *hw = spi_master_get_devdata(spi->master);
+ struct spi_ppc4xx_cs *cs = spi->controller_state;
+ int scr;
+ u8 cdm = 0;
+ u32 speed;
+ u8 bits_per_word;
+
+ /* Start with the generic configuration for this device. */
+ bits_per_word = spi->bits_per_word;
+ speed = spi->max_speed_hz;
+
+ /*
+ * Modify the configuration if the transfer overrides it. Do not allow
+ * the transfer to overwrite the generic configuration with zeros.
+ */
+ if (t) {
+ if (t->bits_per_word)
+ bits_per_word = t->bits_per_word;
+
+ if (t->speed_hz)
+ speed = min(t->speed_hz, spi->max_speed_hz);
+ }
+
+ if (!speed || (speed > spi->max_speed_hz)) {
+ dev_err(&spi->dev, "invalid speed_hz (%d)\n", speed);
+ return -EINVAL;
+ }
+
+ /* Write new configuration */
+ out_8(&hw->regs->mode, cs->mode);
+
+ /* Set the clock */
+ /* opb_freq was already divided by 4 */
+ scr = (hw->opb_freq / speed) - 1;
+ if (scr > 0)
+ cdm = min(scr, 0xff);
+
+ dev_dbg(&spi->dev, "setting pre-scaler to %d (hz %d)\n", cdm, speed);
+
+ if (in_8(&hw->regs->cdm) != cdm)
+ out_8(&hw->regs->cdm, cdm);
+
+ mutex_lock(&hw->bitbang.lock);
+ if (!hw->bitbang.busy) {
+ hw->bitbang.chipselect(spi, BITBANG_CS_INACTIVE);
+ /* Need to ndelay here? */
+ }
+ mutex_unlock(&hw->bitbang.lock);
+
+ return 0;
+}
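+
+/*
+ * Illustrative only: hw->opb_freq already has the divide-by-4 folded in (see
+ * the probe path, where the OPB clock frequency is shifted right by 2), so
+ * the pre-scaler above reduces to cdm = opb_freq / speed - 1, clamped to
+ * 0xff. E.g. a 100 MHz OPB clock (opb_freq = 25000000) and speed = 1 MHz
+ * give cdm = 24.
+ */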
+
+static int spi_ppc4xx_setup(struct spi_device *spi)
+{
+ struct spi_ppc4xx_cs *cs = spi->controller_state;
+
+ if (!spi->max_speed_hz) {
+ dev_err(&spi->dev, "invalid max_speed_hz (must be non-zero)\n");
+ return -EINVAL;
+ }
+
+ if (cs == NULL) {
+ cs = kzalloc(sizeof(*cs), GFP_KERNEL);
+ if (!cs)
+ return -ENOMEM;
+ spi->controller_state = cs;
+ }
+
+ /*
+	 * We set all bits of the SPI0_MODE register, so there is
+	 * no need to read-modify-write.
+ */
+ cs->mode = SPI_PPC4XX_MODE_SPE;
+
+ switch (spi->mode & SPI_MODE_X_MASK) {
+ case SPI_MODE_0:
+ cs->mode |= SPI_CLK_MODE0;
+ break;
+ case SPI_MODE_1:
+ cs->mode |= SPI_CLK_MODE1;
+ break;
+ case SPI_MODE_2:
+ cs->mode |= SPI_CLK_MODE2;
+ break;
+ case SPI_MODE_3:
+ cs->mode |= SPI_CLK_MODE3;
+ break;
+ }
+
+ if (spi->mode & SPI_LSB_FIRST)
+ cs->mode |= SPI_PPC4XX_MODE_RD;
+
+ return 0;
+}
+
+static irqreturn_t spi_ppc4xx_int(int irq, void *dev_id)
+{
+ struct ppc4xx_spi *hw;
+ u8 status;
+ u8 data;
+ unsigned int count;
+
+ hw = (struct ppc4xx_spi *)dev_id;
+
+ status = in_8(&hw->regs->sr);
+ if (!status)
+ return IRQ_NONE;
+
+ /*
+ * BSY de-asserts one cycle after the transfer is complete. The
+ * interrupt is asserted after the transfer is complete. The exact
+ * relationship is not documented, hence this code.
+ */
+
+ if (unlikely(status & SPI_PPC4XX_SR_BSY)) {
+ u8 lstatus;
+ int cnt = 0;
+
+ dev_dbg(hw->dev, "got interrupt but spi still busy?\n");
+ do {
+ ndelay(10);
+ lstatus = in_8(&hw->regs->sr);
+ } while (++cnt < 100 && lstatus & SPI_PPC4XX_SR_BSY);
+
+ if (cnt >= 100) {
+ dev_err(hw->dev, "busywait: too many loops!\n");
+ complete(&hw->done);
+ return IRQ_HANDLED;
+ } else {
+ /* status is always 1 (RBR) here */
+ status = in_8(&hw->regs->sr);
+ dev_dbg(hw->dev, "loops %d status %x\n", cnt, status);
+ }
+ }
+
+ count = hw->count;
+ hw->count++;
+
+ /* RBR triggered this interrupt. Therefore, data must be ready. */
+ data = in_8(&hw->regs->rxd);
+ if (hw->rx)
+ hw->rx[count] = data;
+
+ count++;
+
+ if (count < hw->len) {
+ data = hw->tx ? hw->tx[count] : 0;
+ out_8(&hw->regs->txd, data);
+ out_8(&hw->regs->cr, SPI_PPC4XX_CR_STR);
+ } else {
+ complete(&hw->done);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void spi_ppc4xx_cleanup(struct spi_device *spi)
+{
+ kfree(spi->controller_state);
+}
+
+static void spi_ppc4xx_enable(struct ppc4xx_spi *hw)
+{
+ /*
+	 * On all 4xx PPCs the SPI bus is shared/multiplexed with
+ * the 2nd I2C bus. We need to enable the SPI bus before
+ * using it.
+ */
+
+ /* need to clear bit 14 to enable SPC */
+ dcri_clrset(SDR0, SDR0_PFC1, 0x80000000 >> 14, 0);
+}
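+
+/*
+ * Illustrative arithmetic: with the IBM convention used above (bit 0 is the
+ * MSB of the 32-bit register), "bit 14" corresponds to the mask
+ * 0x80000000 >> 14 = 0x00020000, which dcri_clrset() clears in SDR0_PFC1.
+ */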
+
+/*
+ * platform_device layer stuff...
+ */
+static int spi_ppc4xx_of_probe(struct platform_device *op)
+{
+ struct ppc4xx_spi *hw;
+ struct spi_master *master;
+ struct spi_bitbang *bbp;
+ struct resource resource;
+ struct device_node *np = op->dev.of_node;
+ struct device *dev = &op->dev;
+ struct device_node *opbnp;
+ int ret;
+ const unsigned int *clk;
+
+ master = spi_alloc_master(dev, sizeof(*hw));
+ if (master == NULL)
+ return -ENOMEM;
+ master->dev.of_node = np;
+ platform_set_drvdata(op, master);
+ hw = spi_master_get_devdata(master);
+ hw->master = master;
+ hw->dev = dev;
+
+ init_completion(&hw->done);
+
+ /* Setup the state for the bitbang driver */
+ bbp = &hw->bitbang;
+ bbp->master = hw->master;
+ bbp->setup_transfer = spi_ppc4xx_setupxfer;
+ bbp->txrx_bufs = spi_ppc4xx_txrx;
+ bbp->use_dma = 0;
+ bbp->master->setup = spi_ppc4xx_setup;
+ bbp->master->cleanup = spi_ppc4xx_cleanup;
+ bbp->master->bits_per_word_mask = SPI_BPW_MASK(8);
+ bbp->master->use_gpio_descriptors = true;
+ /*
+ * The SPI core will count the number of GPIO descriptors to figure
+ * out the number of chip selects available on the platform.
+ */
+ bbp->master->num_chipselect = 0;
+
+ /* the spi->mode bits understood by this driver: */
+ bbp->master->mode_bits =
+ SPI_CPHA | SPI_CPOL | SPI_CS_HIGH | SPI_LSB_FIRST;
+
+ /* Get the clock for the OPB */
+ opbnp = of_find_compatible_node(NULL, NULL, "ibm,opb");
+ if (opbnp == NULL) {
+ dev_err(dev, "OPB: cannot find node\n");
+ ret = -ENODEV;
+ goto free_master;
+ }
+ /* Get the clock (Hz) for the OPB */
+ clk = of_get_property(opbnp, "clock-frequency", NULL);
+ if (clk == NULL) {
+ dev_err(dev, "OPB: no clock-frequency property set\n");
+ of_node_put(opbnp);
+ ret = -ENODEV;
+ goto free_master;
+ }
+ hw->opb_freq = *clk;
+ hw->opb_freq >>= 2;
+ of_node_put(opbnp);
+
+ ret = of_address_to_resource(np, 0, &resource);
+ if (ret) {
+ dev_err(dev, "error while parsing device node resource\n");
+ goto free_master;
+ }
+ hw->mapbase = resource.start;
+ hw->mapsize = resource_size(&resource);
+
+ /* Sanity check */
+ if (hw->mapsize < sizeof(struct spi_ppc4xx_regs)) {
+ dev_err(dev, "too small to map registers\n");
+ ret = -EINVAL;
+ goto free_master;
+ }
+
+ /* Request IRQ */
+ hw->irqnum = irq_of_parse_and_map(np, 0);
+ ret = request_irq(hw->irqnum, spi_ppc4xx_int,
+ 0, "spi_ppc4xx_of", (void *)hw);
+ if (ret) {
+ dev_err(dev, "unable to allocate interrupt\n");
+ goto free_master;
+ }
+
+ if (!request_mem_region(hw->mapbase, hw->mapsize, DRIVER_NAME)) {
+ dev_err(dev, "resource unavailable\n");
+ ret = -EBUSY;
+ goto request_mem_error;
+ }
+
+ hw->regs = ioremap(hw->mapbase, sizeof(struct spi_ppc4xx_regs));
+
+ if (!hw->regs) {
+ dev_err(dev, "unable to memory map registers\n");
+ ret = -ENXIO;
+ goto map_io_error;
+ }
+
+ spi_ppc4xx_enable(hw);
+
+ /* Finally register our spi controller */
+ dev->dma_mask = 0;
+ ret = spi_bitbang_start(bbp);
+ if (ret) {
+ dev_err(dev, "failed to register SPI master\n");
+ goto unmap_regs;
+ }
+
+ dev_info(dev, "driver initialized\n");
+
+ return 0;
+
+unmap_regs:
+ iounmap(hw->regs);
+map_io_error:
+ release_mem_region(hw->mapbase, hw->mapsize);
+request_mem_error:
+ free_irq(hw->irqnum, hw);
+free_master:
+ spi_master_put(master);
+
+ dev_err(dev, "initialization failed\n");
+ return ret;
+}
+
+static int spi_ppc4xx_of_remove(struct platform_device *op)
+{
+ struct spi_master *master = platform_get_drvdata(op);
+ struct ppc4xx_spi *hw = spi_master_get_devdata(master);
+
+ spi_bitbang_stop(&hw->bitbang);
+ release_mem_region(hw->mapbase, hw->mapsize);
+ free_irq(hw->irqnum, hw);
+ iounmap(hw->regs);
+ spi_master_put(master);
+ return 0;
+}
+
+static const struct of_device_id spi_ppc4xx_of_match[] = {
+ { .compatible = "ibm,ppc4xx-spi", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, spi_ppc4xx_of_match);
+
+static struct platform_driver spi_ppc4xx_of_driver = {
+ .probe = spi_ppc4xx_of_probe,
+ .remove = spi_ppc4xx_of_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = spi_ppc4xx_of_match,
+ },
+};
+module_platform_driver(spi_ppc4xx_of_driver);
+
+MODULE_AUTHOR("Gary Jennejohn & Stefan Roese");
+MODULE_DESCRIPTION("Simple PPC4xx SPI Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-pxa2xx-dma.c b/drivers/spi/spi-pxa2xx-dma.c
new file mode 100644
index 000000000..be563f0dd
--- /dev/null
+++ b/drivers/spi/spi-pxa2xx-dma.c
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PXA2xx SPI DMA engine support.
+ *
+ * Copyright (C) 2013, 2021 Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ */
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/scatterlist.h>
+#include <linux/sizes.h>
+
+#include <linux/spi/pxa2xx_spi.h>
+#include <linux/spi/spi.h>
+
+#include "spi-pxa2xx.h"
+
+static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
+ bool error)
+{
+ struct spi_message *msg = drv_data->controller->cur_msg;
+
+ /*
+	 * It is possible that one CPU is handling the ROR interrupt while
+	 * another just gets the DMA completion. Calling pump_transfers()
+	 * twice for the same transfer leads to problems, so we prevent
+	 * concurrent calls by using dma_running.
+ */
+ if (atomic_dec_and_test(&drv_data->dma_running)) {
+ /*
+ * If the other CPU is still handling the ROR interrupt we
+ * might not know about the error yet. So we re-check the
+ * ROR bit here before we clear the status register.
+ */
+ if (!error)
+ error = read_SSSR_bits(drv_data, drv_data->mask_sr) & SSSR_ROR;
+
+ /* Clear status & disable interrupts */
+ clear_SSCR1_bits(drv_data, drv_data->dma_cr1);
+ write_SSSR_CS(drv_data, drv_data->clear_sr);
+ if (!pxa25x_ssp_comp(drv_data))
+ pxa2xx_spi_write(drv_data, SSTO, 0);
+
+ if (error) {
+ /* In case we got an error we disable the SSP now */
+ pxa_ssp_disable(drv_data->ssp);
+ msg->status = -EIO;
+ }
+
+ spi_finalize_current_transfer(drv_data->controller);
+ }
+}
+
+static void pxa2xx_spi_dma_callback(void *data)
+{
+ pxa2xx_spi_dma_transfer_complete(data, false);
+}
+
+static struct dma_async_tx_descriptor *
+pxa2xx_spi_dma_prepare_one(struct driver_data *drv_data,
+ enum dma_transfer_direction dir,
+ struct spi_transfer *xfer)
+{
+ struct chip_data *chip =
+ spi_get_ctldata(drv_data->controller->cur_msg->spi);
+ enum dma_slave_buswidth width;
+ struct dma_slave_config cfg;
+ struct dma_chan *chan;
+ struct sg_table *sgt;
+ int ret;
+
+ switch (drv_data->n_bytes) {
+ case 1:
+ width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ break;
+ case 2:
+ width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ break;
+ default:
+ width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ break;
+ }
+
+ memset(&cfg, 0, sizeof(cfg));
+ cfg.direction = dir;
+
+ if (dir == DMA_MEM_TO_DEV) {
+ cfg.dst_addr = drv_data->ssp->phys_base + SSDR;
+ cfg.dst_addr_width = width;
+ cfg.dst_maxburst = chip->dma_burst_size;
+
+ sgt = &xfer->tx_sg;
+ chan = drv_data->controller->dma_tx;
+ } else {
+ cfg.src_addr = drv_data->ssp->phys_base + SSDR;
+ cfg.src_addr_width = width;
+ cfg.src_maxburst = chip->dma_burst_size;
+
+ sgt = &xfer->rx_sg;
+ chan = drv_data->controller->dma_rx;
+ }
+
+ ret = dmaengine_slave_config(chan, &cfg);
+ if (ret) {
+ dev_warn(drv_data->ssp->dev, "DMA slave config failed\n");
+ return NULL;
+ }
+
+ return dmaengine_prep_slave_sg(chan, sgt->sgl, sgt->nents, dir,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+}
+
+irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
+{
+ u32 status;
+
+ status = read_SSSR_bits(drv_data, drv_data->mask_sr);
+ if (status & SSSR_ROR) {
+ dev_err(drv_data->ssp->dev, "FIFO overrun\n");
+
+ dmaengine_terminate_async(drv_data->controller->dma_rx);
+ dmaengine_terminate_async(drv_data->controller->dma_tx);
+
+ pxa2xx_spi_dma_transfer_complete(drv_data, true);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+int pxa2xx_spi_dma_prepare(struct driver_data *drv_data,
+ struct spi_transfer *xfer)
+{
+ struct dma_async_tx_descriptor *tx_desc, *rx_desc;
+ int err;
+
+ tx_desc = pxa2xx_spi_dma_prepare_one(drv_data, DMA_MEM_TO_DEV, xfer);
+ if (!tx_desc) {
+ dev_err(drv_data->ssp->dev, "failed to get DMA TX descriptor\n");
+ err = -EBUSY;
+ goto err_tx;
+ }
+
+ rx_desc = pxa2xx_spi_dma_prepare_one(drv_data, DMA_DEV_TO_MEM, xfer);
+ if (!rx_desc) {
+ dev_err(drv_data->ssp->dev, "failed to get DMA RX descriptor\n");
+ err = -EBUSY;
+ goto err_rx;
+ }
+
+ /* We are ready when RX completes */
+ rx_desc->callback = pxa2xx_spi_dma_callback;
+ rx_desc->callback_param = drv_data;
+
+ dmaengine_submit(rx_desc);
+ dmaengine_submit(tx_desc);
+ return 0;
+
+err_rx:
+ dmaengine_terminate_async(drv_data->controller->dma_tx);
+err_tx:
+ return err;
+}
+
+void pxa2xx_spi_dma_start(struct driver_data *drv_data)
+{
+ dma_async_issue_pending(drv_data->controller->dma_rx);
+ dma_async_issue_pending(drv_data->controller->dma_tx);
+
+ atomic_set(&drv_data->dma_running, 1);
+}
+
+void pxa2xx_spi_dma_stop(struct driver_data *drv_data)
+{
+ atomic_set(&drv_data->dma_running, 0);
+ dmaengine_terminate_sync(drv_data->controller->dma_rx);
+ dmaengine_terminate_sync(drv_data->controller->dma_tx);
+}
+
+int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
+{
+ struct pxa2xx_spi_controller *pdata = drv_data->controller_info;
+ struct spi_controller *controller = drv_data->controller;
+ struct device *dev = drv_data->ssp->dev;
+ dma_cap_mask_t mask;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ controller->dma_tx = dma_request_slave_channel_compat(mask,
+ pdata->dma_filter, pdata->tx_param, dev, "tx");
+ if (!controller->dma_tx)
+ return -ENODEV;
+
+ controller->dma_rx = dma_request_slave_channel_compat(mask,
+ pdata->dma_filter, pdata->rx_param, dev, "rx");
+ if (!controller->dma_rx) {
+ dma_release_channel(controller->dma_tx);
+ controller->dma_tx = NULL;
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+void pxa2xx_spi_dma_release(struct driver_data *drv_data)
+{
+ struct spi_controller *controller = drv_data->controller;
+
+ if (controller->dma_rx) {
+ dmaengine_terminate_sync(controller->dma_rx);
+ dma_release_channel(controller->dma_rx);
+ controller->dma_rx = NULL;
+ }
+ if (controller->dma_tx) {
+ dmaengine_terminate_sync(controller->dma_tx);
+ dma_release_channel(controller->dma_tx);
+ controller->dma_tx = NULL;
+ }
+}
+
+int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
+ struct spi_device *spi,
+ u8 bits_per_word, u32 *burst_code,
+ u32 *threshold)
+{
+ struct pxa2xx_spi_chip *chip_info = spi->controller_data;
+ struct driver_data *drv_data = spi_controller_get_devdata(spi->controller);
+ u32 dma_burst_size = drv_data->controller_info->dma_burst_size;
+
+ /*
+ * If the DMA burst size is given in chip_info we use that,
+ * otherwise we use the default. Also we use the default FIFO
+ * thresholds for now.
+ */
+ *burst_code = chip_info ? chip_info->dma_burst_size : dma_burst_size;
+ *threshold = SSCR1_RxTresh(RX_THRESH_DFLT)
+ | SSCR1_TxTresh(TX_THRESH_DFLT);
+
+ return 0;
+}
diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c
new file mode 100644
index 000000000..861b21c63
--- /dev/null
+++ b/drivers/spi/spi-pxa2xx-pci.c
@@ -0,0 +1,346 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PCI glue driver for SPI PXA2xx compatible controllers.
+ * CE4100's SPI device is more or less the same one as found on PXA.
+ *
+ * Copyright (C) 2016, 2021 Intel Corporation
+ */
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+
+#include <linux/spi/pxa2xx_spi.h>
+
+#include <linux/dmaengine.h>
+#include <linux/platform_data/dma-dw.h>
+
+#define PCI_DEVICE_ID_INTEL_QUARK_X1000 0x0935
+#define PCI_DEVICE_ID_INTEL_BYT 0x0f0e
+#define PCI_DEVICE_ID_INTEL_MRFLD 0x1194
+#define PCI_DEVICE_ID_INTEL_BSW0 0x228e
+#define PCI_DEVICE_ID_INTEL_BSW1 0x2290
+#define PCI_DEVICE_ID_INTEL_BSW2 0x22ac
+#define PCI_DEVICE_ID_INTEL_CE4100 0x2e6a
+#define PCI_DEVICE_ID_INTEL_LPT0_0 0x9c65
+#define PCI_DEVICE_ID_INTEL_LPT0_1 0x9c66
+#define PCI_DEVICE_ID_INTEL_LPT1_0 0x9ce5
+#define PCI_DEVICE_ID_INTEL_LPT1_1 0x9ce6
+
+struct pxa_spi_info {
+ int (*setup)(struct pci_dev *pdev, struct pxa2xx_spi_controller *c);
+};
+
+static struct dw_dma_slave byt_tx_param = { .dst_id = 0 };
+static struct dw_dma_slave byt_rx_param = { .src_id = 1 };
+
+static struct dw_dma_slave mrfld3_tx_param = { .dst_id = 15 };
+static struct dw_dma_slave mrfld3_rx_param = { .src_id = 14 };
+static struct dw_dma_slave mrfld5_tx_param = { .dst_id = 13 };
+static struct dw_dma_slave mrfld5_rx_param = { .src_id = 12 };
+static struct dw_dma_slave mrfld6_tx_param = { .dst_id = 11 };
+static struct dw_dma_slave mrfld6_rx_param = { .src_id = 10 };
+
+static struct dw_dma_slave bsw0_tx_param = { .dst_id = 0 };
+static struct dw_dma_slave bsw0_rx_param = { .src_id = 1 };
+static struct dw_dma_slave bsw1_tx_param = { .dst_id = 6 };
+static struct dw_dma_slave bsw1_rx_param = { .src_id = 7 };
+static struct dw_dma_slave bsw2_tx_param = { .dst_id = 8 };
+static struct dw_dma_slave bsw2_rx_param = { .src_id = 9 };
+
+static struct dw_dma_slave lpt1_tx_param = { .dst_id = 0 };
+static struct dw_dma_slave lpt1_rx_param = { .src_id = 1 };
+static struct dw_dma_slave lpt0_tx_param = { .dst_id = 2 };
+static struct dw_dma_slave lpt0_rx_param = { .src_id = 3 };
+
+static void pxa2xx_spi_pci_clk_unregister(void *clk)
+{
+ clk_unregister(clk);
+}
+
+static int pxa2xx_spi_pci_clk_register(struct pci_dev *dev, struct ssp_device *ssp,
+ unsigned long rate)
+{
+ char buf[40];
+
+ snprintf(buf, sizeof(buf), "pxa2xx-spi.%d", ssp->port_id);
+ ssp->clk = clk_register_fixed_rate(&dev->dev, buf, NULL, 0, rate);
+ if (IS_ERR(ssp->clk))
+ return PTR_ERR(ssp->clk);
+
+ return devm_add_action_or_reset(&dev->dev, pxa2xx_spi_pci_clk_unregister, ssp->clk);
+}
+
+static bool lpss_dma_filter(struct dma_chan *chan, void *param)
+{
+ struct dw_dma_slave *dws = param;
+
+ if (dws->dma_dev != chan->device->dev)
+ return false;
+
+ chan->private = dws;
+ return true;
+}
+
+static void lpss_dma_put_device(void *dma_dev)
+{
+ pci_dev_put(dma_dev);
+}
+
+static int lpss_spi_setup(struct pci_dev *dev, struct pxa2xx_spi_controller *c)
+{
+ struct ssp_device *ssp = &c->ssp;
+ struct dw_dma_slave *tx, *rx;
+ struct pci_dev *dma_dev;
+ int ret;
+
+ switch (dev->device) {
+ case PCI_DEVICE_ID_INTEL_BYT:
+ ssp->type = LPSS_BYT_SSP;
+ ssp->port_id = 0;
+ c->tx_param = &byt_tx_param;
+ c->rx_param = &byt_rx_param;
+ break;
+ case PCI_DEVICE_ID_INTEL_BSW0:
+ ssp->type = LPSS_BSW_SSP;
+ ssp->port_id = 0;
+ c->tx_param = &bsw0_tx_param;
+ c->rx_param = &bsw0_rx_param;
+ break;
+ case PCI_DEVICE_ID_INTEL_BSW1:
+ ssp->type = LPSS_BSW_SSP;
+ ssp->port_id = 1;
+ c->tx_param = &bsw1_tx_param;
+ c->rx_param = &bsw1_rx_param;
+ break;
+ case PCI_DEVICE_ID_INTEL_BSW2:
+ ssp->type = LPSS_BSW_SSP;
+ ssp->port_id = 2;
+ c->tx_param = &bsw2_tx_param;
+ c->rx_param = &bsw2_rx_param;
+ break;
+ case PCI_DEVICE_ID_INTEL_LPT0_0:
+ case PCI_DEVICE_ID_INTEL_LPT1_0:
+ ssp->type = LPSS_LPT_SSP;
+ ssp->port_id = 0;
+ c->tx_param = &lpt0_tx_param;
+ c->rx_param = &lpt0_rx_param;
+ break;
+ case PCI_DEVICE_ID_INTEL_LPT0_1:
+ case PCI_DEVICE_ID_INTEL_LPT1_1:
+ ssp->type = LPSS_LPT_SSP;
+ ssp->port_id = 1;
+ c->tx_param = &lpt1_tx_param;
+ c->rx_param = &lpt1_rx_param;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ c->num_chipselect = 1;
+
+ ret = pxa2xx_spi_pci_clk_register(dev, ssp, 50000000);
+ if (ret)
+ return ret;
+
+ dma_dev = pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
+ ret = devm_add_action_or_reset(&dev->dev, lpss_dma_put_device, dma_dev);
+ if (ret)
+ return ret;
+
+ tx = c->tx_param;
+ tx->dma_dev = &dma_dev->dev;
+ tx->m_master = 0;
+ tx->p_master = 1;
+
+ rx = c->rx_param;
+ rx->dma_dev = &dma_dev->dev;
+ rx->m_master = 0;
+ rx->p_master = 1;
+
+ c->dma_filter = lpss_dma_filter;
+ c->dma_burst_size = 1;
+ c->enable_dma = 1;
+ return 0;
+}
+
+static const struct pxa_spi_info lpss_info_config = {
+ .setup = lpss_spi_setup,
+};
+
+static int ce4100_spi_setup(struct pci_dev *dev, struct pxa2xx_spi_controller *c)
+{
+ struct ssp_device *ssp = &c->ssp;
+
+ ssp->type = PXA25x_SSP;
+ ssp->port_id = dev->devfn;
+ c->num_chipselect = dev->devfn;
+
+ return pxa2xx_spi_pci_clk_register(dev, ssp, 3686400);
+}
+
+static const struct pxa_spi_info ce4100_info_config = {
+ .setup = ce4100_spi_setup,
+};
+
+static int mrfld_spi_setup(struct pci_dev *dev, struct pxa2xx_spi_controller *c)
+{
+ struct ssp_device *ssp = &c->ssp;
+ struct dw_dma_slave *tx, *rx;
+ struct pci_dev *dma_dev;
+ int ret;
+
+ ssp->type = MRFLD_SSP;
+
+ switch (PCI_FUNC(dev->devfn)) {
+ case 0:
+ ssp->port_id = 3;
+ c->num_chipselect = 1;
+ c->tx_param = &mrfld3_tx_param;
+ c->rx_param = &mrfld3_rx_param;
+ break;
+ case 1:
+ ssp->port_id = 5;
+ c->num_chipselect = 4;
+ c->tx_param = &mrfld5_tx_param;
+ c->rx_param = &mrfld5_rx_param;
+ break;
+ case 2:
+ ssp->port_id = 6;
+ c->num_chipselect = 1;
+ c->tx_param = &mrfld6_tx_param;
+ c->rx_param = &mrfld6_rx_param;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ ret = pxa2xx_spi_pci_clk_register(dev, ssp, 25000000);
+ if (ret)
+ return ret;
+
+ dma_dev = pci_get_slot(dev->bus, PCI_DEVFN(21, 0));
+ ret = devm_add_action_or_reset(&dev->dev, lpss_dma_put_device, dma_dev);
+ if (ret)
+ return ret;
+
+ tx = c->tx_param;
+ tx->dma_dev = &dma_dev->dev;
+
+ rx = c->rx_param;
+ rx->dma_dev = &dma_dev->dev;
+
+ c->dma_filter = lpss_dma_filter;
+ c->dma_burst_size = 8;
+ c->enable_dma = 1;
+ return 0;
+}
+
+static const struct pxa_spi_info mrfld_info_config = {
+ .setup = mrfld_spi_setup,
+};
+
+static int qrk_spi_setup(struct pci_dev *dev, struct pxa2xx_spi_controller *c)
+{
+ struct ssp_device *ssp = &c->ssp;
+
+ ssp->type = QUARK_X1000_SSP;
+ ssp->port_id = dev->devfn;
+ c->num_chipselect = 1;
+
+ return pxa2xx_spi_pci_clk_register(dev, ssp, 50000000);
+}
+
+static const struct pxa_spi_info qrk_info_config = {
+ .setup = qrk_spi_setup,
+};
+
+static int pxa2xx_spi_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *ent)
+{
+ const struct pxa_spi_info *info;
+ struct platform_device_info pi;
+ int ret;
+ struct platform_device *pdev;
+ struct pxa2xx_spi_controller spi_pdata;
+ struct ssp_device *ssp;
+
+ ret = pcim_enable_device(dev);
+ if (ret)
+ return ret;
+
+ ret = pcim_iomap_regions(dev, 1 << 0, "PXA2xx SPI");
+ if (ret)
+ return ret;
+
+ memset(&spi_pdata, 0, sizeof(spi_pdata));
+
+ ssp = &spi_pdata.ssp;
+ ssp->dev = &dev->dev;
+ ssp->phys_base = pci_resource_start(dev, 0);
+ ssp->mmio_base = pcim_iomap_table(dev)[0];
+
+ info = (struct pxa_spi_info *)ent->driver_data;
+ ret = info->setup(dev, &spi_pdata);
+ if (ret)
+ return ret;
+
+ pci_set_master(dev);
+
+ ret = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (ret < 0)
+ return ret;
+ ssp->irq = pci_irq_vector(dev, 0);
+
+ memset(&pi, 0, sizeof(pi));
+ pi.fwnode = dev_fwnode(&dev->dev);
+ pi.parent = &dev->dev;
+ pi.name = "pxa2xx-spi";
+ pi.id = ssp->port_id;
+ pi.data = &spi_pdata;
+ pi.size_data = sizeof(spi_pdata);
+
+ pdev = platform_device_register_full(&pi);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+
+ pci_set_drvdata(dev, pdev);
+
+ return 0;
+}
+
+static void pxa2xx_spi_pci_remove(struct pci_dev *dev)
+{
+ struct platform_device *pdev = pci_get_drvdata(dev);
+
+ platform_device_unregister(pdev);
+}
+
+static const struct pci_device_id pxa2xx_spi_pci_devices[] = {
+ { PCI_DEVICE_DATA(INTEL, QUARK_X1000, &qrk_info_config) },
+ { PCI_DEVICE_DATA(INTEL, BYT, &lpss_info_config) },
+ { PCI_DEVICE_DATA(INTEL, MRFLD, &mrfld_info_config) },
+ { PCI_DEVICE_DATA(INTEL, BSW0, &lpss_info_config) },
+ { PCI_DEVICE_DATA(INTEL, BSW1, &lpss_info_config) },
+ { PCI_DEVICE_DATA(INTEL, BSW2, &lpss_info_config) },
+ { PCI_DEVICE_DATA(INTEL, CE4100, &ce4100_info_config) },
+ { PCI_DEVICE_DATA(INTEL, LPT0_0, &lpss_info_config) },
+ { PCI_DEVICE_DATA(INTEL, LPT0_1, &lpss_info_config) },
+ { PCI_DEVICE_DATA(INTEL, LPT1_0, &lpss_info_config) },
+ { PCI_DEVICE_DATA(INTEL, LPT1_1, &lpss_info_config) },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, pxa2xx_spi_pci_devices);
+
+static struct pci_driver pxa2xx_spi_pci_driver = {
+ .name = "pxa2xx_spi_pci",
+ .id_table = pxa2xx_spi_pci_devices,
+ .probe = pxa2xx_spi_pci_probe,
+ .remove = pxa2xx_spi_pci_remove,
+};
+
+module_pci_driver(pxa2xx_spi_pci_driver);
+
+MODULE_DESCRIPTION("CE4100/LPSS PCI-SPI glue code for PXA's driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Sebastian Andrzej Siewior <bigeasy@linutronix.de>");
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
new file mode 100644
index 000000000..2bf21c2e7
--- /dev/null
+++ b/drivers/spi/spi-pxa2xx.c
@@ -0,0 +1,1893 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
+ * Copyright (C) 2013, 2021 Intel Corporation
+ */
+
+#include <linux/acpi.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/gpio/consumer.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+
+#include <linux/spi/pxa2xx_spi.h>
+#include <linux/spi/spi.h>
+
+#include "spi-pxa2xx.h"
+
+MODULE_AUTHOR("Stephen Street");
+MODULE_DESCRIPTION("PXA2xx SSP SPI Controller");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:pxa2xx-spi");
+
+#define TIMOUT_DFLT 1000
+
+/*
+ * Mask for testing SSCR1 changes that require an SSP restart: basically
+ * everything except the service and interrupt enables. The PXA270 developer
+ * manual says only SSCR1_SCFR, SSCR1_SPH and SSCR1_SPO need to be in this
+ * list, while the PXA255 developer manual says all bits, without really
+ * meaning the service and interrupt enables.
+ */
+#define SSCR1_CHANGE_MASK (SSCR1_TTELP | SSCR1_TTE | SSCR1_SCFR \
+ | SSCR1_ECRA | SSCR1_ECRB | SSCR1_SCLKDIR \
+ | SSCR1_SFRMDIR | SSCR1_RWOT | SSCR1_TRAIL \
+ | SSCR1_IFS | SSCR1_STRF | SSCR1_EFWR \
+ | SSCR1_RFT | SSCR1_TFT | SSCR1_MWDS \
+ | SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)
+
+#define QUARK_X1000_SSCR1_CHANGE_MASK (QUARK_X1000_SSCR1_STRF \
+ | QUARK_X1000_SSCR1_EFWR \
+ | QUARK_X1000_SSCR1_RFT \
+ | QUARK_X1000_SSCR1_TFT \
+ | SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)
+
+#define CE4100_SSCR1_CHANGE_MASK (SSCR1_TTELP | SSCR1_TTE | SSCR1_SCFR \
+ | SSCR1_ECRA | SSCR1_ECRB | SSCR1_SCLKDIR \
+ | SSCR1_SFRMDIR | SSCR1_RWOT | SSCR1_TRAIL \
+ | SSCR1_IFS | SSCR1_STRF | SSCR1_EFWR \
+ | CE4100_SSCR1_RFT | CE4100_SSCR1_TFT | SSCR1_MWDS \
+ | SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)
+
+#define LPSS_GENERAL_REG_RXTO_HOLDOFF_DISABLE BIT(24)
+#define LPSS_CS_CONTROL_SW_MODE BIT(0)
+#define LPSS_CS_CONTROL_CS_HIGH BIT(1)
+#define LPSS_CAPS_CS_EN_SHIFT 9
+#define LPSS_CAPS_CS_EN_MASK (0xf << LPSS_CAPS_CS_EN_SHIFT)
+
+#define LPSS_PRIV_CLOCK_GATE 0x38
+#define LPSS_PRIV_CLOCK_GATE_CLK_CTL_MASK 0x3
+#define LPSS_PRIV_CLOCK_GATE_CLK_CTL_FORCE_ON 0x3
+
+struct lpss_config {
+ /* LPSS offset from drv_data->ioaddr */
+ unsigned offset;
+ /* Register offsets from drv_data->lpss_base or -1 */
+ int reg_general;
+ int reg_ssp;
+ int reg_cs_ctrl;
+ int reg_capabilities;
+ /* FIFO thresholds */
+ u32 rx_threshold;
+ u32 tx_threshold_lo;
+ u32 tx_threshold_hi;
+ /* Chip select control */
+ unsigned cs_sel_shift;
+ unsigned cs_sel_mask;
+ unsigned cs_num;
+ /* Quirks */
+ unsigned cs_clk_stays_gated : 1;
+};
+
+/* Keep these sorted with enum pxa_ssp_type */
+static const struct lpss_config lpss_platforms[] = {
+ { /* LPSS_LPT_SSP */
+ .offset = 0x800,
+ .reg_general = 0x08,
+ .reg_ssp = 0x0c,
+ .reg_cs_ctrl = 0x18,
+ .reg_capabilities = -1,
+ .rx_threshold = 64,
+ .tx_threshold_lo = 160,
+ .tx_threshold_hi = 224,
+ },
+ { /* LPSS_BYT_SSP */
+ .offset = 0x400,
+ .reg_general = 0x08,
+ .reg_ssp = 0x0c,
+ .reg_cs_ctrl = 0x18,
+ .reg_capabilities = -1,
+ .rx_threshold = 64,
+ .tx_threshold_lo = 160,
+ .tx_threshold_hi = 224,
+ },
+ { /* LPSS_BSW_SSP */
+ .offset = 0x400,
+ .reg_general = 0x08,
+ .reg_ssp = 0x0c,
+ .reg_cs_ctrl = 0x18,
+ .reg_capabilities = -1,
+ .rx_threshold = 64,
+ .tx_threshold_lo = 160,
+ .tx_threshold_hi = 224,
+ .cs_sel_shift = 2,
+ .cs_sel_mask = 1 << 2,
+ .cs_num = 2,
+ },
+ { /* LPSS_SPT_SSP */
+ .offset = 0x200,
+ .reg_general = -1,
+ .reg_ssp = 0x20,
+ .reg_cs_ctrl = 0x24,
+ .reg_capabilities = -1,
+ .rx_threshold = 1,
+ .tx_threshold_lo = 32,
+ .tx_threshold_hi = 56,
+ },
+ { /* LPSS_BXT_SSP */
+ .offset = 0x200,
+ .reg_general = -1,
+ .reg_ssp = 0x20,
+ .reg_cs_ctrl = 0x24,
+ .reg_capabilities = 0xfc,
+ .rx_threshold = 1,
+ .tx_threshold_lo = 16,
+ .tx_threshold_hi = 48,
+ .cs_sel_shift = 8,
+ .cs_sel_mask = 3 << 8,
+ .cs_clk_stays_gated = true,
+ },
+ { /* LPSS_CNL_SSP */
+ .offset = 0x200,
+ .reg_general = -1,
+ .reg_ssp = 0x20,
+ .reg_cs_ctrl = 0x24,
+ .reg_capabilities = 0xfc,
+ .rx_threshold = 1,
+ .tx_threshold_lo = 32,
+ .tx_threshold_hi = 56,
+ .cs_sel_shift = 8,
+ .cs_sel_mask = 3 << 8,
+ .cs_clk_stays_gated = true,
+ },
+};
+
+static inline const struct lpss_config
+*lpss_get_config(const struct driver_data *drv_data)
+{
+ return &lpss_platforms[drv_data->ssp_type - LPSS_LPT_SSP];
+}
+
+static bool is_lpss_ssp(const struct driver_data *drv_data)
+{
+ switch (drv_data->ssp_type) {
+ case LPSS_LPT_SSP:
+ case LPSS_BYT_SSP:
+ case LPSS_BSW_SSP:
+ case LPSS_SPT_SSP:
+ case LPSS_BXT_SSP:
+ case LPSS_CNL_SSP:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool is_quark_x1000_ssp(const struct driver_data *drv_data)
+{
+ return drv_data->ssp_type == QUARK_X1000_SSP;
+}
+
+static bool is_mmp2_ssp(const struct driver_data *drv_data)
+{
+ return drv_data->ssp_type == MMP2_SSP;
+}
+
+static bool is_mrfld_ssp(const struct driver_data *drv_data)
+{
+ return drv_data->ssp_type == MRFLD_SSP;
+}
+
+static void pxa2xx_spi_update(const struct driver_data *drv_data, u32 reg, u32 mask, u32 value)
+{
+ if ((pxa2xx_spi_read(drv_data, reg) & mask) != value)
+ pxa2xx_spi_write(drv_data, reg, value & mask);
+}
+
+static u32 pxa2xx_spi_get_ssrc1_change_mask(const struct driver_data *drv_data)
+{
+ switch (drv_data->ssp_type) {
+ case QUARK_X1000_SSP:
+ return QUARK_X1000_SSCR1_CHANGE_MASK;
+ case CE4100_SSP:
+ return CE4100_SSCR1_CHANGE_MASK;
+ default:
+ return SSCR1_CHANGE_MASK;
+ }
+}
+
+static u32
+pxa2xx_spi_get_rx_default_thre(const struct driver_data *drv_data)
+{
+ switch (drv_data->ssp_type) {
+ case QUARK_X1000_SSP:
+ return RX_THRESH_QUARK_X1000_DFLT;
+ case CE4100_SSP:
+ return RX_THRESH_CE4100_DFLT;
+ default:
+ return RX_THRESH_DFLT;
+ }
+}
+
+static bool pxa2xx_spi_txfifo_full(const struct driver_data *drv_data)
+{
+ u32 mask;
+
+ switch (drv_data->ssp_type) {
+ case QUARK_X1000_SSP:
+ mask = QUARK_X1000_SSSR_TFL_MASK;
+ break;
+ case CE4100_SSP:
+ mask = CE4100_SSSR_TFL_MASK;
+ break;
+ default:
+ mask = SSSR_TFL_MASK;
+ break;
+ }
+
+ return read_SSSR_bits(drv_data, mask) == mask;
+}
+
+static void pxa2xx_spi_clear_rx_thre(const struct driver_data *drv_data,
+ u32 *sccr1_reg)
+{
+ u32 mask;
+
+ switch (drv_data->ssp_type) {
+ case QUARK_X1000_SSP:
+ mask = QUARK_X1000_SSCR1_RFT;
+ break;
+ case CE4100_SSP:
+ mask = CE4100_SSCR1_RFT;
+ break;
+ default:
+ mask = SSCR1_RFT;
+ break;
+ }
+ *sccr1_reg &= ~mask;
+}
+
+static void pxa2xx_spi_set_rx_thre(const struct driver_data *drv_data,
+ u32 *sccr1_reg, u32 threshold)
+{
+ switch (drv_data->ssp_type) {
+ case QUARK_X1000_SSP:
+ *sccr1_reg |= QUARK_X1000_SSCR1_RxTresh(threshold);
+ break;
+ case CE4100_SSP:
+ *sccr1_reg |= CE4100_SSCR1_RxTresh(threshold);
+ break;
+ default:
+ *sccr1_reg |= SSCR1_RxTresh(threshold);
+ break;
+ }
+}
+
+static u32 pxa2xx_configure_sscr0(const struct driver_data *drv_data,
+ u32 clk_div, u8 bits)
+{
+ switch (drv_data->ssp_type) {
+ case QUARK_X1000_SSP:
+ return clk_div
+ | QUARK_X1000_SSCR0_Motorola
+ | QUARK_X1000_SSCR0_DataSize(bits > 32 ? 8 : bits);
+ default:
+ return clk_div
+ | SSCR0_Motorola
+ | SSCR0_DataSize(bits > 16 ? bits - 16 : bits)
+ | (bits > 16 ? SSCR0_EDSS : 0);
+ }
+}
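+
+/*
+ * Example of the encoding above, assuming SSCR0_DataSize(x) programs x - 1
+ * into the DSS field: a 24-bit word on the non-Quark path is expressed as
+ * SSCR0_DataSize(24 - 16) together with SSCR0_EDSS, i.e. extended data sizes
+ * of 17..32 bits are encoded as EDSS plus (size - 16).
+ */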
+
+/*
+ * Read and write LPSS SSP private registers. Caller must first check that
+ * is_lpss_ssp() returns true before these can be called.
+ */
+static u32 __lpss_ssp_read_priv(struct driver_data *drv_data, unsigned offset)
+{
+ WARN_ON(!drv_data->lpss_base);
+ return readl(drv_data->lpss_base + offset);
+}
+
+static void __lpss_ssp_write_priv(struct driver_data *drv_data,
+ unsigned offset, u32 value)
+{
+ WARN_ON(!drv_data->lpss_base);
+ writel(value, drv_data->lpss_base + offset);
+}
+
+/*
+ * lpss_ssp_setup - perform LPSS SSP specific setup
+ * @drv_data: pointer to the driver private data
+ *
+ * Perform LPSS SSP specific setup. This function must be called first if
+ * one is going to use LPSS SSP private registers.
+ */
+static void lpss_ssp_setup(struct driver_data *drv_data)
+{
+ const struct lpss_config *config;
+ u32 value;
+
+ config = lpss_get_config(drv_data);
+ drv_data->lpss_base = drv_data->ssp->mmio_base + config->offset;
+
+ /* Enable software chip select control */
+ value = __lpss_ssp_read_priv(drv_data, config->reg_cs_ctrl);
+ value &= ~(LPSS_CS_CONTROL_SW_MODE | LPSS_CS_CONTROL_CS_HIGH);
+ value |= LPSS_CS_CONTROL_SW_MODE | LPSS_CS_CONTROL_CS_HIGH;
+ __lpss_ssp_write_priv(drv_data, config->reg_cs_ctrl, value);
+
+ /* Enable multiblock DMA transfers */
+ if (drv_data->controller_info->enable_dma) {
+ __lpss_ssp_write_priv(drv_data, config->reg_ssp, 1);
+
+ if (config->reg_general >= 0) {
+ value = __lpss_ssp_read_priv(drv_data,
+ config->reg_general);
+ value |= LPSS_GENERAL_REG_RXTO_HOLDOFF_DISABLE;
+ __lpss_ssp_write_priv(drv_data,
+ config->reg_general, value);
+ }
+ }
+}
+
+static void lpss_ssp_select_cs(struct spi_device *spi,
+ const struct lpss_config *config)
+{
+ struct driver_data *drv_data =
+ spi_controller_get_devdata(spi->controller);
+ u32 value, cs;
+
+ if (!config->cs_sel_mask)
+ return;
+
+ value = __lpss_ssp_read_priv(drv_data, config->reg_cs_ctrl);
+
+ cs = spi->chip_select;
+ cs <<= config->cs_sel_shift;
+ if (cs != (value & config->cs_sel_mask)) {
+ /*
+		 * When switching another chip select output active, the
+		 * output must be selected first, and then we must wait
+		 * 2 ssp_clk cycles before changing its state to active.
+		 * Otherwise a short glitch will occur on the previous chip
+		 * select since output select is latched but state control
+		 * is not.
+ */
+ value &= ~config->cs_sel_mask;
+ value |= cs;
+ __lpss_ssp_write_priv(drv_data,
+ config->reg_cs_ctrl, value);
+ ndelay(1000000000 /
+ (drv_data->controller->max_speed_hz / 2));
+ }
+}
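+
+/*
+ * Illustrative numbers only: with a controller max_speed_hz of 25 MHz, the
+ * ndelay() in lpss_ssp_select_cs() above waits
+ * 1000000000 / (25000000 / 2) = 80 ns, i.e. two ssp_clk periods at 25 MHz,
+ * matching the "wait 2 ssp_clk cycles" requirement described there.
+ */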
+
+static void lpss_ssp_cs_control(struct spi_device *spi, bool enable)
+{
+ struct driver_data *drv_data =
+ spi_controller_get_devdata(spi->controller);
+ const struct lpss_config *config;
+ u32 value;
+
+ config = lpss_get_config(drv_data);
+
+ if (enable)
+ lpss_ssp_select_cs(spi, config);
+
+ value = __lpss_ssp_read_priv(drv_data, config->reg_cs_ctrl);
+ if (enable)
+ value &= ~LPSS_CS_CONTROL_CS_HIGH;
+ else
+ value |= LPSS_CS_CONTROL_CS_HIGH;
+ __lpss_ssp_write_priv(drv_data, config->reg_cs_ctrl, value);
+ if (config->cs_clk_stays_gated) {
+ u32 clkgate;
+
+ /*
+ * Changing CS alone when dynamic clock gating is on won't
+ * actually flip CS at that time. This ruins SPI transfers
+		 * that specify delays, or have no data. Briefly toggle the
+		 * clock mode to force-on in order to poke the CS pin into moving.
+ */
+ clkgate = __lpss_ssp_read_priv(drv_data, LPSS_PRIV_CLOCK_GATE);
+ value = (clkgate & ~LPSS_PRIV_CLOCK_GATE_CLK_CTL_MASK) |
+ LPSS_PRIV_CLOCK_GATE_CLK_CTL_FORCE_ON;
+
+ __lpss_ssp_write_priv(drv_data, LPSS_PRIV_CLOCK_GATE, value);
+ __lpss_ssp_write_priv(drv_data, LPSS_PRIV_CLOCK_GATE, clkgate);
+ }
+}
+
+static void cs_assert(struct spi_device *spi)
+{
+ struct driver_data *drv_data =
+ spi_controller_get_devdata(spi->controller);
+
+ if (drv_data->ssp_type == CE4100_SSP) {
+ pxa2xx_spi_write(drv_data, SSSR, spi->chip_select);
+ return;
+ }
+
+ if (is_lpss_ssp(drv_data))
+ lpss_ssp_cs_control(spi, true);
+}
+
+static void cs_deassert(struct spi_device *spi)
+{
+ struct driver_data *drv_data =
+ spi_controller_get_devdata(spi->controller);
+ unsigned long timeout;
+
+ if (drv_data->ssp_type == CE4100_SSP)
+ return;
+
+ /* Wait until SSP becomes idle before deasserting the CS */
+ timeout = jiffies + msecs_to_jiffies(10);
+ while (pxa2xx_spi_read(drv_data, SSSR) & SSSR_BSY &&
+ !time_after(jiffies, timeout))
+ cpu_relax();
+
+ if (is_lpss_ssp(drv_data))
+ lpss_ssp_cs_control(spi, false);
+}
+
+static void pxa2xx_spi_set_cs(struct spi_device *spi, bool level)
+{
+ if (level)
+ cs_deassert(spi);
+ else
+ cs_assert(spi);
+}
+
+int pxa2xx_spi_flush(struct driver_data *drv_data)
+{
+ unsigned long limit = loops_per_jiffy << 1;
+
+ do {
+ while (read_SSSR_bits(drv_data, SSSR_RNE))
+ pxa2xx_spi_read(drv_data, SSDR);
+ } while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_BSY) && --limit);
+ write_SSSR_CS(drv_data, SSSR_ROR);
+
+ return limit;
+}
+
+static void pxa2xx_spi_off(struct driver_data *drv_data)
+{
+ /* On MMP, disabling SSE seems to corrupt the Rx FIFO */
+ if (is_mmp2_ssp(drv_data))
+ return;
+
+ pxa_ssp_disable(drv_data->ssp);
+}
+
+static int null_writer(struct driver_data *drv_data)
+{
+ u8 n_bytes = drv_data->n_bytes;
+
+ if (pxa2xx_spi_txfifo_full(drv_data)
+ || (drv_data->tx == drv_data->tx_end))
+ return 0;
+
+ pxa2xx_spi_write(drv_data, SSDR, 0);
+ drv_data->tx += n_bytes;
+
+ return 1;
+}
+
+static int null_reader(struct driver_data *drv_data)
+{
+ u8 n_bytes = drv_data->n_bytes;
+
+ while (read_SSSR_bits(drv_data, SSSR_RNE) && drv_data->rx < drv_data->rx_end) {
+ pxa2xx_spi_read(drv_data, SSDR);
+ drv_data->rx += n_bytes;
+ }
+
+ return drv_data->rx == drv_data->rx_end;
+}
+
+static int u8_writer(struct driver_data *drv_data)
+{
+ if (pxa2xx_spi_txfifo_full(drv_data)
+ || (drv_data->tx == drv_data->tx_end))
+ return 0;
+
+ pxa2xx_spi_write(drv_data, SSDR, *(u8 *)(drv_data->tx));
+ ++drv_data->tx;
+
+ return 1;
+}
+
+static int u8_reader(struct driver_data *drv_data)
+{
+ while (read_SSSR_bits(drv_data, SSSR_RNE) && drv_data->rx < drv_data->rx_end) {
+ *(u8 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR);
+ ++drv_data->rx;
+ }
+
+ return drv_data->rx == drv_data->rx_end;
+}
+
+static int u16_writer(struct driver_data *drv_data)
+{
+ if (pxa2xx_spi_txfifo_full(drv_data)
+ || (drv_data->tx == drv_data->tx_end))
+ return 0;
+
+ pxa2xx_spi_write(drv_data, SSDR, *(u16 *)(drv_data->tx));
+ drv_data->tx += 2;
+
+ return 1;
+}
+
+static int u16_reader(struct driver_data *drv_data)
+{
+ while (read_SSSR_bits(drv_data, SSSR_RNE) && drv_data->rx < drv_data->rx_end) {
+ *(u16 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR);
+ drv_data->rx += 2;
+ }
+
+ return drv_data->rx == drv_data->rx_end;
+}
+
+static int u32_writer(struct driver_data *drv_data)
+{
+ if (pxa2xx_spi_txfifo_full(drv_data)
+ || (drv_data->tx == drv_data->tx_end))
+ return 0;
+
+ pxa2xx_spi_write(drv_data, SSDR, *(u32 *)(drv_data->tx));
+ drv_data->tx += 4;
+
+ return 1;
+}
+
+static int u32_reader(struct driver_data *drv_data)
+{
+ while (read_SSSR_bits(drv_data, SSSR_RNE) && drv_data->rx < drv_data->rx_end) {
+ *(u32 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR);
+ drv_data->rx += 4;
+ }
+
+ return drv_data->rx == drv_data->rx_end;
+}
+
+static void reset_sccr1(struct driver_data *drv_data)
+{
+ u32 mask = drv_data->int_cr1 | drv_data->dma_cr1, threshold;
+ struct chip_data *chip;
+
+ if (drv_data->controller->cur_msg) {
+ chip = spi_get_ctldata(drv_data->controller->cur_msg->spi);
+ threshold = chip->threshold;
+ } else {
+ threshold = 0;
+ }
+
+ switch (drv_data->ssp_type) {
+ case QUARK_X1000_SSP:
+ mask |= QUARK_X1000_SSCR1_RFT;
+ break;
+ case CE4100_SSP:
+ mask |= CE4100_SSCR1_RFT;
+ break;
+ default:
+ mask |= SSCR1_RFT;
+ break;
+ }
+
+ pxa2xx_spi_update(drv_data, SSCR1, mask, threshold);
+}
+
+static void int_stop_and_reset(struct driver_data *drv_data)
+{
+ /* Clear and disable interrupts */
+ write_SSSR_CS(drv_data, drv_data->clear_sr);
+ reset_sccr1(drv_data);
+ if (pxa25x_ssp_comp(drv_data))
+ return;
+
+ pxa2xx_spi_write(drv_data, SSTO, 0);
+}
+
+static void int_error_stop(struct driver_data *drv_data, const char *msg, int err)
+{
+ int_stop_and_reset(drv_data);
+ pxa2xx_spi_flush(drv_data);
+ pxa2xx_spi_off(drv_data);
+
+ dev_err(drv_data->ssp->dev, "%s\n", msg);
+
+ drv_data->controller->cur_msg->status = err;
+ spi_finalize_current_transfer(drv_data->controller);
+}
+
+static void int_transfer_complete(struct driver_data *drv_data)
+{
+ int_stop_and_reset(drv_data);
+
+ spi_finalize_current_transfer(drv_data->controller);
+}
+
+static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
+{
+ u32 irq_status;
+
+ irq_status = read_SSSR_bits(drv_data, drv_data->mask_sr);
+ if (!(pxa2xx_spi_read(drv_data, SSCR1) & SSCR1_TIE))
+ irq_status &= ~SSSR_TFS;
+
+ if (irq_status & SSSR_ROR) {
+ int_error_stop(drv_data, "interrupt_transfer: FIFO overrun", -EIO);
+ return IRQ_HANDLED;
+ }
+
+ if (irq_status & SSSR_TUR) {
+ int_error_stop(drv_data, "interrupt_transfer: FIFO underrun", -EIO);
+ return IRQ_HANDLED;
+ }
+
+ if (irq_status & SSSR_TINT) {
+ pxa2xx_spi_write(drv_data, SSSR, SSSR_TINT);
+ if (drv_data->read(drv_data)) {
+ int_transfer_complete(drv_data);
+ return IRQ_HANDLED;
+ }
+ }
+
+ /* Drain Rx FIFO, Fill Tx FIFO and prevent overruns */
+ do {
+ if (drv_data->read(drv_data)) {
+ int_transfer_complete(drv_data);
+ return IRQ_HANDLED;
+ }
+ } while (drv_data->write(drv_data));
+
+ if (drv_data->read(drv_data)) {
+ int_transfer_complete(drv_data);
+ return IRQ_HANDLED;
+ }
+
+ if (drv_data->tx == drv_data->tx_end) {
+ u32 bytes_left;
+ u32 sccr1_reg;
+
+ sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1);
+ sccr1_reg &= ~SSCR1_TIE;
+
+ /*
+ * PXA25x_SSP has no timeout; set up the Rx threshold for
+ * the remaining Rx bytes.
+ */
+ if (pxa25x_ssp_comp(drv_data)) {
+ u32 rx_thre;
+
+ pxa2xx_spi_clear_rx_thre(drv_data, &sccr1_reg);
+
+ bytes_left = drv_data->rx_end - drv_data->rx;
+ switch (drv_data->n_bytes) {
+ case 4:
+ bytes_left >>= 2;
+ break;
+ case 2:
+ bytes_left >>= 1;
+ break;
+ }
+
+ rx_thre = pxa2xx_spi_get_rx_default_thre(drv_data);
+ if (rx_thre > bytes_left)
+ rx_thre = bytes_left;
+
+ pxa2xx_spi_set_rx_thre(drv_data, &sccr1_reg, rx_thre);
+ }
+ pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg);
+ }
+
+ /* We did something */
+ return IRQ_HANDLED;
+}
+
+static void handle_bad_msg(struct driver_data *drv_data)
+{
+ int_stop_and_reset(drv_data);
+ pxa2xx_spi_off(drv_data);
+
+ dev_err(drv_data->ssp->dev, "bad message state in interrupt handler\n");
+}
+
+static irqreturn_t ssp_int(int irq, void *dev_id)
+{
+ struct driver_data *drv_data = dev_id;
+ u32 sccr1_reg;
+ u32 mask = drv_data->mask_sr;
+ u32 status;
+
+ /*
+ * The IRQ might be shared with other peripherals so we must first
+ * check whether we are RPM suspended or not. If we are, we assume that
+ * the IRQ was not for us (we shouldn't be RPM suspended when the
+ * interrupt is enabled).
+ */
+ if (pm_runtime_suspended(drv_data->ssp->dev))
+ return IRQ_NONE;
+
+ /*
+ * If the device is not yet in RPM suspended state and we get an
+ * interrupt that is meant for another device, check if status bits
+ * are all set to one. That means that the device is already
+ * powered off.
+ */
+ status = pxa2xx_spi_read(drv_data, SSSR);
+ if (status == ~0)
+ return IRQ_NONE;
+
+ sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1);
+
+ /* Ignore possible writes if we don't need to write */
+ if (!(sccr1_reg & SSCR1_TIE))
+ mask &= ~SSSR_TFS;
+
+ /* Ignore RX timeout interrupt if it is disabled */
+ if (!(sccr1_reg & SSCR1_TINTE))
+ mask &= ~SSSR_TINT;
+
+ if (!(status & mask))
+ return IRQ_NONE;
+
+ pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg & ~drv_data->int_cr1);
+ pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg);
+
+ if (!drv_data->controller->cur_msg) {
+ handle_bad_msg(drv_data);
+ /* Never fail */
+ return IRQ_HANDLED;
+ }
+
+ return drv_data->transfer_handler(drv_data);
+}
+
+/*
+ * The Quark SPI has an additional 24 bit register (DDS_CLK_RATE) to multiply
+ * input frequency by fractions of 2^24. It also has a divider by 5.
+ *
+ * There are formulas to get baud rate value for given input frequency and
+ * divider parameters, such as DDS_CLK_RATE and SCR:
+ *
+ * Fsys = 200MHz
+ *
+ * Fssp = Fsys * DDS_CLK_RATE / 2^24 (1)
+ * Baud rate = Fsclk = Fssp / (2 * (SCR + 1)) (2)
+ *
+ * DDS_CLK_RATE is either 2^n or 2^n / 5.
+ * SCR is in range 0 .. 255
+ *
+ * Divisor = 5^i * 2^j * 2 * k
+ * i = [0, 1] i = 1 iff j = 0 or j > 3
+ * j = [0, 23] j = 0 iff i = 1
+ * k = [1, 256]
+ * Special case: j = 0, i = 1: Divisor = 2 / 5
+ *
+ * According to the specification, the recommended values for DDS_CLK_RATE
+ * are:
+ * Case 1: 2^n, n = [0, 23]
+ * Case 2: 2^24 * 2 / 5 (0x666666)
+ * Case 3: less than or equal to 2^24 / 5 / 16 (0x33333)
+ *
+ * In all cases the lowest possible value is better.
+ *
+ * The function calculates parameters for all cases and chooses the one
+ * closest to the requested baud rate.
+ */
+static unsigned int quark_x1000_get_clk_div(int rate, u32 *dds)
+{
+ unsigned long xtal = 200000000;
+ unsigned long fref = xtal / 2; /* mandatory division by 2, see (2); case 3 */
+ unsigned long fref1 = fref / 2; /* case 1 */
+ unsigned long fref2 = fref * 2 / 5; /* case 2 */
+ unsigned long scale;
+ unsigned long q, q1, q2;
+ long r, r1, r2;
+ u32 mul;
+
+ /* Case 1 */
+
+ /* Set initial value for DDS_CLK_RATE */
+ mul = (1 << 24) >> 1;
+
+ /* Calculate initial quot */
+ q1 = DIV_ROUND_UP(fref1, rate);
+
+ /* Scale q1 if it's too big */
+ if (q1 > 256) {
+ /* Scale q1 to range [1, 512] */
+ scale = fls_long(q1 - 1);
+ if (scale > 9) {
+ q1 >>= scale - 9;
+ mul >>= scale - 9;
+ }
+
+ /* Round the result if we have a remainder */
+ q1 += q1 & 1;
+ }
+
+ /* Decrease DDS_CLK_RATE as much as we can without loss in precision */
+ scale = __ffs(q1);
+ q1 >>= scale;
+ mul >>= scale;
+
+ /* Get the remainder */
+ r1 = abs(fref1 / (1 << (24 - fls_long(mul))) / q1 - rate);
+
+ /* Case 2 */
+
+ q2 = DIV_ROUND_UP(fref2, rate);
+ r2 = abs(fref2 / q2 - rate);
+
+ /*
+ * Choose the better of the two: the smaller the remainder, the better.
+ * We can't use case 2 if q2 is greater than 256 since the SCR register
+ * can hold only values 0 .. 255.
+ */
+ if (r2 >= r1 || q2 > 256) {
+ /* case 1 is better */
+ r = r1;
+ q = q1;
+ } else {
+ /* case 2 is better */
+ r = r2;
+ q = q2;
+ mul = (1 << 24) * 2 / 5;
+ }
+
+ /* Check case 3 only if the divisor is big enough */
+ if (fref / rate >= 80) {
+ u64 fssp;
+ u32 m;
+
+ /* Calculate initial quot */
+ q1 = DIV_ROUND_UP(fref, rate);
+ m = (1 << 24) / q1;
+
+ /* Get the remainder */
+ fssp = (u64)fref * m;
+ do_div(fssp, 1 << 24);
+ r1 = abs(fssp - rate);
+
+ /* Choose this one if it suits better */
+ if (r1 < r) {
+ /* case 3 is better */
+ q = 1;
+ mul = m;
+ }
+ }
+
+ *dds = mul;
+ return q - 1;
+}
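+
+/*
+ * Worked example for the divisor selection above (illustrative values,
+ * assuming a requested rate of 5 MHz and the fixed 200 MHz input):
+ *
+ *   fref1 = 200 MHz / 2 / 2 = 50 MHz
+ *   q1 = DIV_ROUND_UP(50 MHz, 5 MHz) = 10, reduced by the common factor
+ *        of 2 to q1 = 5, with DDS_CLK_RATE = 2^22 (0x400000)
+ *
+ * Checking against (1) and (2): Fssp = 200 MHz * 0x400000 / 2^24 = 50 MHz
+ * and baud rate = 50 MHz / (2 * (SCR + 1)) with SCR = q - 1 = 4, i.e.
+ * exactly 5 MHz, so case 1 wins with zero remainder.
+ */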
+
+static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate)
+{
+ unsigned long ssp_clk = drv_data->controller->max_speed_hz;
+ const struct ssp_device *ssp = drv_data->ssp;
+
+ rate = min_t(int, ssp_clk, rate);
+
+ /*
+ * Calculate the divisor for the SCR (Serial Clock Rate), ensuring
+ * that the SSP transmission rate does not exceed the device rate.
+ */
+ if (ssp->type == PXA25x_SSP || ssp->type == CE4100_SSP)
+ return (DIV_ROUND_UP(ssp_clk, 2 * rate) - 1) & 0xff;
+ else
+ return (DIV_ROUND_UP(ssp_clk, rate) - 1) & 0xfff;
+}
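+
+/*
+ * Worked example for the calculation above (illustrative values, assuming
+ * ssp_clk = 192 MHz and a requested rate of 10 MHz on a non-PXA25x/CE4100
+ * port): SCR = DIV_ROUND_UP(192 MHz, 10 MHz) - 1 = 19, giving an actual
+ * bit rate of 192 MHz / 20 = 9.6 MHz, which never exceeds the requested
+ * rate because the divisor is rounded up.
+ */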
+
+static unsigned int pxa2xx_ssp_get_clk_div(struct driver_data *drv_data,
+ int rate)
+{
+ struct chip_data *chip =
+ spi_get_ctldata(drv_data->controller->cur_msg->spi);
+ unsigned int clk_div;
+
+ switch (drv_data->ssp_type) {
+ case QUARK_X1000_SSP:
+ clk_div = quark_x1000_get_clk_div(rate, &chip->dds_rate);
+ break;
+ default:
+ clk_div = ssp_get_clk_div(drv_data, rate);
+ break;
+ }
+ return clk_div << 8;
+}
+
+static bool pxa2xx_spi_can_dma(struct spi_controller *controller,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct chip_data *chip = spi_get_ctldata(spi);
+
+ return chip->enable_dma &&
+ xfer->len <= MAX_DMA_LEN &&
+ xfer->len >= chip->dma_burst_size;
+}
+
+static int pxa2xx_spi_transfer_one(struct spi_controller *controller,
+ struct spi_device *spi,
+ struct spi_transfer *transfer)
+{
+ struct driver_data *drv_data = spi_controller_get_devdata(controller);
+ struct spi_message *message = controller->cur_msg;
+ struct chip_data *chip = spi_get_ctldata(spi);
+ u32 dma_thresh = chip->dma_threshold;
+ u32 dma_burst = chip->dma_burst_size;
+ u32 change_mask = pxa2xx_spi_get_ssrc1_change_mask(drv_data);
+ u32 clk_div;
+ u8 bits;
+ u32 speed;
+ u32 cr0;
+ u32 cr1;
+ int err;
+ int dma_mapped;
+
+ /* Check if we can DMA this transfer */
+ if (transfer->len > MAX_DMA_LEN && chip->enable_dma) {
+
+ /* Reject already-mapped transfers; PIO won't always work */
+ if (message->is_dma_mapped
+ || transfer->rx_dma || transfer->tx_dma) {
+ dev_err(&spi->dev,
+ "Mapped transfer length of %u is greater than %d\n",
+ transfer->len, MAX_DMA_LEN);
+ return -EINVAL;
+ }
+
+ /* Warn ... we force this to PIO mode */
+ dev_warn_ratelimited(&spi->dev,
+ "DMA disabled for transfer length %u greater than %d\n",
+ transfer->len, MAX_DMA_LEN);
+ }
+
+ /* Setup the transfer state based on the type of transfer */
+ if (pxa2xx_spi_flush(drv_data) == 0) {
+ dev_err(&spi->dev, "Flush failed\n");
+ return -EIO;
+ }
+ drv_data->tx = (void *)transfer->tx_buf;
+ drv_data->tx_end = drv_data->tx + transfer->len;
+ drv_data->rx = transfer->rx_buf;
+ drv_data->rx_end = drv_data->rx + transfer->len;
+
+ /* Change speed and bit per word on a per transfer */
+ bits = transfer->bits_per_word;
+ speed = transfer->speed_hz;
+
+ clk_div = pxa2xx_ssp_get_clk_div(drv_data, speed);
+
+ if (bits <= 8) {
+ drv_data->n_bytes = 1;
+ drv_data->read = drv_data->rx ? u8_reader : null_reader;
+ drv_data->write = drv_data->tx ? u8_writer : null_writer;
+ } else if (bits <= 16) {
+ drv_data->n_bytes = 2;
+ drv_data->read = drv_data->rx ? u16_reader : null_reader;
+ drv_data->write = drv_data->tx ? u16_writer : null_writer;
+ } else if (bits <= 32) {
+ drv_data->n_bytes = 4;
+ drv_data->read = drv_data->rx ? u32_reader : null_reader;
+ drv_data->write = drv_data->tx ? u32_writer : null_writer;
+ }
+ /*
+ * If bits per word is changed in DMA mode, then the thresholds
+ * and burst size must be checked as well.
+ */
+ if (chip->enable_dma) {
+ if (pxa2xx_spi_set_dma_burst_and_threshold(chip,
+ spi,
+ bits, &dma_burst,
+ &dma_thresh))
+ dev_warn_ratelimited(&spi->dev,
+ "DMA burst size reduced to match bits_per_word\n");
+ }
+
+ dma_mapped = controller->can_dma &&
+ controller->can_dma(controller, spi, transfer) &&
+ controller->cur_msg_mapped;
+ if (dma_mapped) {
+
+ /* Ensure we have the correct interrupt handler */
+ drv_data->transfer_handler = pxa2xx_spi_dma_transfer;
+
+ err = pxa2xx_spi_dma_prepare(drv_data, transfer);
+ if (err)
+ return err;
+
+ /* Clear status and start DMA engine */
+ cr1 = chip->cr1 | dma_thresh | drv_data->dma_cr1;
+ pxa2xx_spi_write(drv_data, SSSR, drv_data->clear_sr);
+
+ pxa2xx_spi_dma_start(drv_data);
+ } else {
+ /* Ensure we have the correct interrupt handler */
+ drv_data->transfer_handler = interrupt_transfer;
+
+ /* Clear status */
+ cr1 = chip->cr1 | chip->threshold | drv_data->int_cr1;
+ write_SSSR_CS(drv_data, drv_data->clear_sr);
+ }
+
+ /* NOTE: PXA25x_SSP _could_ use external clocking ... */
+ cr0 = pxa2xx_configure_sscr0(drv_data, clk_div, bits);
+ if (!pxa25x_ssp_comp(drv_data))
+ dev_dbg(&spi->dev, "%u Hz actual, %s\n",
+ controller->max_speed_hz
+ / (1 + ((cr0 & SSCR0_SCR(0xfff)) >> 8)),
+ dma_mapped ? "DMA" : "PIO");
+ else
+ dev_dbg(&spi->dev, "%u Hz actual, %s\n",
+ controller->max_speed_hz / 2
+ / (1 + ((cr0 & SSCR0_SCR(0x0ff)) >> 8)),
+ dma_mapped ? "DMA" : "PIO");
+
+ if (is_lpss_ssp(drv_data)) {
+ pxa2xx_spi_update(drv_data, SSIRF, GENMASK(7, 0), chip->lpss_rx_threshold);
+ pxa2xx_spi_update(drv_data, SSITF, GENMASK(15, 0), chip->lpss_tx_threshold);
+ }
+
+ if (is_mrfld_ssp(drv_data)) {
+ u32 mask = SFIFOTT_RFT | SFIFOTT_TFT;
+ u32 thresh = 0;
+
+ thresh |= SFIFOTT_RxThresh(chip->lpss_rx_threshold);
+ thresh |= SFIFOTT_TxThresh(chip->lpss_tx_threshold);
+
+ pxa2xx_spi_update(drv_data, SFIFOTT, mask, thresh);
+ }
+
+ if (is_quark_x1000_ssp(drv_data))
+ pxa2xx_spi_update(drv_data, DDS_RATE, GENMASK(23, 0), chip->dds_rate);
+
+ /* Stop the SSP */
+ if (!is_mmp2_ssp(drv_data))
+ pxa_ssp_disable(drv_data->ssp);
+
+ if (!pxa25x_ssp_comp(drv_data))
+ pxa2xx_spi_write(drv_data, SSTO, chip->timeout);
+
+ /* First set CR1 without interrupt and service enables */
+ pxa2xx_spi_update(drv_data, SSCR1, change_mask, cr1);
+
+ /* See if we need to reload the configuration registers */
+ pxa2xx_spi_update(drv_data, SSCR0, GENMASK(31, 0), cr0);
+
+ /* Restart the SSP */
+ pxa_ssp_enable(drv_data->ssp);
+
+ if (is_mmp2_ssp(drv_data)) {
+ u8 tx_level = read_SSSR_bits(drv_data, SSSR_TFL_MASK) >> 8;
+
+ if (tx_level) {
+ /* On MMP2, flipping SSE doesn't empty the Tx FIFO. */
+ dev_warn(&spi->dev, "%u bytes of garbage in Tx FIFO!\n", tx_level);
+ if (tx_level > transfer->len)
+ tx_level = transfer->len;
+ drv_data->tx += tx_level;
+ }
+ }
+
+ if (spi_controller_is_slave(controller)) {
+ while (drv_data->write(drv_data))
+ ;
+ if (drv_data->gpiod_ready) {
+ gpiod_set_value(drv_data->gpiod_ready, 1);
+ udelay(1);
+ gpiod_set_value(drv_data->gpiod_ready, 0);
+ }
+ }
+
+ /*
+ * Release the data by enabling service requests and interrupts,
+ * without changing any mode bits.
+ */
+ pxa2xx_spi_write(drv_data, SSCR1, cr1);
+
+ return 1;
+}
+
+static int pxa2xx_spi_slave_abort(struct spi_controller *controller)
+{
+ struct driver_data *drv_data = spi_controller_get_devdata(controller);
+
+ int_error_stop(drv_data, "transfer aborted", -EINTR);
+
+ return 0;
+}
+
+static void pxa2xx_spi_handle_err(struct spi_controller *controller,
+ struct spi_message *msg)
+{
+ struct driver_data *drv_data = spi_controller_get_devdata(controller);
+
+ int_stop_and_reset(drv_data);
+
+ /* Disable the SSP */
+ pxa2xx_spi_off(drv_data);
+
+ /*
+ * Stop the DMA if it is running. Note that the DMA callback handler
+ * may already have cleared dma_running, which is fine since stopping
+ * is then not needed, but we shouldn't rely on this flag for anything
+ * other than stopping, for instance to differentiate between PIO and
+ * DMA transfers.
+ */
+ if (atomic_read(&drv_data->dma_running))
+ pxa2xx_spi_dma_stop(drv_data);
+}
+
+static int pxa2xx_spi_unprepare_transfer(struct spi_controller *controller)
+{
+ struct driver_data *drv_data = spi_controller_get_devdata(controller);
+
+ /* Disable the SSP now */
+ pxa2xx_spi_off(drv_data);
+
+ return 0;
+}
+
+static int setup(struct spi_device *spi)
+{
+ struct pxa2xx_spi_chip *chip_info;
+ struct chip_data *chip;
+ const struct lpss_config *config;
+ struct driver_data *drv_data =
+ spi_controller_get_devdata(spi->controller);
+ uint tx_thres, tx_hi_thres, rx_thres;
+
+ switch (drv_data->ssp_type) {
+ case QUARK_X1000_SSP:
+ tx_thres = TX_THRESH_QUARK_X1000_DFLT;
+ tx_hi_thres = 0;
+ rx_thres = RX_THRESH_QUARK_X1000_DFLT;
+ break;
+ case MRFLD_SSP:
+ tx_thres = TX_THRESH_MRFLD_DFLT;
+ tx_hi_thres = 0;
+ rx_thres = RX_THRESH_MRFLD_DFLT;
+ break;
+ case CE4100_SSP:
+ tx_thres = TX_THRESH_CE4100_DFLT;
+ tx_hi_thres = 0;
+ rx_thres = RX_THRESH_CE4100_DFLT;
+ break;
+ case LPSS_LPT_SSP:
+ case LPSS_BYT_SSP:
+ case LPSS_BSW_SSP:
+ case LPSS_SPT_SSP:
+ case LPSS_BXT_SSP:
+ case LPSS_CNL_SSP:
+ config = lpss_get_config(drv_data);
+ tx_thres = config->tx_threshold_lo;
+ tx_hi_thres = config->tx_threshold_hi;
+ rx_thres = config->rx_threshold;
+ break;
+ default:
+ tx_hi_thres = 0;
+ if (spi_controller_is_slave(drv_data->controller)) {
+ tx_thres = 1;
+ rx_thres = 2;
+ } else {
+ tx_thres = TX_THRESH_DFLT;
+ rx_thres = RX_THRESH_DFLT;
+ }
+ break;
+ }
+
+ /* Only allocate on the first setup */
+ chip = spi_get_ctldata(spi);
+ if (!chip) {
+ chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ if (drv_data->ssp_type == CE4100_SSP) {
+ if (spi->chip_select > 4) {
+ dev_err(&spi->dev,
+ "failed setup: cs number must not be > 4.\n");
+ kfree(chip);
+ return -EINVAL;
+ }
+ }
+ chip->enable_dma = drv_data->controller_info->enable_dma;
+ chip->timeout = TIMOUT_DFLT;
+ }
+
+ /*
+ * Protocol drivers may change the chip settings, so...
+ * if chip_info exists, use it.
+ */
+ chip_info = spi->controller_data;
+
+ /* chip_info isn't always needed */
+ if (chip_info) {
+ if (chip_info->timeout)
+ chip->timeout = chip_info->timeout;
+ if (chip_info->tx_threshold)
+ tx_thres = chip_info->tx_threshold;
+ if (chip_info->tx_hi_threshold)
+ tx_hi_thres = chip_info->tx_hi_threshold;
+ if (chip_info->rx_threshold)
+ rx_thres = chip_info->rx_threshold;
+ chip->dma_threshold = 0;
+ }
+
+ chip->cr1 = 0;
+ if (spi_controller_is_slave(drv_data->controller)) {
+ chip->cr1 |= SSCR1_SCFR;
+ chip->cr1 |= SSCR1_SCLKDIR;
+ chip->cr1 |= SSCR1_SFRMDIR;
+ chip->cr1 |= SSCR1_SPH;
+ }
+
+ if (is_lpss_ssp(drv_data)) {
+ chip->lpss_rx_threshold = SSIRF_RxThresh(rx_thres);
+ chip->lpss_tx_threshold = SSITF_TxLoThresh(tx_thres) |
+ SSITF_TxHiThresh(tx_hi_thres);
+ }
+
+ if (is_mrfld_ssp(drv_data)) {
+ chip->lpss_rx_threshold = rx_thres;
+ chip->lpss_tx_threshold = tx_thres;
+ }
+
+ /*
+ * Set DMA burst and threshold outside of chip_info path so that if
+ * chip_info goes away after setting chip->enable_dma, the burst and
+ * threshold can still respond to changes in bits_per_word.
+ */
+ if (chip->enable_dma) {
+ /* Set up legal burst and threshold for DMA */
+ if (pxa2xx_spi_set_dma_burst_and_threshold(chip, spi,
+ spi->bits_per_word,
+ &chip->dma_burst_size,
+ &chip->dma_threshold)) {
+ dev_warn(&spi->dev,
+ "in setup: DMA burst size reduced to match bits_per_word\n");
+ }
+ dev_dbg(&spi->dev,
+ "in setup: DMA burst size set to %u\n",
+ chip->dma_burst_size);
+ }
+
+ switch (drv_data->ssp_type) {
+ case QUARK_X1000_SSP:
+ chip->threshold = (QUARK_X1000_SSCR1_RxTresh(rx_thres)
+ & QUARK_X1000_SSCR1_RFT)
+ | (QUARK_X1000_SSCR1_TxTresh(tx_thres)
+ & QUARK_X1000_SSCR1_TFT);
+ break;
+ case CE4100_SSP:
+ chip->threshold = (CE4100_SSCR1_RxTresh(rx_thres) & CE4100_SSCR1_RFT) |
+ (CE4100_SSCR1_TxTresh(tx_thres) & CE4100_SSCR1_TFT);
+ break;
+ default:
+ chip->threshold = (SSCR1_RxTresh(rx_thres) & SSCR1_RFT) |
+ (SSCR1_TxTresh(tx_thres) & SSCR1_TFT);
+ break;
+ }
+
+ chip->cr1 &= ~(SSCR1_SPO | SSCR1_SPH);
+ chip->cr1 |= ((spi->mode & SPI_CPHA) ? SSCR1_SPH : 0) |
+ ((spi->mode & SPI_CPOL) ? SSCR1_SPO : 0);
+
+ if (spi->mode & SPI_LOOP)
+ chip->cr1 |= SSCR1_LBM;
+
+ spi_set_ctldata(spi, chip);
+
+ return 0;
+}
+
+static void cleanup(struct spi_device *spi)
+{
+ struct chip_data *chip = spi_get_ctldata(spi);
+
+ kfree(chip);
+}
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id pxa2xx_spi_acpi_match[] = {
+ { "INT33C0", LPSS_LPT_SSP },
+ { "INT33C1", LPSS_LPT_SSP },
+ { "INT3430", LPSS_LPT_SSP },
+ { "INT3431", LPSS_LPT_SSP },
+ { "80860F0E", LPSS_BYT_SSP },
+ { "8086228E", LPSS_BSW_SSP },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, pxa2xx_spi_acpi_match);
+#endif
+
+/*
+ * PCI IDs of compound devices that integrate both the host controller and a
+ * private integrated DMA engine. Please note these are not used for module
+ * autoloading and probing in this module but for matching the LPSS SSP type.
+ */
+static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = {
+ /* SPT-LP */
+ { PCI_VDEVICE(INTEL, 0x9d29), LPSS_SPT_SSP },
+ { PCI_VDEVICE(INTEL, 0x9d2a), LPSS_SPT_SSP },
+ /* SPT-H */
+ { PCI_VDEVICE(INTEL, 0xa129), LPSS_SPT_SSP },
+ { PCI_VDEVICE(INTEL, 0xa12a), LPSS_SPT_SSP },
+ /* KBL-H */
+ { PCI_VDEVICE(INTEL, 0xa2a9), LPSS_SPT_SSP },
+ { PCI_VDEVICE(INTEL, 0xa2aa), LPSS_SPT_SSP },
+ /* CML-V */
+ { PCI_VDEVICE(INTEL, 0xa3a9), LPSS_SPT_SSP },
+ { PCI_VDEVICE(INTEL, 0xa3aa), LPSS_SPT_SSP },
+ /* BXT A-Step */
+ { PCI_VDEVICE(INTEL, 0x0ac2), LPSS_BXT_SSP },
+ { PCI_VDEVICE(INTEL, 0x0ac4), LPSS_BXT_SSP },
+ { PCI_VDEVICE(INTEL, 0x0ac6), LPSS_BXT_SSP },
+ /* BXT B-Step */
+ { PCI_VDEVICE(INTEL, 0x1ac2), LPSS_BXT_SSP },
+ { PCI_VDEVICE(INTEL, 0x1ac4), LPSS_BXT_SSP },
+ { PCI_VDEVICE(INTEL, 0x1ac6), LPSS_BXT_SSP },
+ /* GLK */
+ { PCI_VDEVICE(INTEL, 0x31c2), LPSS_BXT_SSP },
+ { PCI_VDEVICE(INTEL, 0x31c4), LPSS_BXT_SSP },
+ { PCI_VDEVICE(INTEL, 0x31c6), LPSS_BXT_SSP },
+ /* ICL-LP */
+ { PCI_VDEVICE(INTEL, 0x34aa), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0x34ab), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0x34fb), LPSS_CNL_SSP },
+ /* EHL */
+ { PCI_VDEVICE(INTEL, 0x4b2a), LPSS_BXT_SSP },
+ { PCI_VDEVICE(INTEL, 0x4b2b), LPSS_BXT_SSP },
+ { PCI_VDEVICE(INTEL, 0x4b37), LPSS_BXT_SSP },
+ /* JSL */
+ { PCI_VDEVICE(INTEL, 0x4daa), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0x4dab), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0x4dfb), LPSS_CNL_SSP },
+ /* TGL-H */
+ { PCI_VDEVICE(INTEL, 0x43aa), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0x43ab), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0x43fb), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0x43fd), LPSS_CNL_SSP },
+ /* ADL-P */
+ { PCI_VDEVICE(INTEL, 0x51aa), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0x51ab), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0x51fb), LPSS_CNL_SSP },
+ /* ADL-M */
+ { PCI_VDEVICE(INTEL, 0x54aa), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0x54ab), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0x54fb), LPSS_CNL_SSP },
+ /* APL */
+ { PCI_VDEVICE(INTEL, 0x5ac2), LPSS_BXT_SSP },
+ { PCI_VDEVICE(INTEL, 0x5ac4), LPSS_BXT_SSP },
+ { PCI_VDEVICE(INTEL, 0x5ac6), LPSS_BXT_SSP },
+ /* RPL-S */
+ { PCI_VDEVICE(INTEL, 0x7a2a), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0x7a2b), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0x7a79), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0x7a7b), LPSS_CNL_SSP },
+ /* ADL-S */
+ { PCI_VDEVICE(INTEL, 0x7aaa), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0x7aab), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0x7af9), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0x7afb), LPSS_CNL_SSP },
+ /* MTL-P */
+ { PCI_VDEVICE(INTEL, 0x7e27), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0x7e30), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0x7e46), LPSS_CNL_SSP },
+ /* CNL-LP */
+ { PCI_VDEVICE(INTEL, 0x9daa), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0x9dab), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0x9dfb), LPSS_CNL_SSP },
+ /* CNL-H */
+ { PCI_VDEVICE(INTEL, 0xa32a), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0xa32b), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0xa37b), LPSS_CNL_SSP },
+ /* CML-LP */
+ { PCI_VDEVICE(INTEL, 0x02aa), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0x02ab), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0x02fb), LPSS_CNL_SSP },
+ /* CML-H */
+ { PCI_VDEVICE(INTEL, 0x06aa), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0x06ab), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0x06fb), LPSS_CNL_SSP },
+ /* TGL-LP */
+ { PCI_VDEVICE(INTEL, 0xa0aa), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0xa0ab), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0xa0de), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0xa0df), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0xa0fb), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0xa0fd), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0xa0fe), LPSS_CNL_SSP },
+ { },
+};
+
+static const struct of_device_id pxa2xx_spi_of_match[] = {
+ { .compatible = "marvell,mmp2-ssp", .data = (void *)MMP2_SSP },
+ {},
+};
+MODULE_DEVICE_TABLE(of, pxa2xx_spi_of_match);
+
+#ifdef CONFIG_PCI
+
+static bool pxa2xx_spi_idma_filter(struct dma_chan *chan, void *param)
+{
+ return param == chan->device->dev;
+}
+
+#endif /* CONFIG_PCI */
+
+static struct pxa2xx_spi_controller *
+pxa2xx_spi_init_pdata(struct platform_device *pdev)
+{
+ struct pxa2xx_spi_controller *pdata;
+ struct device *dev = &pdev->dev;
+ struct device *parent = dev->parent;
+ struct ssp_device *ssp;
+ struct resource *res;
+ struct pci_dev *pcidev = dev_is_pci(parent) ? to_pci_dev(parent) : NULL;
+ const struct pci_device_id *pcidev_id = NULL;
+ enum pxa_ssp_type type;
+ const void *match;
+ int status;
+ u64 uid;
+
+ if (pcidev)
+ pcidev_id = pci_match_id(pxa2xx_spi_pci_compound_match, pcidev);
+
+ match = device_get_match_data(&pdev->dev);
+ if (match)
+ type = (enum pxa_ssp_type)match;
+ else if (pcidev_id)
+ type = (enum pxa_ssp_type)pcidev_id->driver_data;
+ else
+ return ERR_PTR(-EINVAL);
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return ERR_PTR(-ENOMEM);
+
+ ssp = &pdata->ssp;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ssp->mmio_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ssp->mmio_base))
+ return ERR_CAST(ssp->mmio_base);
+
+ ssp->phys_base = res->start;
+
+#ifdef CONFIG_PCI
+ if (pcidev_id) {
+ pdata->tx_param = parent;
+ pdata->rx_param = parent;
+ pdata->dma_filter = pxa2xx_spi_idma_filter;
+ }
+#endif
+
+ ssp->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(ssp->clk))
+ return ERR_CAST(ssp->clk);
+
+ ssp->irq = platform_get_irq(pdev, 0);
+ if (ssp->irq < 0)
+ return ERR_PTR(ssp->irq);
+
+ ssp->type = type;
+ ssp->dev = &pdev->dev;
+
+ status = acpi_dev_uid_to_integer(ACPI_COMPANION(dev), &uid);
+ if (status)
+ ssp->port_id = -1;
+ else
+ ssp->port_id = uid;
+
+ pdata->is_slave = device_property_read_bool(&pdev->dev, "spi-slave");
+ pdata->num_chipselect = 1;
+ pdata->enable_dma = true;
+ pdata->dma_burst_size = 1;
+
+ return pdata;
+}
+
+static int pxa2xx_spi_fw_translate_cs(struct spi_controller *controller,
+ unsigned int cs)
+{
+ struct driver_data *drv_data = spi_controller_get_devdata(controller);
+
+ if (has_acpi_companion(drv_data->ssp->dev)) {
+ switch (drv_data->ssp_type) {
+ /*
+ * For Atoms the ACPI DeviceSelection used by the Windows
+ * driver starts from 1 instead of 0 so translate it here
+ * to match what Linux expects.
+ */
+ case LPSS_BYT_SSP:
+ case LPSS_BSW_SSP:
+ return cs - 1;
+
+ default:
+ break;
+ }
+ }
+
+ return cs;
+}
+
+static size_t pxa2xx_spi_max_dma_transfer_size(struct spi_device *spi)
+{
+ return MAX_DMA_LEN;
+}
+
+static int pxa2xx_spi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct pxa2xx_spi_controller *platform_info;
+ struct spi_controller *controller;
+ struct driver_data *drv_data;
+ struct ssp_device *ssp;
+ const struct lpss_config *config;
+ int status;
+ u32 tmp;
+
+ platform_info = dev_get_platdata(dev);
+ if (!platform_info) {
+ platform_info = pxa2xx_spi_init_pdata(pdev);
+ if (IS_ERR(platform_info)) {
+ dev_err(&pdev->dev, "missing platform data\n");
+ return PTR_ERR(platform_info);
+ }
+ }
+
+ ssp = pxa_ssp_request(pdev->id, pdev->name);
+ if (!ssp)
+ ssp = &platform_info->ssp;
+
+ if (!ssp->mmio_base) {
+ dev_err(&pdev->dev, "failed to get SSP\n");
+ return -ENODEV;
+ }
+
+ if (platform_info->is_slave)
+ controller = devm_spi_alloc_slave(dev, sizeof(*drv_data));
+ else
+ controller = devm_spi_alloc_master(dev, sizeof(*drv_data));
+
+ if (!controller) {
+ dev_err(&pdev->dev, "cannot alloc spi_controller\n");
+ status = -ENOMEM;
+ goto out_error_controller_alloc;
+ }
+ drv_data = spi_controller_get_devdata(controller);
+ drv_data->controller = controller;
+ drv_data->controller_info = platform_info;
+ drv_data->ssp = ssp;
+
+ device_set_node(&controller->dev, dev_fwnode(dev));
+
+ /* The spi->mode bits understood by this driver: */
+ controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
+
+ controller->bus_num = ssp->port_id;
+ controller->dma_alignment = DMA_ALIGNMENT;
+ controller->cleanup = cleanup;
+ controller->setup = setup;
+ controller->set_cs = pxa2xx_spi_set_cs;
+ controller->transfer_one = pxa2xx_spi_transfer_one;
+ controller->slave_abort = pxa2xx_spi_slave_abort;
+ controller->handle_err = pxa2xx_spi_handle_err;
+ controller->unprepare_transfer_hardware = pxa2xx_spi_unprepare_transfer;
+ controller->fw_translate_cs = pxa2xx_spi_fw_translate_cs;
+ controller->auto_runtime_pm = true;
+ controller->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
+
+ drv_data->ssp_type = ssp->type;
+
+ if (pxa25x_ssp_comp(drv_data)) {
+ switch (drv_data->ssp_type) {
+ case QUARK_X1000_SSP:
+ controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
+ break;
+ default:
+ controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
+ break;
+ }
+
+ drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE;
+ drv_data->dma_cr1 = 0;
+ drv_data->clear_sr = SSSR_ROR;
+ drv_data->mask_sr = SSSR_RFS | SSSR_TFS | SSSR_ROR;
+ } else {
+ controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
+ drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE | SSCR1_TINTE;
+ drv_data->dma_cr1 = DEFAULT_DMA_CR1;
+ drv_data->clear_sr = SSSR_ROR | SSSR_TINT;
+ drv_data->mask_sr = SSSR_TINT | SSSR_RFS | SSSR_TFS
+ | SSSR_ROR | SSSR_TUR;
+ }
+
+ status = request_irq(ssp->irq, ssp_int, IRQF_SHARED, dev_name(dev),
+ drv_data);
+ if (status < 0) {
+ dev_err(&pdev->dev, "cannot get IRQ %d\n", ssp->irq);
+ goto out_error_controller_alloc;
+ }
+
+ /* Setup DMA if requested */
+ if (platform_info->enable_dma) {
+ status = pxa2xx_spi_dma_setup(drv_data);
+ if (status) {
+ dev_warn(dev, "no DMA channels available, using PIO\n");
+ platform_info->enable_dma = false;
+ } else {
+ controller->can_dma = pxa2xx_spi_can_dma;
+ controller->max_dma_len = MAX_DMA_LEN;
+ controller->max_transfer_size =
+ pxa2xx_spi_max_dma_transfer_size;
+ }
+ }
+
+ /* Enable SOC clock */
+ status = clk_prepare_enable(ssp->clk);
+ if (status)
+ goto out_error_dma_irq_alloc;
+
+ controller->max_speed_hz = clk_get_rate(ssp->clk);
+ /*
+ * Set the minimum speed for all platforms other than Intel Quark,
+ * which is able to do transfers below 1 Hz.
+ */
+ if (!pxa25x_ssp_comp(drv_data))
+ controller->min_speed_hz =
+ DIV_ROUND_UP(controller->max_speed_hz, 4096);
+ else if (!is_quark_x1000_ssp(drv_data))
+ controller->min_speed_hz =
+ DIV_ROUND_UP(controller->max_speed_hz, 512);
+
+ pxa_ssp_disable(ssp);
+
+ /* Load default SSP configuration */
+ switch (drv_data->ssp_type) {
+ case QUARK_X1000_SSP:
+ tmp = QUARK_X1000_SSCR1_RxTresh(RX_THRESH_QUARK_X1000_DFLT) |
+ QUARK_X1000_SSCR1_TxTresh(TX_THRESH_QUARK_X1000_DFLT);
+ pxa2xx_spi_write(drv_data, SSCR1, tmp);
+
+ /* Use the Motorola SPI protocol and an 8-bit frame */
+ tmp = QUARK_X1000_SSCR0_Motorola | QUARK_X1000_SSCR0_DataSize(8);
+ pxa2xx_spi_write(drv_data, SSCR0, tmp);
+ break;
+ case CE4100_SSP:
+ tmp = CE4100_SSCR1_RxTresh(RX_THRESH_CE4100_DFLT) |
+ CE4100_SSCR1_TxTresh(TX_THRESH_CE4100_DFLT);
+ pxa2xx_spi_write(drv_data, SSCR1, tmp);
+ tmp = SSCR0_SCR(2) | SSCR0_Motorola | SSCR0_DataSize(8);
+ pxa2xx_spi_write(drv_data, SSCR0, tmp);
+ break;
+ default:
+
+ if (spi_controller_is_slave(controller)) {
+ tmp = SSCR1_SCFR |
+ SSCR1_SCLKDIR |
+ SSCR1_SFRMDIR |
+ SSCR1_RxTresh(2) |
+ SSCR1_TxTresh(1) |
+ SSCR1_SPH;
+ } else {
+ tmp = SSCR1_RxTresh(RX_THRESH_DFLT) |
+ SSCR1_TxTresh(TX_THRESH_DFLT);
+ }
+ pxa2xx_spi_write(drv_data, SSCR1, tmp);
+ tmp = SSCR0_Motorola | SSCR0_DataSize(8);
+ if (!spi_controller_is_slave(controller))
+ tmp |= SSCR0_SCR(2);
+ pxa2xx_spi_write(drv_data, SSCR0, tmp);
+ break;
+ }
+
+ if (!pxa25x_ssp_comp(drv_data))
+ pxa2xx_spi_write(drv_data, SSTO, 0);
+
+ if (!is_quark_x1000_ssp(drv_data))
+ pxa2xx_spi_write(drv_data, SSPSP, 0);
+
+ if (is_lpss_ssp(drv_data)) {
+ lpss_ssp_setup(drv_data);
+ config = lpss_get_config(drv_data);
+ if (config->reg_capabilities >= 0) {
+ tmp = __lpss_ssp_read_priv(drv_data,
+ config->reg_capabilities);
+ tmp &= LPSS_CAPS_CS_EN_MASK;
+ tmp >>= LPSS_CAPS_CS_EN_SHIFT;
+ platform_info->num_chipselect = ffz(tmp);
+ } else if (config->cs_num) {
+ platform_info->num_chipselect = config->cs_num;
+ }
+ }
+ controller->num_chipselect = platform_info->num_chipselect;
+ controller->use_gpio_descriptors = true;
+
+ if (platform_info->is_slave) {
+ drv_data->gpiod_ready = devm_gpiod_get_optional(dev,
+ "ready", GPIOD_OUT_LOW);
+ if (IS_ERR(drv_data->gpiod_ready)) {
+ status = PTR_ERR(drv_data->gpiod_ready);
+ goto out_error_clock_enabled;
+ }
+ }
+
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ /* Register with the SPI framework */
+ platform_set_drvdata(pdev, drv_data);
+ status = spi_register_controller(controller);
+ if (status) {
+ dev_err(&pdev->dev, "problem registering SPI controller\n");
+ goto out_error_pm_runtime_enabled;
+ }
+
+ return status;
+
+out_error_pm_runtime_enabled:
+ pm_runtime_disable(&pdev->dev);
+
+out_error_clock_enabled:
+ clk_disable_unprepare(ssp->clk);
+
+out_error_dma_irq_alloc:
+ pxa2xx_spi_dma_release(drv_data);
+ free_irq(ssp->irq, drv_data);
+
+out_error_controller_alloc:
+ pxa_ssp_free(ssp);
+ return status;
+}
+
+static int pxa2xx_spi_remove(struct platform_device *pdev)
+{
+ struct driver_data *drv_data = platform_get_drvdata(pdev);
+ struct ssp_device *ssp = drv_data->ssp;
+
+ pm_runtime_get_sync(&pdev->dev);
+
+ spi_unregister_controller(drv_data->controller);
+
+ /* Disable the SSP at the peripheral and SOC level */
+ pxa_ssp_disable(ssp);
+ clk_disable_unprepare(ssp->clk);
+
+ /* Release DMA */
+ if (drv_data->controller_info->enable_dma)
+ pxa2xx_spi_dma_release(drv_data);
+
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ /* Release IRQ */
+ free_irq(ssp->irq, drv_data);
+
+ /* Release SSP */
+ pxa_ssp_free(ssp);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int pxa2xx_spi_suspend(struct device *dev)
+{
+ struct driver_data *drv_data = dev_get_drvdata(dev);
+ struct ssp_device *ssp = drv_data->ssp;
+ int status;
+
+ status = spi_controller_suspend(drv_data->controller);
+ if (status)
+ return status;
+
+ pxa_ssp_disable(ssp);
+
+ if (!pm_runtime_suspended(dev))
+ clk_disable_unprepare(ssp->clk);
+
+ return 0;
+}
+
+static int pxa2xx_spi_resume(struct device *dev)
+{
+ struct driver_data *drv_data = dev_get_drvdata(dev);
+ struct ssp_device *ssp = drv_data->ssp;
+ int status;
+
+ /* Enable the SSP clock */
+ if (!pm_runtime_suspended(dev)) {
+ status = clk_prepare_enable(ssp->clk);
+ if (status)
+ return status;
+ }
+
+ /* Start the queue running */
+ return spi_controller_resume(drv_data->controller);
+}
+#endif
+
+#ifdef CONFIG_PM
+static int pxa2xx_spi_runtime_suspend(struct device *dev)
+{
+ struct driver_data *drv_data = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(drv_data->ssp->clk);
+ return 0;
+}
+
+static int pxa2xx_spi_runtime_resume(struct device *dev)
+{
+ struct driver_data *drv_data = dev_get_drvdata(dev);
+
+ return clk_prepare_enable(drv_data->ssp->clk);
+}
+#endif
+
+static const struct dev_pm_ops pxa2xx_spi_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pxa2xx_spi_suspend, pxa2xx_spi_resume)
+ SET_RUNTIME_PM_OPS(pxa2xx_spi_runtime_suspend,
+ pxa2xx_spi_runtime_resume, NULL)
+};
+
+static struct platform_driver driver = {
+ .driver = {
+ .name = "pxa2xx-spi",
+ .pm = &pxa2xx_spi_pm_ops,
+ .acpi_match_table = ACPI_PTR(pxa2xx_spi_acpi_match),
+ .of_match_table = of_match_ptr(pxa2xx_spi_of_match),
+ },
+ .probe = pxa2xx_spi_probe,
+ .remove = pxa2xx_spi_remove,
+};
+
+static int __init pxa2xx_spi_init(void)
+{
+ return platform_driver_register(&driver);
+}
+subsys_initcall(pxa2xx_spi_init);
+
+static void __exit pxa2xx_spi_exit(void)
+{
+ platform_driver_unregister(&driver);
+}
+module_exit(pxa2xx_spi_exit);
+
+MODULE_SOFTDEP("pre: dw_dmac");
diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h
new file mode 100644
index 000000000..45cdbbc71
--- /dev/null
+++ b/drivers/spi/spi-pxa2xx.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
+ * Copyright (C) 2013, 2021 Intel Corporation
+ */
+
+#ifndef SPI_PXA2XX_H
+#define SPI_PXA2XX_H
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/types.h>
+#include <linux/sizes.h>
+
+#include <linux/pxa2xx_ssp.h>
+
+struct gpio_desc;
+struct pxa2xx_spi_controller;
+struct spi_controller;
+struct spi_device;
+struct spi_transfer;
+
+struct driver_data {
+ /* SSP Info */
+ struct ssp_device *ssp;
+
+ /* SPI framework hookup */
+ enum pxa_ssp_type ssp_type;
+ struct spi_controller *controller;
+
+ /* PXA hookup */
+ struct pxa2xx_spi_controller *controller_info;
+
+ /* SSP masks */
+ u32 dma_cr1;
+ u32 int_cr1;
+ u32 clear_sr;
+ u32 mask_sr;
+
+ /* DMA engine support */
+ atomic_t dma_running;
+
+ /* Current transfer state info */
+ void *tx;
+ void *tx_end;
+ void *rx;
+ void *rx_end;
+ u8 n_bytes;
+ int (*write)(struct driver_data *drv_data);
+ int (*read)(struct driver_data *drv_data);
+ irqreturn_t (*transfer_handler)(struct driver_data *drv_data);
+
+ void __iomem *lpss_base;
+
+ /* Optional slave FIFO ready signal */
+ struct gpio_desc *gpiod_ready;
+};
+
+struct chip_data {
+ u32 cr1;
+ u32 dds_rate;
+ u32 timeout;
+ u8 enable_dma;
+ u32 dma_burst_size;
+ u32 dma_threshold;
+ u32 threshold;
+ u16 lpss_rx_threshold;
+ u16 lpss_tx_threshold;
+};
+
+static inline u32 pxa2xx_spi_read(const struct driver_data *drv_data, u32 reg)
+{
+ return pxa_ssp_read_reg(drv_data->ssp, reg);
+}
+
+static inline void pxa2xx_spi_write(const struct driver_data *drv_data, u32 reg, u32 val)
+{
+ pxa_ssp_write_reg(drv_data->ssp, reg, val);
+}
+
+#define DMA_ALIGNMENT 8
+
+static inline int pxa25x_ssp_comp(const struct driver_data *drv_data)
+{
+ switch (drv_data->ssp_type) {
+ case PXA25x_SSP:
+ case CE4100_SSP:
+ case QUARK_X1000_SSP:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static inline void clear_SSCR1_bits(const struct driver_data *drv_data, u32 bits)
+{
+ pxa2xx_spi_write(drv_data, SSCR1, pxa2xx_spi_read(drv_data, SSCR1) & ~bits);
+}
+
+static inline u32 read_SSSR_bits(const struct driver_data *drv_data, u32 bits)
+{
+ return pxa2xx_spi_read(drv_data, SSSR) & bits;
+}
+
+static inline void write_SSSR_CS(const struct driver_data *drv_data, u32 val)
+{
+ if (drv_data->ssp_type == CE4100_SSP ||
+ drv_data->ssp_type == QUARK_X1000_SSP)
+ val |= read_SSSR_bits(drv_data, SSSR_ALT_FRM_MASK);
+
+ pxa2xx_spi_write(drv_data, SSSR, val);
+}
+
+extern int pxa2xx_spi_flush(struct driver_data *drv_data);
+
+#define MAX_DMA_LEN SZ_64K
+#define DEFAULT_DMA_CR1 (SSCR1_TSRE | SSCR1_RSRE | SSCR1_TRAIL)
+
+extern irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data);
+extern int pxa2xx_spi_dma_prepare(struct driver_data *drv_data,
+ struct spi_transfer *xfer);
+extern void pxa2xx_spi_dma_start(struct driver_data *drv_data);
+extern void pxa2xx_spi_dma_stop(struct driver_data *drv_data);
+extern int pxa2xx_spi_dma_setup(struct driver_data *drv_data);
+extern void pxa2xx_spi_dma_release(struct driver_data *drv_data);
+extern int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
+ struct spi_device *spi,
+ u8 bits_per_word,
+ u32 *burst_code,
+ u32 *threshold);
+
+#endif /* SPI_PXA2XX_H */
diff --git a/drivers/spi/spi-qcom-qspi.c b/drivers/spi/spi-qcom-qspi.c
new file mode 100644
index 000000000..c334dfec4
--- /dev/null
+++ b/drivers/spi/spi-qcom-qspi.c
@@ -0,0 +1,663 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017-2018, The Linux foundation. All rights reserved.
+
+#include <linux/clk.h>
+#include <linux/interconnect.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_opp.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+
+
+#define QSPI_NUM_CS 2
+#define QSPI_BYTES_PER_WORD 4
+
+#define MSTR_CONFIG 0x0000
+#define FULL_CYCLE_MODE BIT(3)
+#define FB_CLK_EN BIT(4)
+#define PIN_HOLDN BIT(6)
+#define PIN_WPN BIT(7)
+#define DMA_ENABLE BIT(8)
+#define BIG_ENDIAN_MODE BIT(9)
+#define SPI_MODE_MSK 0xc00
+#define SPI_MODE_SHFT 10
+#define CHIP_SELECT_NUM BIT(12)
+#define SBL_EN BIT(13)
+#define LPA_BASE_MSK 0x3c000
+#define LPA_BASE_SHFT 14
+#define TX_DATA_DELAY_MSK 0xc0000
+#define TX_DATA_DELAY_SHFT 18
+#define TX_CLK_DELAY_MSK 0x300000
+#define TX_CLK_DELAY_SHFT 20
+#define TX_CS_N_DELAY_MSK 0xc00000
+#define TX_CS_N_DELAY_SHFT 22
+#define TX_DATA_OE_DELAY_MSK 0x3000000
+#define TX_DATA_OE_DELAY_SHFT 24
+
+#define AHB_MASTER_CFG 0x0004
+#define HMEM_TYPE_START_MID_TRANS_MSK 0x7
+#define HMEM_TYPE_START_MID_TRANS_SHFT 0
+#define HMEM_TYPE_LAST_TRANS_MSK 0x38
+#define HMEM_TYPE_LAST_TRANS_SHFT 3
+#define USE_HMEMTYPE_LAST_ON_DESC_OR_CHAIN_MSK 0xc0
+#define USE_HMEMTYPE_LAST_ON_DESC_OR_CHAIN_SHFT 6
+#define HMEMTYPE_READ_TRANS_MSK 0x700
+#define HMEMTYPE_READ_TRANS_SHFT 8
+#define HSHARED BIT(11)
+#define HINNERSHARED BIT(12)
+
+#define MSTR_INT_EN 0x000C
+#define MSTR_INT_STATUS 0x0010
+#define RESP_FIFO_UNDERRUN BIT(0)
+#define RESP_FIFO_NOT_EMPTY BIT(1)
+#define RESP_FIFO_RDY BIT(2)
+#define HRESP_FROM_NOC_ERR BIT(3)
+#define WR_FIFO_EMPTY BIT(9)
+#define WR_FIFO_FULL BIT(10)
+#define WR_FIFO_OVERRUN BIT(11)
+#define TRANSACTION_DONE BIT(16)
+#define QSPI_ERR_IRQS (RESP_FIFO_UNDERRUN | HRESP_FROM_NOC_ERR | \
+ WR_FIFO_OVERRUN)
+#define QSPI_ALL_IRQS (QSPI_ERR_IRQS | RESP_FIFO_RDY | \
+ WR_FIFO_EMPTY | WR_FIFO_FULL | \
+ TRANSACTION_DONE)
+
+#define PIO_XFER_CTRL 0x0014
+#define REQUEST_COUNT_MSK 0xffff
+
+#define PIO_XFER_CFG 0x0018
+#define TRANSFER_DIRECTION BIT(0)
+#define MULTI_IO_MODE_MSK 0xe
+#define MULTI_IO_MODE_SHFT 1
+#define TRANSFER_FRAGMENT BIT(8)
+#define SDR_1BIT 1
+#define SDR_2BIT 2
+#define SDR_4BIT 3
+#define DDR_1BIT 5
+#define DDR_2BIT 6
+#define DDR_4BIT 7
+#define DMA_DESC_SINGLE_SPI 1
+#define DMA_DESC_DUAL_SPI 2
+#define DMA_DESC_QUAD_SPI 3
+
+#define PIO_XFER_STATUS 0x001c
+#define WR_FIFO_BYTES_MSK 0xffff0000
+#define WR_FIFO_BYTES_SHFT 16
+
+#define PIO_DATAOUT_1B 0x0020
+#define PIO_DATAOUT_4B 0x0024
+
+#define RD_FIFO_CFG 0x0028
+#define CONTINUOUS_MODE BIT(0)
+
+#define RD_FIFO_STATUS 0x002c
+#define FIFO_EMPTY BIT(11)
+#define WR_CNTS_MSK 0x7f0
+#define WR_CNTS_SHFT 4
+#define RDY_64BYTE BIT(3)
+#define RDY_32BYTE BIT(2)
+#define RDY_16BYTE BIT(1)
+#define FIFO_RDY BIT(0)
+
+#define RD_FIFO_RESET 0x0030
+#define RESET_FIFO BIT(0)
+
+#define CUR_MEM_ADDR 0x0048
+#define HW_VERSION 0x004c
+#define RD_FIFO 0x0050
+#define SAMPLING_CLK_CFG 0x0090
+#define SAMPLING_CLK_STATUS 0x0094
+
+
+enum qspi_dir {
+ QSPI_READ,
+ QSPI_WRITE,
+};
+
+struct qspi_xfer {
+ union {
+ const void *tx_buf;
+ void *rx_buf;
+ };
+ unsigned int rem_bytes;
+ unsigned int buswidth;
+ enum qspi_dir dir;
+ bool is_last;
+};
+
+enum qspi_clocks {
+ QSPI_CLK_CORE,
+ QSPI_CLK_IFACE,
+ QSPI_NUM_CLKS
+};
+
+struct qcom_qspi {
+ void __iomem *base;
+ struct device *dev;
+ struct clk_bulk_data *clks;
+ struct qspi_xfer xfer;
+ struct icc_path *icc_path_cpu_to_qspi;
+ unsigned long last_speed;
+ /* Lock to protect data accessed by IRQs */
+ spinlock_t lock;
+};
+
+static u32 qspi_buswidth_to_iomode(struct qcom_qspi *ctrl,
+ unsigned int buswidth)
+{
+ switch (buswidth) {
+ case 1:
+ return SDR_1BIT << MULTI_IO_MODE_SHFT;
+ case 2:
+ return SDR_2BIT << MULTI_IO_MODE_SHFT;
+ case 4:
+ return SDR_4BIT << MULTI_IO_MODE_SHFT;
+ default:
+ dev_warn_once(ctrl->dev,
+ "Unexpected bus width: %u\n", buswidth);
+ return SDR_1BIT << MULTI_IO_MODE_SHFT;
+ }
+}
+
+static void qcom_qspi_pio_xfer_cfg(struct qcom_qspi *ctrl)
+{
+ u32 pio_xfer_cfg;
+ const struct qspi_xfer *xfer;
+
+ xfer = &ctrl->xfer;
+ pio_xfer_cfg = readl(ctrl->base + PIO_XFER_CFG);
+ pio_xfer_cfg &= ~TRANSFER_DIRECTION;
+ pio_xfer_cfg |= xfer->dir;
+ if (xfer->is_last)
+ pio_xfer_cfg &= ~TRANSFER_FRAGMENT;
+ else
+ pio_xfer_cfg |= TRANSFER_FRAGMENT;
+ pio_xfer_cfg &= ~MULTI_IO_MODE_MSK;
+ pio_xfer_cfg |= qspi_buswidth_to_iomode(ctrl, xfer->buswidth);
+
+ writel(pio_xfer_cfg, ctrl->base + PIO_XFER_CFG);
+}
+
+static void qcom_qspi_pio_xfer_ctrl(struct qcom_qspi *ctrl)
+{
+ u32 pio_xfer_ctrl;
+
+ pio_xfer_ctrl = readl(ctrl->base + PIO_XFER_CTRL);
+ pio_xfer_ctrl &= ~REQUEST_COUNT_MSK;
+ pio_xfer_ctrl |= ctrl->xfer.rem_bytes;
+ writel(pio_xfer_ctrl, ctrl->base + PIO_XFER_CTRL);
+}
+
+static void qcom_qspi_pio_xfer(struct qcom_qspi *ctrl)
+{
+ u32 ints;
+
+ qcom_qspi_pio_xfer_cfg(ctrl);
+
+ /* Ack any previous interrupts that might be hanging around */
+ writel(QSPI_ALL_IRQS, ctrl->base + MSTR_INT_STATUS);
+
+ /* Setup new interrupts */
+ if (ctrl->xfer.dir == QSPI_WRITE)
+ ints = QSPI_ERR_IRQS | WR_FIFO_EMPTY;
+ else
+ ints = QSPI_ERR_IRQS | RESP_FIFO_RDY;
+ writel(ints, ctrl->base + MSTR_INT_EN);
+
+ /* Kick off the transfer */
+ qcom_qspi_pio_xfer_ctrl(ctrl);
+}
+
+static void qcom_qspi_handle_err(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct qcom_qspi *ctrl = spi_master_get_devdata(master);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctrl->lock, flags);
+ writel(0, ctrl->base + MSTR_INT_EN);
+ ctrl->xfer.rem_bytes = 0;
+ spin_unlock_irqrestore(&ctrl->lock, flags);
+}
+
+static int qcom_qspi_set_speed(struct qcom_qspi *ctrl, unsigned long speed_hz)
+{
+ int ret;
+ unsigned int avg_bw_cpu;
+
+ if (speed_hz == ctrl->last_speed)
+ return 0;
+
+ /* In regular operation (SBL_EN=1) core must be 4x transfer clock */
+ ret = dev_pm_opp_set_rate(ctrl->dev, speed_hz * 4);
+ if (ret) {
+ dev_err(ctrl->dev, "Failed to set core clk %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * Set the BW quota for the CPU as the driver supports FIFO mode only.
+ * We don't have an explicit peak requirement, so keep it equal to avg_bw.
+ */
+ avg_bw_cpu = Bps_to_icc(speed_hz);
+ ret = icc_set_bw(ctrl->icc_path_cpu_to_qspi, avg_bw_cpu, avg_bw_cpu);
+ if (ret) {
+ dev_err(ctrl->dev, "%s: ICC BW voting failed for cpu: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ ctrl->last_speed = speed_hz;
+
+ return 0;
+}
+
+static int qcom_qspi_transfer_one(struct spi_master *master,
+ struct spi_device *slv,
+ struct spi_transfer *xfer)
+{
+ struct qcom_qspi *ctrl = spi_master_get_devdata(master);
+ int ret;
+ unsigned long speed_hz;
+ unsigned long flags;
+
+ speed_hz = slv->max_speed_hz;
+ if (xfer->speed_hz)
+ speed_hz = xfer->speed_hz;
+
+ ret = qcom_qspi_set_speed(ctrl, speed_hz);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&ctrl->lock, flags);
+
+ /* We are half duplex, so either rx or tx will be set */
+ if (xfer->rx_buf) {
+ ctrl->xfer.dir = QSPI_READ;
+ ctrl->xfer.buswidth = xfer->rx_nbits;
+ ctrl->xfer.rx_buf = xfer->rx_buf;
+ } else {
+ ctrl->xfer.dir = QSPI_WRITE;
+ ctrl->xfer.buswidth = xfer->tx_nbits;
+ ctrl->xfer.tx_buf = xfer->tx_buf;
+ }
+ ctrl->xfer.is_last = list_is_last(&xfer->transfer_list,
+ &master->cur_msg->transfers);
+ ctrl->xfer.rem_bytes = xfer->len;
+ qcom_qspi_pio_xfer(ctrl);
+
+ spin_unlock_irqrestore(&ctrl->lock, flags);
+
+ /* We'll call spi_finalize_current_transfer() when done */
+ return 1;
+}
+
+static int qcom_qspi_prepare_message(struct spi_master *master,
+ struct spi_message *message)
+{
+ u32 mstr_cfg;
+ struct qcom_qspi *ctrl;
+ int tx_data_oe_delay = 1;
+ int tx_data_delay = 1;
+ unsigned long flags;
+
+ ctrl = spi_master_get_devdata(master);
+ spin_lock_irqsave(&ctrl->lock, flags);
+
+ mstr_cfg = readl(ctrl->base + MSTR_CONFIG);
+ mstr_cfg &= ~CHIP_SELECT_NUM;
+ if (message->spi->chip_select)
+ mstr_cfg |= CHIP_SELECT_NUM;
+
+ mstr_cfg |= FB_CLK_EN | PIN_WPN | PIN_HOLDN | SBL_EN | FULL_CYCLE_MODE;
+ mstr_cfg &= ~(SPI_MODE_MSK | TX_DATA_OE_DELAY_MSK | TX_DATA_DELAY_MSK);
+ mstr_cfg |= message->spi->mode << SPI_MODE_SHFT;
+ mstr_cfg |= tx_data_oe_delay << TX_DATA_OE_DELAY_SHFT;
+ mstr_cfg |= tx_data_delay << TX_DATA_DELAY_SHFT;
+ mstr_cfg &= ~DMA_ENABLE;
+
+ writel(mstr_cfg, ctrl->base + MSTR_CONFIG);
+ spin_unlock_irqrestore(&ctrl->lock, flags);
+
+ return 0;
+}
+
+static irqreturn_t pio_read(struct qcom_qspi *ctrl)
+{
+ u32 rd_fifo_status;
+ u32 rd_fifo;
+ unsigned int wr_cnts;
+ unsigned int bytes_to_read;
+ unsigned int words_to_read;
+ u32 *word_buf;
+ u8 *byte_buf;
+ int i;
+
+ rd_fifo_status = readl(ctrl->base + RD_FIFO_STATUS);
+
+ if (!(rd_fifo_status & FIFO_RDY)) {
+ dev_dbg(ctrl->dev, "Spurious IRQ %#x\n", rd_fifo_status);
+ return IRQ_NONE;
+ }
+
+ wr_cnts = (rd_fifo_status & WR_CNTS_MSK) >> WR_CNTS_SHFT;
+ wr_cnts = min(wr_cnts, ctrl->xfer.rem_bytes);
+
+ words_to_read = wr_cnts / QSPI_BYTES_PER_WORD;
+ bytes_to_read = wr_cnts % QSPI_BYTES_PER_WORD;
+
+ if (words_to_read) {
+ word_buf = ctrl->xfer.rx_buf;
+ ctrl->xfer.rem_bytes -= words_to_read * QSPI_BYTES_PER_WORD;
+ ioread32_rep(ctrl->base + RD_FIFO, word_buf, words_to_read);
+ ctrl->xfer.rx_buf = word_buf + words_to_read;
+ }
+
+ if (bytes_to_read) {
+ byte_buf = ctrl->xfer.rx_buf;
+ rd_fifo = readl(ctrl->base + RD_FIFO);
+ ctrl->xfer.rem_bytes -= bytes_to_read;
+ for (i = 0; i < bytes_to_read; i++)
+ *byte_buf++ = rd_fifo >> (i * BITS_PER_BYTE);
+ ctrl->xfer.rx_buf = byte_buf;
+ }
+
+ return IRQ_HANDLED;
+}
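+
+/*
+ * Illustrative note on pio_read() above (hypothetical numbers): with 7
+ * readable bytes reported by RD_FIFO_STATUS, one whole 32-bit word is
+ * drained via ioread32_rep() and the remaining 3 bytes are unpacked from
+ * a single RD_FIFO read, least significant byte first.
+ */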
+
+static irqreturn_t pio_write(struct qcom_qspi *ctrl)
+{
+ const void *xfer_buf = ctrl->xfer.tx_buf;
+ const int *word_buf;
+ const char *byte_buf;
+ unsigned int wr_fifo_bytes;
+ unsigned int wr_fifo_words;
+ unsigned int wr_size;
+ unsigned int rem_words;
+
+ wr_fifo_bytes = readl(ctrl->base + PIO_XFER_STATUS);
+ wr_fifo_bytes >>= WR_FIFO_BYTES_SHFT;
+
+ if (ctrl->xfer.rem_bytes < QSPI_BYTES_PER_WORD) {
+ /* Process the last 1-3 bytes */
+ wr_size = min(wr_fifo_bytes, ctrl->xfer.rem_bytes);
+ ctrl->xfer.rem_bytes -= wr_size;
+
+ byte_buf = xfer_buf;
+ while (wr_size--)
+ writel(*byte_buf++,
+ ctrl->base + PIO_DATAOUT_1B);
+ ctrl->xfer.tx_buf = byte_buf;
+ } else {
+ /*
+ * Process all the whole words; to keep things simple we'll
+ * just wait for the next interrupt to handle the last 1-3
+ * bytes if the transfer isn't a whole number of words.
+ */
+ rem_words = ctrl->xfer.rem_bytes / QSPI_BYTES_PER_WORD;
+ wr_fifo_words = wr_fifo_bytes / QSPI_BYTES_PER_WORD;
+
+ wr_size = min(rem_words, wr_fifo_words);
+ ctrl->xfer.rem_bytes -= wr_size * QSPI_BYTES_PER_WORD;
+
+ word_buf = xfer_buf;
+ iowrite32_rep(ctrl->base + PIO_DATAOUT_4B, word_buf, wr_size);
+ ctrl->xfer.tx_buf = word_buf + wr_size;
+
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qcom_qspi_irq(int irq, void *dev_id)
+{
+ u32 int_status;
+ struct qcom_qspi *ctrl = dev_id;
+ irqreturn_t ret = IRQ_NONE;
+
+ spin_lock(&ctrl->lock);
+
+ int_status = readl(ctrl->base + MSTR_INT_STATUS);
+ writel(int_status, ctrl->base + MSTR_INT_STATUS);
+
+ if (ctrl->xfer.dir == QSPI_WRITE) {
+ if (int_status & WR_FIFO_EMPTY)
+ ret = pio_write(ctrl);
+ } else {
+ if (int_status & RESP_FIFO_RDY)
+ ret = pio_read(ctrl);
+ }
+
+ if (int_status & QSPI_ERR_IRQS) {
+ if (int_status & RESP_FIFO_UNDERRUN)
+ dev_err(ctrl->dev, "IRQ error: FIFO underrun\n");
+ if (int_status & WR_FIFO_OVERRUN)
+ dev_err(ctrl->dev, "IRQ error: FIFO overrun\n");
+ if (int_status & HRESP_FROM_NOC_ERR)
+ dev_err(ctrl->dev, "IRQ error: NOC response error\n");
+ ret = IRQ_HANDLED;
+ }
+
+ if (!ctrl->xfer.rem_bytes) {
+ writel(0, ctrl->base + MSTR_INT_EN);
+ spi_finalize_current_transfer(dev_get_drvdata(ctrl->dev));
+ }
+
+ spin_unlock(&ctrl->lock);
+ return ret;
+}
+
+static int qcom_qspi_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct device *dev;
+ struct spi_master *master;
+ struct qcom_qspi *ctrl;
+
+ dev = &pdev->dev;
+
+ master = devm_spi_alloc_master(dev, sizeof(*ctrl));
+ if (!master)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, master);
+
+ ctrl = spi_master_get_devdata(master);
+
+ spin_lock_init(&ctrl->lock);
+ ctrl->dev = dev;
+ ctrl->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ctrl->base))
+ return PTR_ERR(ctrl->base);
+
+ ctrl->clks = devm_kcalloc(dev, QSPI_NUM_CLKS,
+ sizeof(*ctrl->clks), GFP_KERNEL);
+ if (!ctrl->clks)
+ return -ENOMEM;
+
+ ctrl->clks[QSPI_CLK_CORE].id = "core";
+ ctrl->clks[QSPI_CLK_IFACE].id = "iface";
+ ret = devm_clk_bulk_get(dev, QSPI_NUM_CLKS, ctrl->clks);
+ if (ret)
+ return ret;
+
+ ctrl->icc_path_cpu_to_qspi = devm_of_icc_get(dev, "qspi-config");
+ if (IS_ERR(ctrl->icc_path_cpu_to_qspi))
+ return dev_err_probe(dev, PTR_ERR(ctrl->icc_path_cpu_to_qspi),
+ "Failed to get cpu path\n");
+
+ /* Set BW vote for register access */
+ ret = icc_set_bw(ctrl->icc_path_cpu_to_qspi, Bps_to_icc(1000),
+ Bps_to_icc(1000));
+ if (ret) {
+ dev_err(ctrl->dev, "%s: ICC BW voting failed for cpu: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ ret = icc_disable(ctrl->icc_path_cpu_to_qspi);
+ if (ret) {
+ dev_err(ctrl->dev, "%s: ICC disable failed for cpu: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ return ret;
+ ret = devm_request_irq(dev, ret, qcom_qspi_irq, 0, dev_name(dev), ctrl);
+ if (ret) {
+ dev_err(dev, "Failed to request irq %d\n", ret);
+ return ret;
+ }
+
+ master->max_speed_hz = 300000000;
+ master->num_chipselect = QSPI_NUM_CS;
+ master->bus_num = -1;
+ master->dev.of_node = pdev->dev.of_node;
+ master->mode_bits = SPI_MODE_0 |
+ SPI_TX_DUAL | SPI_RX_DUAL |
+ SPI_TX_QUAD | SPI_RX_QUAD;
+ master->flags = SPI_MASTER_HALF_DUPLEX;
+ master->prepare_message = qcom_qspi_prepare_message;
+ master->transfer_one = qcom_qspi_transfer_one;
+ master->handle_err = qcom_qspi_handle_err;
+ master->auto_runtime_pm = true;
+
+ ret = devm_pm_opp_set_clkname(&pdev->dev, "core");
+ if (ret)
+ return ret;
+ /* OPP table is optional */
+ ret = devm_pm_opp_of_add_table(&pdev->dev);
+ if (ret && ret != -ENODEV) {
+ dev_err(&pdev->dev, "invalid OPP table in device tree\n");
+ return ret;
+ }
+
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_autosuspend_delay(dev, 250);
+ pm_runtime_enable(dev);
+
+ ret = spi_register_master(master);
+ if (!ret)
+ return 0;
+
+ pm_runtime_disable(dev);
+
+ return ret;
+}
+
+static int qcom_qspi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+
+ /* Unregister _before_ disabling pm_runtime() so we stop transfers */
+ spi_unregister_master(master);
+
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static int __maybe_unused qcom_qspi_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct qcom_qspi *ctrl = spi_master_get_devdata(master);
+ int ret;
+
+ /* Drop the performance state vote */
+ dev_pm_opp_set_rate(dev, 0);
+ clk_bulk_disable_unprepare(QSPI_NUM_CLKS, ctrl->clks);
+
+ ret = icc_disable(ctrl->icc_path_cpu_to_qspi);
+ if (ret) {
+ dev_err_ratelimited(ctrl->dev, "%s: ICC disable failed for cpu: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused qcom_qspi_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct qcom_qspi *ctrl = spi_master_get_devdata(master);
+ int ret;
+
+ ret = icc_enable(ctrl->icc_path_cpu_to_qspi);
+ if (ret) {
+ dev_err_ratelimited(ctrl->dev, "%s: ICC enable failed for cpu: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ ret = clk_bulk_prepare_enable(QSPI_NUM_CLKS, ctrl->clks);
+ if (ret)
+ return ret;
+
+ return dev_pm_opp_set_rate(dev, ctrl->last_speed * 4);
+}
+
+static int __maybe_unused qcom_qspi_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ int ret;
+
+ ret = spi_master_suspend(master);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_force_suspend(dev);
+ if (ret)
+ spi_master_resume(master);
+
+ return ret;
+}
+
+static int __maybe_unused qcom_qspi_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ int ret;
+
+ ret = pm_runtime_force_resume(dev);
+ if (ret)
+ return ret;
+
+ ret = spi_master_resume(master);
+ if (ret)
+ pm_runtime_force_suspend(dev);
+
+ return ret;
+}
+
+static const struct dev_pm_ops qcom_qspi_dev_pm_ops = {
+ SET_RUNTIME_PM_OPS(qcom_qspi_runtime_suspend,
+ qcom_qspi_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(qcom_qspi_suspend, qcom_qspi_resume)
+};
+
+static const struct of_device_id qcom_qspi_dt_match[] = {
+ { .compatible = "qcom,qspi-v1", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, qcom_qspi_dt_match);
+
+static struct platform_driver qcom_qspi_driver = {
+ .driver = {
+ .name = "qcom_qspi",
+ .pm = &qcom_qspi_dev_pm_ops,
+ .of_match_table = qcom_qspi_dt_match,
+ },
+ .probe = qcom_qspi_probe,
+ .remove = qcom_qspi_remove,
+};
+module_platform_driver(qcom_qspi_driver);
+
+MODULE_DESCRIPTION("SPI driver for QSPI cores");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
new file mode 100644
index 000000000..fb6b7738b
--- /dev/null
+++ b/drivers/spi/spi-qup.c
@@ -0,0 +1,1329 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2008-2014, The Linux foundation. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/spi/spi.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+
+#define QUP_CONFIG 0x0000
+#define QUP_STATE 0x0004
+#define QUP_IO_M_MODES 0x0008
+#define QUP_SW_RESET 0x000c
+#define QUP_OPERATIONAL 0x0018
+#define QUP_ERROR_FLAGS 0x001c
+#define QUP_ERROR_FLAGS_EN 0x0020
+#define QUP_OPERATIONAL_MASK 0x0028
+#define QUP_HW_VERSION 0x0030
+#define QUP_MX_OUTPUT_CNT 0x0100
+#define QUP_OUTPUT_FIFO 0x0110
+#define QUP_MX_WRITE_CNT 0x0150
+#define QUP_MX_INPUT_CNT 0x0200
+#define QUP_MX_READ_CNT 0x0208
+#define QUP_INPUT_FIFO 0x0218
+
+#define SPI_CONFIG 0x0300
+#define SPI_IO_CONTROL 0x0304
+#define SPI_ERROR_FLAGS 0x0308
+#define SPI_ERROR_FLAGS_EN 0x030c
+
+/* QUP_CONFIG fields */
+#define QUP_CONFIG_SPI_MODE (1 << 8)
+#define QUP_CONFIG_CLOCK_AUTO_GATE BIT(13)
+#define QUP_CONFIG_NO_INPUT BIT(7)
+#define QUP_CONFIG_NO_OUTPUT BIT(6)
+#define QUP_CONFIG_N 0x001f
+
+/* QUP_STATE fields */
+#define QUP_STATE_VALID BIT(2)
+#define QUP_STATE_RESET 0
+#define QUP_STATE_RUN 1
+#define QUP_STATE_PAUSE 3
+#define QUP_STATE_MASK 3
+#define QUP_STATE_CLEAR 2
+
+#define QUP_HW_VERSION_2_1_1 0x20010001
+
+/* QUP_IO_M_MODES fields */
+#define QUP_IO_M_PACK_EN BIT(15)
+#define QUP_IO_M_UNPACK_EN BIT(14)
+#define QUP_IO_M_INPUT_MODE_MASK_SHIFT 12
+#define QUP_IO_M_OUTPUT_MODE_MASK_SHIFT 10
+#define QUP_IO_M_INPUT_MODE_MASK (3 << QUP_IO_M_INPUT_MODE_MASK_SHIFT)
+#define QUP_IO_M_OUTPUT_MODE_MASK (3 << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT)
+
+#define QUP_IO_M_OUTPUT_BLOCK_SIZE(x) (((x) & (0x03 << 0)) >> 0)
+#define QUP_IO_M_OUTPUT_FIFO_SIZE(x) (((x) & (0x07 << 2)) >> 2)
+#define QUP_IO_M_INPUT_BLOCK_SIZE(x) (((x) & (0x03 << 5)) >> 5)
+#define QUP_IO_M_INPUT_FIFO_SIZE(x) (((x) & (0x07 << 7)) >> 7)
+
+#define QUP_IO_M_MODE_FIFO 0
+#define QUP_IO_M_MODE_BLOCK 1
+#define QUP_IO_M_MODE_DMOV 2
+#define QUP_IO_M_MODE_BAM 3
+
+/* QUP_OPERATIONAL fields */
+#define QUP_OP_IN_BLOCK_READ_REQ BIT(13)
+#define QUP_OP_OUT_BLOCK_WRITE_REQ BIT(12)
+#define QUP_OP_MAX_INPUT_DONE_FLAG BIT(11)
+#define QUP_OP_MAX_OUTPUT_DONE_FLAG BIT(10)
+#define QUP_OP_IN_SERVICE_FLAG BIT(9)
+#define QUP_OP_OUT_SERVICE_FLAG BIT(8)
+#define QUP_OP_IN_FIFO_FULL BIT(7)
+#define QUP_OP_OUT_FIFO_FULL BIT(6)
+#define QUP_OP_IN_FIFO_NOT_EMPTY BIT(5)
+#define QUP_OP_OUT_FIFO_NOT_EMPTY BIT(4)
+
+/* QUP_ERROR_FLAGS and QUP_ERROR_FLAGS_EN fields */
+#define QUP_ERROR_OUTPUT_OVER_RUN BIT(5)
+#define QUP_ERROR_INPUT_UNDER_RUN BIT(4)
+#define QUP_ERROR_OUTPUT_UNDER_RUN BIT(3)
+#define QUP_ERROR_INPUT_OVER_RUN BIT(2)
+
+/* SPI_CONFIG fields */
+#define SPI_CONFIG_HS_MODE BIT(10)
+#define SPI_CONFIG_INPUT_FIRST BIT(9)
+#define SPI_CONFIG_LOOPBACK BIT(8)
+
+/* SPI_IO_CONTROL fields */
+#define SPI_IO_C_FORCE_CS BIT(11)
+#define SPI_IO_C_CLK_IDLE_HIGH BIT(10)
+#define SPI_IO_C_MX_CS_MODE BIT(8)
+#define SPI_IO_C_CS_N_POLARITY_0 BIT(4)
+#define SPI_IO_C_CS_SELECT(x) (((x) & 3) << 2)
+#define SPI_IO_C_CS_SELECT_MASK 0x000c
+#define SPI_IO_C_TRISTATE_CS BIT(1)
+#define SPI_IO_C_NO_TRI_STATE BIT(0)
+
+/* SPI_ERROR_FLAGS and SPI_ERROR_FLAGS_EN fields */
+#define SPI_ERROR_CLK_OVER_RUN BIT(1)
+#define SPI_ERROR_CLK_UNDER_RUN BIT(0)
+
+#define SPI_NUM_CHIPSELECTS 4
+
+#define SPI_MAX_XFER (SZ_64K - 64)
+
+/* high speed mode is when bus rate is greater than 26MHz */
+#define SPI_HS_MIN_RATE 26000000
+#define SPI_MAX_RATE 50000000
+
+#define SPI_DELAY_THRESHOLD 1
+#define SPI_DELAY_RETRY 10
+
+struct spi_qup {
+ void __iomem *base;
+ struct device *dev;
+ struct clk *cclk; /* core clock */
+ struct clk *iclk; /* interface clock */
+ int irq;
+ spinlock_t lock;
+
+ int in_fifo_sz;
+ int out_fifo_sz;
+ int in_blk_sz;
+ int out_blk_sz;
+
+ struct spi_transfer *xfer;
+ struct completion done;
+ int error;
+ int w_size; /* bytes per SPI word */
+ int n_words;
+ int tx_bytes;
+ int rx_bytes;
+ const u8 *tx_buf;
+ u8 *rx_buf;
+ int qup_v1;
+
+ int mode;
+ struct dma_slave_config rx_conf;
+ struct dma_slave_config tx_conf;
+};
+
+static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer);
+
+static inline bool spi_qup_is_flag_set(struct spi_qup *controller, u32 flag)
+{
+ u32 opflag = readl_relaxed(controller->base + QUP_OPERATIONAL);
+
+ return (opflag & flag) != 0;
+}
+
+static inline bool spi_qup_is_dma_xfer(int mode)
+{
+ if (mode == QUP_IO_M_MODE_DMOV || mode == QUP_IO_M_MODE_BAM)
+ return true;
+
+ return false;
+}
+
+/* gets the transaction size in bytes */
+static inline unsigned int spi_qup_len(struct spi_qup *controller)
+{
+ return controller->n_words * controller->w_size;
+}
+
+static inline bool spi_qup_is_valid_state(struct spi_qup *controller)
+{
+ u32 opstate = readl_relaxed(controller->base + QUP_STATE);
+
+ return opstate & QUP_STATE_VALID;
+}
+
+static int spi_qup_set_state(struct spi_qup *controller, u32 state)
+{
+ unsigned long loop;
+ u32 cur_state;
+
+ loop = 0;
+ while (!spi_qup_is_valid_state(controller)) {
+
+ usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);
+
+ if (++loop > SPI_DELAY_RETRY)
+ return -EIO;
+ }
+
+ if (loop)
+		dev_dbg(controller->dev, "invalid state for %ld us, state %d\n",
+			loop, state);
+
+ cur_state = readl_relaxed(controller->base + QUP_STATE);
+ /*
+ * Per spec: for PAUSE_STATE to RESET_STATE, two writes
+ * of (b10) are required
+ */
+ if (((cur_state & QUP_STATE_MASK) == QUP_STATE_PAUSE) &&
+ (state == QUP_STATE_RESET)) {
+ writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
+ writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
+ } else {
+ cur_state &= ~QUP_STATE_MASK;
+ cur_state |= state;
+ writel_relaxed(cur_state, controller->base + QUP_STATE);
+ }
+
+ loop = 0;
+ while (!spi_qup_is_valid_state(controller)) {
+
+ usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);
+
+ if (++loop > SPI_DELAY_RETRY)
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void spi_qup_read_from_fifo(struct spi_qup *controller, u32 num_words)
+{
+ u8 *rx_buf = controller->rx_buf;
+ int i, shift, num_bytes;
+ u32 word;
+
+ for (; num_words; num_words--) {
+
+ word = readl_relaxed(controller->base + QUP_INPUT_FIFO);
+
+ num_bytes = min_t(int, spi_qup_len(controller) -
+ controller->rx_bytes,
+ controller->w_size);
+
+ if (!rx_buf) {
+ controller->rx_bytes += num_bytes;
+ continue;
+ }
+
+ for (i = 0; i < num_bytes; i++, controller->rx_bytes++) {
+ /*
+ * The data format depends on bytes per SPI word:
+ * 4 bytes: 0x12345678
+ * 2 bytes: 0x00001234
+ * 1 byte : 0x00000012
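+			 * e.g. with w_size == 2, the word 0x00001234 is
+			 * stored as 0x12 then 0x34 (shift 8, then shift 0)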
+ */
+ shift = BITS_PER_BYTE;
+ shift *= (controller->w_size - i - 1);
+ rx_buf[controller->rx_bytes] = word >> shift;
+ }
+ }
+}
+
+static void spi_qup_read(struct spi_qup *controller, u32 *opflags)
+{
+ u32 remainder, words_per_block, num_words;
+ bool is_block_mode = controller->mode == QUP_IO_M_MODE_BLOCK;
+
+ remainder = DIV_ROUND_UP(spi_qup_len(controller) - controller->rx_bytes,
+ controller->w_size);
+ words_per_block = controller->in_blk_sz >> 2;
+
+ do {
+ /* ACK by clearing service flag */
+ writel_relaxed(QUP_OP_IN_SERVICE_FLAG,
+ controller->base + QUP_OPERATIONAL);
+
+ if (!remainder)
+ goto exit;
+
+ if (is_block_mode) {
+ num_words = (remainder > words_per_block) ?
+ words_per_block : remainder;
+ } else {
+ if (!spi_qup_is_flag_set(controller,
+ QUP_OP_IN_FIFO_NOT_EMPTY))
+ break;
+
+ num_words = 1;
+ }
+
+ /* read up to the maximum transfer size available */
+ spi_qup_read_from_fifo(controller, num_words);
+
+ remainder -= num_words;
+
+ /* if block mode, check to see if next block is available */
+ if (is_block_mode && !spi_qup_is_flag_set(controller,
+ QUP_OP_IN_BLOCK_READ_REQ))
+ break;
+
+ } while (remainder);
+
+ /*
+ * Due to extra stickiness of the QUP_OP_IN_SERVICE_FLAG during block
+ * reads, it has to be cleared again at the very end. However, be sure
+ * to refresh opflags value because MAX_INPUT_DONE_FLAG may now be
+	 * present, and this is used to determine if the transaction is complete.
+ */
+exit:
+ if (!remainder) {
+ *opflags = readl_relaxed(controller->base + QUP_OPERATIONAL);
+ if (is_block_mode && *opflags & QUP_OP_MAX_INPUT_DONE_FLAG)
+ writel_relaxed(QUP_OP_IN_SERVICE_FLAG,
+ controller->base + QUP_OPERATIONAL);
+ }
+}
+
+static void spi_qup_write_to_fifo(struct spi_qup *controller, u32 num_words)
+{
+ const u8 *tx_buf = controller->tx_buf;
+ int i, num_bytes;
+ u32 word, data;
+
+ for (; num_words; num_words--) {
+ word = 0;
+
+ num_bytes = min_t(int, spi_qup_len(controller) -
+ controller->tx_bytes,
+ controller->w_size);
+ if (tx_buf)
+ for (i = 0; i < num_bytes; i++) {
+ data = tx_buf[controller->tx_bytes + i];
+ word |= data << (BITS_PER_BYTE * (3 - i));
+ }
+
+ controller->tx_bytes += num_bytes;
+
+ writel_relaxed(word, controller->base + QUP_OUTPUT_FIFO);
+ }
+}
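+/*
+ * Note on the packing in spi_qup_write_to_fifo(): bytes always fill the
+ * 32-bit FIFO word from the top down (shifts of 24, 16, ...), so a single
+ * 0xab byte is written to the FIFO as the word 0xab000000.
+ */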
+
+static void spi_qup_dma_done(void *data)
+{
+ struct spi_qup *qup = data;
+
+ complete(&qup->done);
+}
+
+static void spi_qup_write(struct spi_qup *controller)
+{
+ bool is_block_mode = controller->mode == QUP_IO_M_MODE_BLOCK;
+ u32 remainder, words_per_block, num_words;
+
+ remainder = DIV_ROUND_UP(spi_qup_len(controller) - controller->tx_bytes,
+ controller->w_size);
+ words_per_block = controller->out_blk_sz >> 2;
+
+ do {
+ /* ACK by clearing service flag */
+ writel_relaxed(QUP_OP_OUT_SERVICE_FLAG,
+ controller->base + QUP_OPERATIONAL);
+
+ /* make sure the interrupt is valid */
+ if (!remainder)
+ return;
+
+ if (is_block_mode) {
+ num_words = (remainder > words_per_block) ?
+ words_per_block : remainder;
+ } else {
+ if (spi_qup_is_flag_set(controller,
+ QUP_OP_OUT_FIFO_FULL))
+ break;
+
+ num_words = 1;
+ }
+
+ spi_qup_write_to_fifo(controller, num_words);
+
+ remainder -= num_words;
+
+ /* if block mode, check to see if next block is available */
+ if (is_block_mode && !spi_qup_is_flag_set(controller,
+ QUP_OP_OUT_BLOCK_WRITE_REQ))
+ break;
+
+ } while (remainder);
+}
+
+static int spi_qup_prep_sg(struct spi_master *master, struct scatterlist *sgl,
+ unsigned int nents, enum dma_transfer_direction dir,
+ dma_async_tx_callback callback)
+{
+ struct spi_qup *qup = spi_master_get_devdata(master);
+ unsigned long flags = DMA_PREP_INTERRUPT | DMA_PREP_FENCE;
+ struct dma_async_tx_descriptor *desc;
+ struct dma_chan *chan;
+ dma_cookie_t cookie;
+
+ if (dir == DMA_MEM_TO_DEV)
+ chan = master->dma_tx;
+ else
+ chan = master->dma_rx;
+
+ desc = dmaengine_prep_slave_sg(chan, sgl, nents, dir, flags);
+ if (IS_ERR_OR_NULL(desc))
+ return desc ? PTR_ERR(desc) : -EINVAL;
+
+ desc->callback = callback;
+ desc->callback_param = qup;
+
+ cookie = dmaengine_submit(desc);
+
+ return dma_submit_error(cookie);
+}
+
+static void spi_qup_dma_terminate(struct spi_master *master,
+ struct spi_transfer *xfer)
+{
+ if (xfer->tx_buf)
+ dmaengine_terminate_all(master->dma_tx);
+ if (xfer->rx_buf)
+ dmaengine_terminate_all(master->dma_rx);
+}
+
+static u32 spi_qup_sgl_get_nents_len(struct scatterlist *sgl, u32 max,
+ u32 *nents)
+{
+ struct scatterlist *sg;
+ u32 total = 0;
+
+ for (sg = sgl; sg; sg = sg_next(sg)) {
+ unsigned int len = sg_dma_len(sg);
+
+ /* check for overflow as well as limit */
+ if (((total + len) < total) || ((total + len) > max))
+ break;
+
+ total += len;
+ (*nents)++;
+ }
+
+ return total;
+}
+
+static int spi_qup_do_dma(struct spi_device *spi, struct spi_transfer *xfer,
+ unsigned long timeout)
+{
+ dma_async_tx_callback rx_done = NULL, tx_done = NULL;
+ struct spi_master *master = spi->master;
+ struct spi_qup *qup = spi_master_get_devdata(master);
+ struct scatterlist *tx_sgl, *rx_sgl;
+ int ret;
+
+ if (xfer->rx_buf)
+ rx_done = spi_qup_dma_done;
+ else if (xfer->tx_buf)
+ tx_done = spi_qup_dma_done;
+
+ rx_sgl = xfer->rx_sg.sgl;
+ tx_sgl = xfer->tx_sg.sgl;
+
+ do {
+ u32 rx_nents = 0, tx_nents = 0;
+
+ if (rx_sgl)
+ qup->n_words = spi_qup_sgl_get_nents_len(rx_sgl,
+ SPI_MAX_XFER, &rx_nents) / qup->w_size;
+ if (tx_sgl)
+ qup->n_words = spi_qup_sgl_get_nents_len(tx_sgl,
+ SPI_MAX_XFER, &tx_nents) / qup->w_size;
+ if (!qup->n_words)
+ return -EIO;
+
+ ret = spi_qup_io_config(spi, xfer);
+ if (ret)
+ return ret;
+
+ /* before issuing the descriptors, set the QUP to run */
+ ret = spi_qup_set_state(qup, QUP_STATE_RUN);
+ if (ret) {
+ dev_warn(qup->dev, "cannot set RUN state\n");
+ return ret;
+ }
+ if (rx_sgl) {
+ ret = spi_qup_prep_sg(master, rx_sgl, rx_nents,
+ DMA_DEV_TO_MEM, rx_done);
+ if (ret)
+ return ret;
+ dma_async_issue_pending(master->dma_rx);
+ }
+
+ if (tx_sgl) {
+ ret = spi_qup_prep_sg(master, tx_sgl, tx_nents,
+ DMA_MEM_TO_DEV, tx_done);
+ if (ret)
+ return ret;
+
+ dma_async_issue_pending(master->dma_tx);
+ }
+
+ if (!wait_for_completion_timeout(&qup->done, timeout))
+ return -ETIMEDOUT;
+
+ for (; rx_sgl && rx_nents--; rx_sgl = sg_next(rx_sgl))
+ ;
+ for (; tx_sgl && tx_nents--; tx_sgl = sg_next(tx_sgl))
+ ;
+
+ } while (rx_sgl || tx_sgl);
+
+ return 0;
+}
+
+static int spi_qup_do_pio(struct spi_device *spi, struct spi_transfer *xfer,
+ unsigned long timeout)
+{
+ struct spi_master *master = spi->master;
+ struct spi_qup *qup = spi_master_get_devdata(master);
+ int ret, n_words, iterations, offset = 0;
+
+ n_words = qup->n_words;
+ iterations = n_words / SPI_MAX_XFER; /* round down */
+ qup->rx_buf = xfer->rx_buf;
+ qup->tx_buf = xfer->tx_buf;
+
+ do {
+ if (iterations)
+ qup->n_words = SPI_MAX_XFER;
+ else
+ qup->n_words = n_words % SPI_MAX_XFER;
+
+ if (qup->tx_buf && offset)
+ qup->tx_buf = xfer->tx_buf + offset * SPI_MAX_XFER;
+
+ if (qup->rx_buf && offset)
+ qup->rx_buf = xfer->rx_buf + offset * SPI_MAX_XFER;
+
+ /*
+ * if the transaction is small enough, we need
+		 * to fall back to FIFO mode
+ */
+ if (qup->n_words <= (qup->in_fifo_sz / sizeof(u32)))
+ qup->mode = QUP_IO_M_MODE_FIFO;
+
+ ret = spi_qup_io_config(spi, xfer);
+ if (ret)
+ return ret;
+
+ ret = spi_qup_set_state(qup, QUP_STATE_RUN);
+ if (ret) {
+ dev_warn(qup->dev, "cannot set RUN state\n");
+ return ret;
+ }
+
+ ret = spi_qup_set_state(qup, QUP_STATE_PAUSE);
+ if (ret) {
+ dev_warn(qup->dev, "cannot set PAUSE state\n");
+ return ret;
+ }
+
+ if (qup->mode == QUP_IO_M_MODE_FIFO)
+ spi_qup_write(qup);
+
+ ret = spi_qup_set_state(qup, QUP_STATE_RUN);
+ if (ret) {
+ dev_warn(qup->dev, "cannot set RUN state\n");
+ return ret;
+ }
+
+ if (!wait_for_completion_timeout(&qup->done, timeout))
+ return -ETIMEDOUT;
+
+ offset++;
+ } while (iterations--);
+
+ return 0;
+}
+
+static bool spi_qup_data_pending(struct spi_qup *controller)
+{
+ unsigned int remainder_tx, remainder_rx;
+
+ remainder_tx = DIV_ROUND_UP(spi_qup_len(controller) -
+ controller->tx_bytes, controller->w_size);
+
+ remainder_rx = DIV_ROUND_UP(spi_qup_len(controller) -
+ controller->rx_bytes, controller->w_size);
+
+ return remainder_tx || remainder_rx;
+}
+
+static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
+{
+ struct spi_qup *controller = dev_id;
+ u32 opflags, qup_err, spi_err;
+ int error = 0;
+
+ qup_err = readl_relaxed(controller->base + QUP_ERROR_FLAGS);
+ spi_err = readl_relaxed(controller->base + SPI_ERROR_FLAGS);
+ opflags = readl_relaxed(controller->base + QUP_OPERATIONAL);
+
+ writel_relaxed(qup_err, controller->base + QUP_ERROR_FLAGS);
+ writel_relaxed(spi_err, controller->base + SPI_ERROR_FLAGS);
+
+ if (qup_err) {
+ if (qup_err & QUP_ERROR_OUTPUT_OVER_RUN)
+ dev_warn(controller->dev, "OUTPUT_OVER_RUN\n");
+ if (qup_err & QUP_ERROR_INPUT_UNDER_RUN)
+ dev_warn(controller->dev, "INPUT_UNDER_RUN\n");
+ if (qup_err & QUP_ERROR_OUTPUT_UNDER_RUN)
+ dev_warn(controller->dev, "OUTPUT_UNDER_RUN\n");
+ if (qup_err & QUP_ERROR_INPUT_OVER_RUN)
+ dev_warn(controller->dev, "INPUT_OVER_RUN\n");
+
+ error = -EIO;
+ }
+
+ if (spi_err) {
+ if (spi_err & SPI_ERROR_CLK_OVER_RUN)
+ dev_warn(controller->dev, "CLK_OVER_RUN\n");
+ if (spi_err & SPI_ERROR_CLK_UNDER_RUN)
+ dev_warn(controller->dev, "CLK_UNDER_RUN\n");
+
+ error = -EIO;
+ }
+
+ spin_lock(&controller->lock);
+ if (!controller->error)
+ controller->error = error;
+ spin_unlock(&controller->lock);
+
+ if (spi_qup_is_dma_xfer(controller->mode)) {
+ writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);
+ } else {
+ if (opflags & QUP_OP_IN_SERVICE_FLAG)
+ spi_qup_read(controller, &opflags);
+
+ if (opflags & QUP_OP_OUT_SERVICE_FLAG)
+ spi_qup_write(controller);
+
+ if (!spi_qup_data_pending(controller))
+ complete(&controller->done);
+ }
+
+ if (error)
+ complete(&controller->done);
+
+ if (opflags & QUP_OP_MAX_INPUT_DONE_FLAG) {
+ if (!spi_qup_is_dma_xfer(controller->mode)) {
+ if (spi_qup_data_pending(controller))
+ return IRQ_HANDLED;
+ }
+ complete(&controller->done);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* set the clock frequency and bits per word, and determine the I/O mode */
+static int spi_qup_io_prep(struct spi_device *spi, struct spi_transfer *xfer)
+{
+ struct spi_qup *controller = spi_master_get_devdata(spi->master);
+ int ret;
+
+ if (spi->mode & SPI_LOOP && xfer->len > controller->in_fifo_sz) {
+ dev_err(controller->dev, "too big size for loopback %d > %d\n",
+ xfer->len, controller->in_fifo_sz);
+ return -EIO;
+ }
+
+ ret = clk_set_rate(controller->cclk, xfer->speed_hz);
+ if (ret) {
+ dev_err(controller->dev, "fail to set frequency %d",
+ xfer->speed_hz);
+ return -EIO;
+ }
+
+ controller->w_size = DIV_ROUND_UP(xfer->bits_per_word, 8);
+ controller->n_words = xfer->len / controller->w_size;
+
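+	/*
+	 * Pick the I/O mode: FIFO when the whole transfer fits in the input
+	 * FIFO, BAM (DMA) when the buffers have been DMA-mapped and the
+	 * controller can do DMA, and BLOCK mode otherwise.
+	 */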
+ if (controller->n_words <= (controller->in_fifo_sz / sizeof(u32)))
+ controller->mode = QUP_IO_M_MODE_FIFO;
+ else if (spi->master->can_dma &&
+ spi->master->can_dma(spi->master, spi, xfer) &&
+ spi->master->cur_msg_mapped)
+ controller->mode = QUP_IO_M_MODE_BAM;
+ else
+ controller->mode = QUP_IO_M_MODE_BLOCK;
+
+ return 0;
+}
+
+/* prep qup for another spi transaction of specific type */
+static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
+{
+ struct spi_qup *controller = spi_master_get_devdata(spi->master);
+ u32 config, iomode, control;
+ unsigned long flags;
+
+ spin_lock_irqsave(&controller->lock, flags);
+ controller->xfer = xfer;
+ controller->error = 0;
+ controller->rx_bytes = 0;
+ controller->tx_bytes = 0;
+ spin_unlock_irqrestore(&controller->lock, flags);
+
+
+ if (spi_qup_set_state(controller, QUP_STATE_RESET)) {
+ dev_err(controller->dev, "cannot set RESET state\n");
+ return -EIO;
+ }
+
+ switch (controller->mode) {
+ case QUP_IO_M_MODE_FIFO:
+ writel_relaxed(controller->n_words,
+ controller->base + QUP_MX_READ_CNT);
+ writel_relaxed(controller->n_words,
+ controller->base + QUP_MX_WRITE_CNT);
+ /* must be zero for FIFO */
+ writel_relaxed(0, controller->base + QUP_MX_INPUT_CNT);
+ writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
+ break;
+ case QUP_IO_M_MODE_BAM:
+ writel_relaxed(controller->n_words,
+ controller->base + QUP_MX_INPUT_CNT);
+ writel_relaxed(controller->n_words,
+ controller->base + QUP_MX_OUTPUT_CNT);
+ /* must be zero for BLOCK and BAM */
+ writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
+ writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);
+
+ if (!controller->qup_v1) {
+ void __iomem *input_cnt;
+
+ input_cnt = controller->base + QUP_MX_INPUT_CNT;
+ /*
+ * for DMA transfers, both QUP_MX_INPUT_CNT and
+			 * QUP_MX_OUTPUT_CNT must be zero in all cases but one.
+ * That case is a non-balanced transfer when there is
+ * only a rx_buf.
+ */
+ if (xfer->tx_buf)
+ writel_relaxed(0, input_cnt);
+ else
+ writel_relaxed(controller->n_words, input_cnt);
+
+ writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
+ }
+ break;
+ case QUP_IO_M_MODE_BLOCK:
+ reinit_completion(&controller->done);
+ writel_relaxed(controller->n_words,
+ controller->base + QUP_MX_INPUT_CNT);
+ writel_relaxed(controller->n_words,
+ controller->base + QUP_MX_OUTPUT_CNT);
+ /* must be zero for BLOCK and BAM */
+ writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
+ writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);
+ break;
+ default:
+ dev_err(controller->dev, "unknown mode = %d\n",
+ controller->mode);
+ return -EIO;
+ }
+
+ iomode = readl_relaxed(controller->base + QUP_IO_M_MODES);
+ /* Set input and output transfer mode */
+ iomode &= ~(QUP_IO_M_INPUT_MODE_MASK | QUP_IO_M_OUTPUT_MODE_MASK);
+
+ if (!spi_qup_is_dma_xfer(controller->mode))
+ iomode &= ~(QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN);
+ else
+ iomode |= QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN;
+
+ iomode |= (controller->mode << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT);
+ iomode |= (controller->mode << QUP_IO_M_INPUT_MODE_MASK_SHIFT);
+
+ writel_relaxed(iomode, controller->base + QUP_IO_M_MODES);
+
+ control = readl_relaxed(controller->base + SPI_IO_CONTROL);
+
+ if (spi->mode & SPI_CPOL)
+ control |= SPI_IO_C_CLK_IDLE_HIGH;
+ else
+ control &= ~SPI_IO_C_CLK_IDLE_HIGH;
+
+ writel_relaxed(control, controller->base + SPI_IO_CONTROL);
+
+ config = readl_relaxed(controller->base + SPI_CONFIG);
+
+ if (spi->mode & SPI_LOOP)
+ config |= SPI_CONFIG_LOOPBACK;
+ else
+ config &= ~SPI_CONFIG_LOOPBACK;
+
+ if (spi->mode & SPI_CPHA)
+ config &= ~SPI_CONFIG_INPUT_FIRST;
+ else
+ config |= SPI_CONFIG_INPUT_FIRST;
+
+ /*
+ * HS_MODE improves signal stability for spi-clk high rates,
+	 * but is invalid in loopback mode.
+ */
+ if ((xfer->speed_hz >= SPI_HS_MIN_RATE) && !(spi->mode & SPI_LOOP))
+ config |= SPI_CONFIG_HS_MODE;
+ else
+ config &= ~SPI_CONFIG_HS_MODE;
+
+ writel_relaxed(config, controller->base + SPI_CONFIG);
+
+ config = readl_relaxed(controller->base + QUP_CONFIG);
+ config &= ~(QUP_CONFIG_NO_INPUT | QUP_CONFIG_NO_OUTPUT | QUP_CONFIG_N);
+ config |= xfer->bits_per_word - 1;
+ config |= QUP_CONFIG_SPI_MODE;
+
+ if (spi_qup_is_dma_xfer(controller->mode)) {
+ if (!xfer->tx_buf)
+ config |= QUP_CONFIG_NO_OUTPUT;
+ if (!xfer->rx_buf)
+ config |= QUP_CONFIG_NO_INPUT;
+ }
+
+ writel_relaxed(config, controller->base + QUP_CONFIG);
+
+ /* only write to OPERATIONAL_MASK when register is present */
+ if (!controller->qup_v1) {
+ u32 mask = 0;
+
+ /*
+ * mask INPUT and OUTPUT service flags to prevent IRQs on FIFO
+ * status change in BAM mode
+ */
+
+ if (spi_qup_is_dma_xfer(controller->mode))
+ mask = QUP_OP_IN_SERVICE_FLAG | QUP_OP_OUT_SERVICE_FLAG;
+
+ writel_relaxed(mask, controller->base + QUP_OPERATIONAL_MASK);
+ }
+
+ return 0;
+}
+
+static int spi_qup_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct spi_qup *controller = spi_master_get_devdata(master);
+ unsigned long timeout, flags;
+ int ret;
+
+ ret = spi_qup_io_prep(spi, xfer);
+ if (ret)
+ return ret;
+
+ timeout = DIV_ROUND_UP(xfer->speed_hz, MSEC_PER_SEC);
+ timeout = DIV_ROUND_UP(min_t(unsigned long, SPI_MAX_XFER,
+ xfer->len) * 8, timeout);
+ timeout = 100 * msecs_to_jiffies(timeout);
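+	/*
+	 * e.g. a 1000-byte transfer at 1 MHz: 1000000 / 1000 = 1000 bits/ms,
+	 * 8000 bits / 1000 = 8 ms, then scaled by 100 to a generous timeout.
+	 */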
+
+ reinit_completion(&controller->done);
+
+ spin_lock_irqsave(&controller->lock, flags);
+ controller->xfer = xfer;
+ controller->error = 0;
+ controller->rx_bytes = 0;
+ controller->tx_bytes = 0;
+ spin_unlock_irqrestore(&controller->lock, flags);
+
+ if (spi_qup_is_dma_xfer(controller->mode))
+ ret = spi_qup_do_dma(spi, xfer, timeout);
+ else
+ ret = spi_qup_do_pio(spi, xfer, timeout);
+
+ spi_qup_set_state(controller, QUP_STATE_RESET);
+ spin_lock_irqsave(&controller->lock, flags);
+ if (!ret)
+ ret = controller->error;
+ spin_unlock_irqrestore(&controller->lock, flags);
+
+ if (ret && spi_qup_is_dma_xfer(controller->mode))
+ spi_qup_dma_terminate(master, xfer);
+
+ return ret;
+}
+
+static bool spi_qup_can_dma(struct spi_master *master, struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct spi_qup *qup = spi_master_get_devdata(master);
+ size_t dma_align = dma_get_cache_alignment();
+ int n_words;
+
+ if (xfer->rx_buf) {
+ if (!IS_ALIGNED((size_t)xfer->rx_buf, dma_align) ||
+ IS_ERR_OR_NULL(master->dma_rx))
+ return false;
+ if (qup->qup_v1 && (xfer->len % qup->in_blk_sz))
+ return false;
+ }
+
+ if (xfer->tx_buf) {
+ if (!IS_ALIGNED((size_t)xfer->tx_buf, dma_align) ||
+ IS_ERR_OR_NULL(master->dma_tx))
+ return false;
+ if (qup->qup_v1 && (xfer->len % qup->out_blk_sz))
+ return false;
+ }
+
+ n_words = xfer->len / DIV_ROUND_UP(xfer->bits_per_word, 8);
+ if (n_words <= (qup->in_fifo_sz / sizeof(u32)))
+ return false;
+
+ return true;
+}
+
+static void spi_qup_release_dma(struct spi_master *master)
+{
+ if (!IS_ERR_OR_NULL(master->dma_rx))
+ dma_release_channel(master->dma_rx);
+ if (!IS_ERR_OR_NULL(master->dma_tx))
+ dma_release_channel(master->dma_tx);
+}
+
+static int spi_qup_init_dma(struct spi_master *master, resource_size_t base)
+{
+ struct spi_qup *spi = spi_master_get_devdata(master);
+ struct dma_slave_config *rx_conf = &spi->rx_conf,
+ *tx_conf = &spi->tx_conf;
+ struct device *dev = spi->dev;
+ int ret;
+
+ /* allocate dma resources, if available */
+ master->dma_rx = dma_request_chan(dev, "rx");
+ if (IS_ERR(master->dma_rx))
+ return PTR_ERR(master->dma_rx);
+
+ master->dma_tx = dma_request_chan(dev, "tx");
+ if (IS_ERR(master->dma_tx)) {
+ ret = PTR_ERR(master->dma_tx);
+ goto err_tx;
+ }
+
+ /* set DMA parameters */
+ rx_conf->direction = DMA_DEV_TO_MEM;
+ rx_conf->device_fc = 1;
+ rx_conf->src_addr = base + QUP_INPUT_FIFO;
+ rx_conf->src_maxburst = spi->in_blk_sz;
+
+ tx_conf->direction = DMA_MEM_TO_DEV;
+ tx_conf->device_fc = 1;
+ tx_conf->dst_addr = base + QUP_OUTPUT_FIFO;
+ tx_conf->dst_maxburst = spi->out_blk_sz;
+
+ ret = dmaengine_slave_config(master->dma_rx, rx_conf);
+ if (ret) {
+ dev_err(dev, "failed to configure RX channel\n");
+ goto err;
+ }
+
+ ret = dmaengine_slave_config(master->dma_tx, tx_conf);
+ if (ret) {
+ dev_err(dev, "failed to configure TX channel\n");
+ goto err;
+ }
+
+ return 0;
+
+err:
+ dma_release_channel(master->dma_tx);
+err_tx:
+ dma_release_channel(master->dma_rx);
+ return ret;
+}
+
+static void spi_qup_set_cs(struct spi_device *spi, bool val)
+{
+ struct spi_qup *controller;
+ u32 spi_ioc;
+ u32 spi_ioc_orig;
+
+ controller = spi_master_get_devdata(spi->master);
+ spi_ioc = readl_relaxed(controller->base + SPI_IO_CONTROL);
+ spi_ioc_orig = spi_ioc;
+ if (!val)
+ spi_ioc |= SPI_IO_C_FORCE_CS;
+ else
+ spi_ioc &= ~SPI_IO_C_FORCE_CS;
+
+ if (spi_ioc != spi_ioc_orig)
+ writel_relaxed(spi_ioc, controller->base + SPI_IO_CONTROL);
+}
+
+static int spi_qup_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct clk *iclk, *cclk;
+ struct spi_qup *controller;
+ struct resource *res;
+ struct device *dev;
+ void __iomem *base;
+ u32 max_freq, iomode, num_cs;
+ int ret, irq, size;
+
+ dev = &pdev->dev;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ cclk = devm_clk_get(dev, "core");
+ if (IS_ERR(cclk))
+ return PTR_ERR(cclk);
+
+ iclk = devm_clk_get(dev, "iface");
+ if (IS_ERR(iclk))
+ return PTR_ERR(iclk);
+
+	/* This is an optional parameter */
+ if (of_property_read_u32(dev->of_node, "spi-max-frequency", &max_freq))
+ max_freq = SPI_MAX_RATE;
+
+ if (!max_freq || max_freq > SPI_MAX_RATE) {
+ dev_err(dev, "invalid clock frequency %d\n", max_freq);
+ return -ENXIO;
+ }
+
+ master = spi_alloc_master(dev, sizeof(struct spi_qup));
+ if (!master) {
+ dev_err(dev, "cannot allocate master\n");
+ return -ENOMEM;
+ }
+
+	/* use num-cs from the DT unless it is absent or out of range */
+ if (of_property_read_u32(dev->of_node, "num-cs", &num_cs) ||
+ num_cs > SPI_NUM_CHIPSELECTS)
+ master->num_chipselect = SPI_NUM_CHIPSELECTS;
+ else
+ master->num_chipselect = num_cs;
+
+ master->use_gpio_descriptors = true;
+ master->max_native_cs = SPI_NUM_CHIPSELECTS;
+ master->bus_num = pdev->id;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
+ master->max_speed_hz = max_freq;
+ master->transfer_one = spi_qup_transfer_one;
+ master->dev.of_node = pdev->dev.of_node;
+ master->auto_runtime_pm = true;
+ master->dma_alignment = dma_get_cache_alignment();
+ master->max_dma_len = SPI_MAX_XFER;
+
+ platform_set_drvdata(pdev, master);
+
+ controller = spi_master_get_devdata(master);
+
+ controller->dev = dev;
+ controller->base = base;
+ controller->iclk = iclk;
+ controller->cclk = cclk;
+ controller->irq = irq;
+
+ ret = spi_qup_init_dma(master, res->start);
+ if (ret == -EPROBE_DEFER)
+ goto error;
+ else if (!ret)
+ master->can_dma = spi_qup_can_dma;
+
+ controller->qup_v1 = (uintptr_t)of_device_get_match_data(dev);
+
+ if (!controller->qup_v1)
+ master->set_cs = spi_qup_set_cs;
+
+ spin_lock_init(&controller->lock);
+ init_completion(&controller->done);
+
+ ret = clk_prepare_enable(cclk);
+ if (ret) {
+ dev_err(dev, "cannot enable core clock\n");
+ goto error_dma;
+ }
+
+ ret = clk_prepare_enable(iclk);
+ if (ret) {
+ clk_disable_unprepare(cclk);
+ dev_err(dev, "cannot enable iface clock\n");
+ goto error_dma;
+ }
+
+ iomode = readl_relaxed(base + QUP_IO_M_MODES);
+
+ size = QUP_IO_M_OUTPUT_BLOCK_SIZE(iomode);
+ if (size)
+ controller->out_blk_sz = size * 16;
+ else
+ controller->out_blk_sz = 4;
+
+ size = QUP_IO_M_INPUT_BLOCK_SIZE(iomode);
+ if (size)
+ controller->in_blk_sz = size * 16;
+ else
+ controller->in_blk_sz = 4;
+
+ size = QUP_IO_M_OUTPUT_FIFO_SIZE(iomode);
+ controller->out_fifo_sz = controller->out_blk_sz * (2 << size);
+
+ size = QUP_IO_M_INPUT_FIFO_SIZE(iomode);
+ controller->in_fifo_sz = controller->in_blk_sz * (2 << size);
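+	/*
+	 * e.g. a block-size field of 1 decodes to 16-byte blocks, and a FIFO
+	 * size field of 2 then gives 16 * (2 << 2) = 128 bytes of FIFO.
+	 */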
+
+ dev_info(dev, "IN:block:%d, fifo:%d, OUT:block:%d, fifo:%d\n",
+ controller->in_blk_sz, controller->in_fifo_sz,
+ controller->out_blk_sz, controller->out_fifo_sz);
+
+ writel_relaxed(1, base + QUP_SW_RESET);
+
+ ret = spi_qup_set_state(controller, QUP_STATE_RESET);
+ if (ret) {
+ dev_err(dev, "cannot set RESET state\n");
+ goto error_clk;
+ }
+
+ writel_relaxed(0, base + QUP_OPERATIONAL);
+ writel_relaxed(0, base + QUP_IO_M_MODES);
+
+ if (!controller->qup_v1)
+ writel_relaxed(0, base + QUP_OPERATIONAL_MASK);
+
+ writel_relaxed(SPI_ERROR_CLK_UNDER_RUN | SPI_ERROR_CLK_OVER_RUN,
+ base + SPI_ERROR_FLAGS_EN);
+
+	/* on earlier versions of the QUP, leave INPUT_OVER_RUN disabled */
+ if (controller->qup_v1)
+ writel_relaxed(QUP_ERROR_OUTPUT_OVER_RUN |
+ QUP_ERROR_INPUT_UNDER_RUN | QUP_ERROR_OUTPUT_UNDER_RUN,
+ base + QUP_ERROR_FLAGS_EN);
+
+ writel_relaxed(0, base + SPI_CONFIG);
+ writel_relaxed(SPI_IO_C_NO_TRI_STATE, base + SPI_IO_CONTROL);
+
+ ret = devm_request_irq(dev, irq, spi_qup_qup_irq,
+ IRQF_TRIGGER_HIGH, pdev->name, controller);
+ if (ret)
+ goto error_clk;
+
+ pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ ret = devm_spi_register_master(dev, master);
+ if (ret)
+ goto disable_pm;
+
+ return 0;
+
+disable_pm:
+ pm_runtime_disable(&pdev->dev);
+error_clk:
+ clk_disable_unprepare(cclk);
+ clk_disable_unprepare(iclk);
+error_dma:
+ spi_qup_release_dma(master);
+error:
+ spi_master_put(master);
+ return ret;
+}
+
+#ifdef CONFIG_PM
+static int spi_qup_pm_suspend_runtime(struct device *device)
+{
+ struct spi_master *master = dev_get_drvdata(device);
+ struct spi_qup *controller = spi_master_get_devdata(master);
+ u32 config;
+
+	/* Enable clock auto-gating */
+ config = readl(controller->base + QUP_CONFIG);
+ config |= QUP_CONFIG_CLOCK_AUTO_GATE;
+ writel_relaxed(config, controller->base + QUP_CONFIG);
+
+ clk_disable_unprepare(controller->cclk);
+ clk_disable_unprepare(controller->iclk);
+
+ return 0;
+}
+
+static int spi_qup_pm_resume_runtime(struct device *device)
+{
+ struct spi_master *master = dev_get_drvdata(device);
+ struct spi_qup *controller = spi_master_get_devdata(master);
+ u32 config;
+ int ret;
+
+ ret = clk_prepare_enable(controller->iclk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(controller->cclk);
+ if (ret) {
+ clk_disable_unprepare(controller->iclk);
+ return ret;
+ }
+
+	/* Disable clock auto-gating */
+ config = readl_relaxed(controller->base + QUP_CONFIG);
+ config &= ~QUP_CONFIG_CLOCK_AUTO_GATE;
+ writel_relaxed(config, controller->base + QUP_CONFIG);
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+#ifdef CONFIG_PM_SLEEP
+static int spi_qup_suspend(struct device *device)
+{
+ struct spi_master *master = dev_get_drvdata(device);
+ struct spi_qup *controller = spi_master_get_devdata(master);
+ int ret;
+
+ if (pm_runtime_suspended(device)) {
+ ret = spi_qup_pm_resume_runtime(device);
+ if (ret)
+ return ret;
+ }
+ ret = spi_master_suspend(master);
+ if (ret)
+ return ret;
+
+ ret = spi_qup_set_state(controller, QUP_STATE_RESET);
+ if (ret)
+ return ret;
+
+ clk_disable_unprepare(controller->cclk);
+ clk_disable_unprepare(controller->iclk);
+ return 0;
+}
+
+static int spi_qup_resume(struct device *device)
+{
+ struct spi_master *master = dev_get_drvdata(device);
+ struct spi_qup *controller = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_prepare_enable(controller->iclk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(controller->cclk);
+ if (ret) {
+ clk_disable_unprepare(controller->iclk);
+ return ret;
+ }
+
+ ret = spi_qup_set_state(controller, QUP_STATE_RESET);
+ if (ret)
+ goto disable_clk;
+
+ ret = spi_master_resume(master);
+ if (ret)
+ goto disable_clk;
+
+ return 0;
+
+disable_clk:
+ clk_disable_unprepare(controller->cclk);
+ clk_disable_unprepare(controller->iclk);
+ return ret;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static int spi_qup_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = dev_get_drvdata(&pdev->dev);
+ struct spi_qup *controller = spi_master_get_devdata(master);
+ int ret;
+
+ ret = pm_runtime_get_sync(&pdev->dev);
+
+ if (ret >= 0) {
+ ret = spi_qup_set_state(controller, QUP_STATE_RESET);
+ if (ret)
+ dev_warn(&pdev->dev, "failed to reset controller (%pe)\n",
+ ERR_PTR(ret));
+
+ clk_disable_unprepare(controller->cclk);
+ clk_disable_unprepare(controller->iclk);
+ } else {
+ dev_warn(&pdev->dev, "failed to resume, skip hw disable (%pe)\n",
+ ERR_PTR(ret));
+ }
+
+ spi_qup_release_dma(master);
+
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id spi_qup_dt_match[] = {
+ { .compatible = "qcom,spi-qup-v1.1.1", .data = (void *)1, },
+ { .compatible = "qcom,spi-qup-v2.1.1", },
+ { .compatible = "qcom,spi-qup-v2.2.1", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, spi_qup_dt_match);
+
+static const struct dev_pm_ops spi_qup_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(spi_qup_suspend, spi_qup_resume)
+ SET_RUNTIME_PM_OPS(spi_qup_pm_suspend_runtime,
+ spi_qup_pm_resume_runtime,
+ NULL)
+};
+
+static struct platform_driver spi_qup_driver = {
+ .driver = {
+ .name = "spi_qup",
+ .pm = &spi_qup_dev_pm_ops,
+ .of_match_table = spi_qup_dt_match,
+ },
+ .probe = spi_qup_probe,
+ .remove = spi_qup_remove,
+};
+module_platform_driver(spi_qup_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:spi_qup");
diff --git a/drivers/spi/spi-rb4xx.c b/drivers/spi/spi-rb4xx.c
new file mode 100644
index 000000000..9f97d18a0
--- /dev/null
+++ b/drivers/spi/spi-rb4xx.c
@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * SPI controller driver for the Mikrotik RB4xx boards
+ *
+ * Copyright (C) 2010 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2015 Bert Vermeulen <bert@biot.com>
+ *
+ * This file was based on the patches for Linux 2.6.27.39 published by
+ * MikroTik for their RouterBoard 4xx series devices.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/spi/spi.h>
+#include <linux/of.h>
+
+#include <asm/mach-ath79/ar71xx_regs.h>
+
+struct rb4xx_spi {
+ void __iomem *base;
+ struct clk *clk;
+};
+
+static inline u32 rb4xx_read(struct rb4xx_spi *rbspi, u32 reg)
+{
+ return __raw_readl(rbspi->base + reg);
+}
+
+static inline void rb4xx_write(struct rb4xx_spi *rbspi, u32 reg, u32 value)
+{
+ __raw_writel(value, rbspi->base + reg);
+}
+
+static inline void do_spi_clk(struct rb4xx_spi *rbspi, u32 spi_ioc, int value)
+{
+ u32 regval;
+
+ regval = spi_ioc;
+ if (value & BIT(0))
+ regval |= AR71XX_SPI_IOC_DO;
+
+ rb4xx_write(rbspi, AR71XX_SPI_REG_IOC, regval);
+ rb4xx_write(rbspi, AR71XX_SPI_REG_IOC, regval | AR71XX_SPI_IOC_CLK);
+}
+
+static void do_spi_byte(struct rb4xx_spi *rbspi, u32 spi_ioc, u8 byte)
+{
+ int i;
+
+ for (i = 7; i >= 0; i--)
+ do_spi_clk(rbspi, spi_ioc, byte >> i);
+}
+
+/* The CS2 pin is used to clock in a second bit per clock cycle. */
+static inline void do_spi_clk_two(struct rb4xx_spi *rbspi, u32 spi_ioc,
+ u8 value)
+{
+ u32 regval;
+
+ regval = spi_ioc;
+ if (value & BIT(1))
+ regval |= AR71XX_SPI_IOC_DO;
+ if (value & BIT(0))
+ regval |= AR71XX_SPI_IOC_CS2;
+
+ rb4xx_write(rbspi, AR71XX_SPI_REG_IOC, regval);
+ rb4xx_write(rbspi, AR71XX_SPI_REG_IOC, regval | AR71XX_SPI_IOC_CLK);
+}
+
+/* Two bits at a time, msb first */
+static void do_spi_byte_two(struct rb4xx_spi *rbspi, u32 spi_ioc, u8 byte)
+{
+ do_spi_clk_two(rbspi, spi_ioc, byte >> 6);
+ do_spi_clk_two(rbspi, spi_ioc, byte >> 4);
+ do_spi_clk_two(rbspi, spi_ioc, byte >> 2);
+ do_spi_clk_two(rbspi, spi_ioc, byte >> 0);
+}
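+/*
+ * e.g. do_spi_byte_two() shifts out byte 0xb4 (1011 0100b) as four
+ * (DO, CS2) bit pairs: (1,0), (1,1), (0,1), (0,0) - DO carries the
+ * odd-numbered bits of the byte and CS2 the even-numbered ones.
+ */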
+
+static void rb4xx_set_cs(struct spi_device *spi, bool enable)
+{
+ struct rb4xx_spi *rbspi = spi_master_get_devdata(spi->master);
+
+ /*
+ * Setting CS is done along with bitbanging the actual values,
+ * since it's all on the same hardware register. However the
+ * CPLD needs CS deselected after every command.
+ */
+ if (enable)
+ rb4xx_write(rbspi, AR71XX_SPI_REG_IOC,
+ AR71XX_SPI_IOC_CS0 | AR71XX_SPI_IOC_CS1);
+}
+
+static int rb4xx_transfer_one(struct spi_master *master,
+ struct spi_device *spi, struct spi_transfer *t)
+{
+ struct rb4xx_spi *rbspi = spi_master_get_devdata(master);
+ int i;
+ u32 spi_ioc;
+ u8 *rx_buf;
+ const u8 *tx_buf;
+
+ /*
+ * Prime the SPI register with the SPI device selected. The m25p80 boot
+ * flash and CPLD share the CS0 pin. This works because the CPLD's
+ * command set was designed to almost not clash with that of the
+ * boot flash.
+ */
+ if (spi->chip_select == 2)
+ /* MMC */
+ spi_ioc = AR71XX_SPI_IOC_CS0;
+ else
+ /* Boot flash and CPLD */
+ spi_ioc = AR71XX_SPI_IOC_CS1;
+
+ tx_buf = t->tx_buf;
+ rx_buf = t->rx_buf;
+ for (i = 0; i < t->len; ++i) {
+ if (t->tx_nbits == SPI_NBITS_DUAL)
+ /* CPLD can use two-wire transfers */
+ do_spi_byte_two(rbspi, spi_ioc, tx_buf[i]);
+ else
+ do_spi_byte(rbspi, spi_ioc, tx_buf[i]);
+ if (!rx_buf)
+ continue;
+ rx_buf[i] = rb4xx_read(rbspi, AR71XX_SPI_REG_RDS);
+ }
+ spi_finalize_current_transfer(master);
+
+ return 0;
+}
+
+static int rb4xx_spi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct clk *ahb_clk;
+ struct rb4xx_spi *rbspi;
+ int err;
+ void __iomem *spi_base;
+
+ spi_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(spi_base))
+ return PTR_ERR(spi_base);
+
+ master = devm_spi_alloc_master(&pdev->dev, sizeof(*rbspi));
+ if (!master)
+ return -ENOMEM;
+
+ ahb_clk = devm_clk_get(&pdev->dev, "ahb");
+ if (IS_ERR(ahb_clk))
+ return PTR_ERR(ahb_clk);
+
+ master->dev.of_node = pdev->dev.of_node;
+ master->bus_num = 0;
+ master->num_chipselect = 3;
+ master->mode_bits = SPI_TX_DUAL;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->flags = SPI_MASTER_MUST_TX;
+ master->transfer_one = rb4xx_transfer_one;
+ master->set_cs = rb4xx_set_cs;
+
+ rbspi = spi_master_get_devdata(master);
+ rbspi->base = spi_base;
+ rbspi->clk = ahb_clk;
+ platform_set_drvdata(pdev, rbspi);
+
+ err = devm_spi_register_master(&pdev->dev, master);
+ if (err) {
+ dev_err(&pdev->dev, "failed to register SPI master\n");
+ return err;
+ }
+
+ err = clk_prepare_enable(ahb_clk);
+ if (err)
+ return err;
+
+ /* Enable SPI */
+ rb4xx_write(rbspi, AR71XX_SPI_REG_FS, AR71XX_SPI_FS_GPIO);
+
+ return 0;
+}
+
+static int rb4xx_spi_remove(struct platform_device *pdev)
+{
+ struct rb4xx_spi *rbspi = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(rbspi->clk);
+
+ return 0;
+}
+
+static const struct of_device_id rb4xx_spi_dt_match[] = {
+ { .compatible = "mikrotik,rb4xx-spi" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, rb4xx_spi_dt_match);
+
+static struct platform_driver rb4xx_spi_drv = {
+ .probe = rb4xx_spi_probe,
+ .remove = rb4xx_spi_remove,
+ .driver = {
+ .name = "rb4xx-spi",
+ .of_match_table = of_match_ptr(rb4xx_spi_dt_match),
+ },
+};
+
+module_platform_driver(rb4xx_spi_drv);
+
+MODULE_DESCRIPTION("Mikrotik RB4xx SPI controller driver");
+MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
+MODULE_AUTHOR("Bert Vermeulen <bert@biot.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-realtek-rtl.c b/drivers/spi/spi-realtek-rtl.c
new file mode 100644
index 000000000..866b0477d
--- /dev/null
+++ b/drivers/spi/spi-realtek-rtl.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/spi/spi.h>
+
+struct rtspi {
+ void __iomem *base;
+};
+
+/* SPI Flash Configuration Register */
+#define RTL_SPI_SFCR 0x00
+#define RTL_SPI_SFCR_RBO BIT(28)
+#define RTL_SPI_SFCR_WBO BIT(27)
+
+/* SPI Flash Control and Status Register */
+#define RTL_SPI_SFCSR 0x08
+#define RTL_SPI_SFCSR_CSB0 BIT(31)
+#define RTL_SPI_SFCSR_CSB1 BIT(30)
+#define RTL_SPI_SFCSR_RDY BIT(27)
+#define RTL_SPI_SFCSR_CS BIT(24)
+#define RTL_SPI_SFCSR_LEN_MASK ~(0x03 << 28)
+#define RTL_SPI_SFCSR_LEN1 (0x00 << 28)
+#define RTL_SPI_SFCSR_LEN4 (0x03 << 28)
+
+/* SPI Flash Data Register */
+#define RTL_SPI_SFDR 0x0c
+
+#define REG(x) (rtspi->base + x)
+
+
+static void rt_set_cs(struct spi_device *spi, bool active)
+{
+ struct rtspi *rtspi = spi_controller_get_devdata(spi->controller);
+ u32 value;
+
+ /* CS0 bit is active low */
+ value = readl(REG(RTL_SPI_SFCSR));
+ if (active)
+ value |= RTL_SPI_SFCSR_CSB0;
+ else
+ value &= ~RTL_SPI_SFCSR_CSB0;
+ writel(value, REG(RTL_SPI_SFCSR));
+}
+
+static void set_size(struct rtspi *rtspi, int size)
+{
+ u32 value;
+
+ value = readl(REG(RTL_SPI_SFCSR));
+ value &= RTL_SPI_SFCSR_LEN_MASK;
+ if (size == 4)
+ value |= RTL_SPI_SFCSR_LEN4;
+ else if (size == 1)
+ value |= RTL_SPI_SFCSR_LEN1;
+ writel(value, REG(RTL_SPI_SFCSR));
+}
+
+static inline void wait_ready(struct rtspi *rtspi)
+{
+ while (!(readl(REG(RTL_SPI_SFCSR)) & RTL_SPI_SFCSR_RDY))
+ cpu_relax();
+}
+static void send4(struct rtspi *rtspi, const u32 *buf)
+{
+ wait_ready(rtspi);
+ set_size(rtspi, 4);
+ writel(*buf, REG(RTL_SPI_SFDR));
+}
+
+static void send1(struct rtspi *rtspi, const u8 *buf)
+{
+ wait_ready(rtspi);
+ set_size(rtspi, 1);
+ writel(buf[0] << 24, REG(RTL_SPI_SFDR));
+}
+
+static void rcv4(struct rtspi *rtspi, u32 *buf)
+{
+ wait_ready(rtspi);
+ set_size(rtspi, 4);
+ *buf = readl(REG(RTL_SPI_SFDR));
+}
+
+static void rcv1(struct rtspi *rtspi, u8 *buf)
+{
+ wait_ready(rtspi);
+ set_size(rtspi, 1);
+ *buf = readl(REG(RTL_SPI_SFDR)) >> 24;
+}
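+/*
+ * Single-byte accesses use only the top byte of the 32-bit data register
+ * (buf[0] << 24 on write, >> 24 on read), which presumably lines up with
+ * the big-endian byte ordering selected in init_hw().
+ */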
+
+static int transfer_one(struct spi_controller *ctrl, struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct rtspi *rtspi = spi_controller_get_devdata(ctrl);
+ void *rx_buf;
+ const void *tx_buf;
+ int cnt;
+
+ tx_buf = xfer->tx_buf;
+ rx_buf = xfer->rx_buf;
+ cnt = xfer->len;
+ if (tx_buf) {
+ while (cnt >= 4) {
+ send4(rtspi, tx_buf);
+ tx_buf += 4;
+ cnt -= 4;
+ }
+ while (cnt) {
+ send1(rtspi, tx_buf);
+ tx_buf++;
+ cnt--;
+ }
+ } else if (rx_buf) {
+ while (cnt >= 4) {
+ rcv4(rtspi, rx_buf);
+ rx_buf += 4;
+ cnt -= 4;
+ }
+ while (cnt) {
+ rcv1(rtspi, rx_buf);
+ rx_buf++;
+ cnt--;
+ }
+ }
+
+ spi_finalize_current_transfer(ctrl);
+
+ return 0;
+}
+
+static void init_hw(struct rtspi *rtspi)
+{
+ u32 value;
+
+ /* Turn on big-endian byte ordering */
+ value = readl(REG(RTL_SPI_SFCR));
+ value |= RTL_SPI_SFCR_RBO | RTL_SPI_SFCR_WBO;
+ writel(value, REG(RTL_SPI_SFCR));
+
+ value = readl(REG(RTL_SPI_SFCSR));
+ /* Permanently disable CS1, since it's never used */
+ value |= RTL_SPI_SFCSR_CSB1;
+ /* Select CS0 for use */
+ value &= RTL_SPI_SFCSR_CS;
+ writel(value, REG(RTL_SPI_SFCSR));
+}
+
+static int realtek_rtl_spi_probe(struct platform_device *pdev)
+{
+ struct spi_controller *ctrl;
+ struct rtspi *rtspi;
+ int err;
+
+ ctrl = devm_spi_alloc_master(&pdev->dev, sizeof(*rtspi));
+ if (!ctrl) {
+ dev_err(&pdev->dev, "Error allocating SPI controller\n");
+ return -ENOMEM;
+ }
+ platform_set_drvdata(pdev, ctrl);
+ rtspi = spi_controller_get_devdata(ctrl);
+
+ rtspi->base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
+ if (IS_ERR(rtspi->base)) {
+ dev_err(&pdev->dev, "Could not map SPI register address");
+ return -ENOMEM;
+ }
+
+ init_hw(rtspi);
+
+ ctrl->dev.of_node = pdev->dev.of_node;
+ ctrl->flags = SPI_CONTROLLER_HALF_DUPLEX;
+ ctrl->set_cs = rt_set_cs;
+ ctrl->transfer_one = transfer_one;
+
+ err = devm_spi_register_controller(&pdev->dev, ctrl);
+ if (err) {
+ dev_err(&pdev->dev, "Could not register SPI controller\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+
+static const struct of_device_id realtek_rtl_spi_of_ids[] = {
+ { .compatible = "realtek,rtl8380-spi" },
+ { .compatible = "realtek,rtl8382-spi" },
+ { .compatible = "realtek,rtl8391-spi" },
+ { .compatible = "realtek,rtl8392-spi" },
+ { .compatible = "realtek,rtl8393-spi" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, realtek_rtl_spi_of_ids);
+
+static struct platform_driver realtek_rtl_spi_driver = {
+ .probe = realtek_rtl_spi_probe,
+ .driver = {
+ .name = "realtek-rtl-spi",
+ .of_match_table = realtek_rtl_spi_of_ids,
+ },
+};
+
+module_platform_driver(realtek_rtl_spi_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Bert Vermeulen <bert@biot.com>");
+MODULE_DESCRIPTION("Realtek RTL SPI driver");
diff --git a/drivers/spi/spi-rockchip-sfc.c b/drivers/spi/spi-rockchip-sfc.c
new file mode 100644
index 000000000..69347b6bf
--- /dev/null
+++ b/drivers/spi/spi-rockchip-sfc.c
@@ -0,0 +1,692 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Rockchip Serial Flash Controller Driver
+ *
+ * Copyright (c) 2017-2021, Rockchip Inc.
+ * Author: Shawn Lin <shawn.lin@rock-chips.com>
+ * Chris Morgan <macroalpha82@gmail.com>
+ * Jon Lin <Jon.lin@rock-chips.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/dma-mapping.h>
+#include <linux/iopoll.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/spi/spi-mem.h>
+
+/* System control */
+#define SFC_CTRL 0x0
+#define SFC_CTRL_PHASE_SEL_NEGETIVE BIT(1)
+#define SFC_CTRL_CMD_BITS_SHIFT 8
+#define SFC_CTRL_ADDR_BITS_SHIFT 10
+#define SFC_CTRL_DATA_BITS_SHIFT 12
+
+/* Interrupt mask */
+#define SFC_IMR 0x4
+#define SFC_IMR_RX_FULL BIT(0)
+#define SFC_IMR_RX_UFLOW BIT(1)
+#define SFC_IMR_TX_OFLOW BIT(2)
+#define SFC_IMR_TX_EMPTY BIT(3)
+#define SFC_IMR_TRAN_FINISH BIT(4)
+#define SFC_IMR_BUS_ERR BIT(5)
+#define SFC_IMR_NSPI_ERR BIT(6)
+#define SFC_IMR_DMA BIT(7)
+
+/* Interrupt clear */
+#define SFC_ICLR 0x8
+#define SFC_ICLR_RX_FULL BIT(0)
+#define SFC_ICLR_RX_UFLOW BIT(1)
+#define SFC_ICLR_TX_OFLOW BIT(2)
+#define SFC_ICLR_TX_EMPTY BIT(3)
+#define SFC_ICLR_TRAN_FINISH BIT(4)
+#define SFC_ICLR_BUS_ERR BIT(5)
+#define SFC_ICLR_NSPI_ERR BIT(6)
+#define SFC_ICLR_DMA BIT(7)
+
+/* FIFO threshold level */
+#define SFC_FTLR 0xc
+#define SFC_FTLR_TX_SHIFT 0
+#define SFC_FTLR_TX_MASK 0x1f
+#define SFC_FTLR_RX_SHIFT 8
+#define SFC_FTLR_RX_MASK 0x1f
+
+/* Reset FSM and FIFO */
+#define SFC_RCVR 0x10
+#define SFC_RCVR_RESET BIT(0)
+
+/* Enhanced mode */
+#define SFC_AX 0x14
+
+/* Address Bit number */
+#define SFC_ABIT 0x18
+
+/* Interrupt status */
+#define SFC_ISR 0x1c
+#define SFC_ISR_RX_FULL_SHIFT BIT(0)
+#define SFC_ISR_RX_UFLOW_SHIFT BIT(1)
+#define SFC_ISR_TX_OFLOW_SHIFT BIT(2)
+#define SFC_ISR_TX_EMPTY_SHIFT BIT(3)
+#define SFC_ISR_TX_FINISH_SHIFT BIT(4)
+#define SFC_ISR_BUS_ERR_SHIFT BIT(5)
+#define SFC_ISR_NSPI_ERR_SHIFT BIT(6)
+#define SFC_ISR_DMA_SHIFT BIT(7)
+
+/* FIFO status */
+#define SFC_FSR 0x20
+#define SFC_FSR_TX_IS_FULL BIT(0)
+#define SFC_FSR_TX_IS_EMPTY BIT(1)
+#define SFC_FSR_RX_IS_EMPTY BIT(2)
+#define SFC_FSR_RX_IS_FULL BIT(3)
+#define SFC_FSR_TXLV_MASK GENMASK(12, 8)
+#define SFC_FSR_TXLV_SHIFT 8
+#define SFC_FSR_RXLV_MASK GENMASK(20, 16)
+#define SFC_FSR_RXLV_SHIFT 16
+
+/* FSM status */
+#define SFC_SR 0x24
+#define SFC_SR_IS_IDLE 0x0
+#define SFC_SR_IS_BUSY 0x1
+
+/* Raw interrupt status */
+#define SFC_RISR 0x28
+#define SFC_RISR_RX_FULL BIT(0)
+#define SFC_RISR_RX_UNDERFLOW BIT(1)
+#define SFC_RISR_TX_OVERFLOW BIT(2)
+#define SFC_RISR_TX_EMPTY BIT(3)
+#define SFC_RISR_TRAN_FINISH BIT(4)
+#define SFC_RISR_BUS_ERR BIT(5)
+#define SFC_RISR_NSPI_ERR BIT(6)
+#define SFC_RISR_DMA BIT(7)
+
+/* Version */
+#define SFC_VER 0x2C
+#define SFC_VER_3 0x3
+#define SFC_VER_4 0x4
+#define SFC_VER_5 0x5
+
+/* Delay line controller register */
+#define SFC_DLL_CTRL0 0x3C
+#define SFC_DLL_CTRL0_SCLK_SMP_DLL BIT(15)
+#define SFC_DLL_CTRL0_DLL_MAX_VER4 0xFFU
+#define SFC_DLL_CTRL0_DLL_MAX_VER5 0x1FFU
+
+/* Master trigger */
+#define SFC_DMA_TRIGGER 0x80
+#define SFC_DMA_TRIGGER_START 1
+
+/* Src or Dst addr for master */
+#define SFC_DMA_ADDR 0x84
+
+/* Length control register extension 32GB */
+#define SFC_LEN_CTRL 0x88
+#define SFC_LEN_CTRL_TRB_SEL 1
+#define SFC_LEN_EXT 0x8C
+
+/* Command */
+#define SFC_CMD 0x100
+#define SFC_CMD_IDX_SHIFT 0
+#define SFC_CMD_DUMMY_SHIFT 8
+#define SFC_CMD_DIR_SHIFT 12
+#define SFC_CMD_DIR_RD 0
+#define SFC_CMD_DIR_WR 1
+#define SFC_CMD_ADDR_SHIFT 14
+#define SFC_CMD_ADDR_0BITS 0
+#define SFC_CMD_ADDR_24BITS 1
+#define SFC_CMD_ADDR_32BITS 2
+#define SFC_CMD_ADDR_XBITS 3
+#define SFC_CMD_TRAN_BYTES_SHIFT 16
+#define SFC_CMD_CS_SHIFT 30
+
+/* Address */
+#define SFC_ADDR 0x104
+
+/* Data */
+#define SFC_DATA 0x108
+
+/* The controller and documentation reports that it supports up to 4 CS
+ * devices (0-3), however I have only been able to test a single CS (CS 0)
+ * due to the configuration of my device.
+ */
+#define SFC_MAX_CHIPSELECT_NUM 4
+
+/* The SFC can transfer at most 16KB - 1 at one time,
+ * so we set it to 15.5KB here for alignment.
+ */
+#define SFC_MAX_IOSIZE_VER3 (512 * 31)
+
+/* DMA is only enabled for large data transmission */
+#define SFC_DMA_TRANS_THRETHOLD (0x40)
+
+/* Maximum clock values from datasheet suggest keeping clock value under
+ * 150MHz. No minimum or average value is suggested.
+ */
+#define SFC_MAX_SPEED (150 * 1000 * 1000)
+
+struct rockchip_sfc {
+ struct device *dev;
+ void __iomem *regbase;
+ struct clk *hclk;
+ struct clk *clk;
+ u32 frequency;
+ /* virtual mapped addr for dma_buffer */
+ void *buffer;
+ dma_addr_t dma_buffer;
+ struct completion cp;
+ bool use_dma;
+ u32 max_iosize;
+ u16 version;
+};
+
+static int rockchip_sfc_reset(struct rockchip_sfc *sfc)
+{
+ int err;
+ u32 status;
+
+ writel_relaxed(SFC_RCVR_RESET, sfc->regbase + SFC_RCVR);
+
+ err = readl_poll_timeout(sfc->regbase + SFC_RCVR, status,
+ !(status & SFC_RCVR_RESET), 20,
+ jiffies_to_usecs(HZ));
+ if (err)
+ dev_err(sfc->dev, "SFC reset never finished\n");
+
+ /* Still need to clear the masked interrupt from RISR */
+ writel_relaxed(0xFFFFFFFF, sfc->regbase + SFC_ICLR);
+
+ dev_dbg(sfc->dev, "reset\n");
+
+ return err;
+}
+
+static u16 rockchip_sfc_get_version(struct rockchip_sfc *sfc)
+{
+ return (u16)(readl(sfc->regbase + SFC_VER) & 0xffff);
+}
+
+static u32 rockchip_sfc_get_max_iosize(struct rockchip_sfc *sfc)
+{
+ return SFC_MAX_IOSIZE_VER3;
+}
+
+static void rockchip_sfc_irq_unmask(struct rockchip_sfc *sfc, u32 mask)
+{
+ u32 reg;
+
+	/* Unmask the requested interrupt bits */
+ reg = readl(sfc->regbase + SFC_IMR);
+ reg &= ~mask;
+ writel(reg, sfc->regbase + SFC_IMR);
+}
+
+static void rockchip_sfc_irq_mask(struct rockchip_sfc *sfc, u32 mask)
+{
+ u32 reg;
+
+	/* Mask the requested interrupt bits */
+ reg = readl(sfc->regbase + SFC_IMR);
+ reg |= mask;
+ writel(reg, sfc->regbase + SFC_IMR);
+}
+
+static int rockchip_sfc_init(struct rockchip_sfc *sfc)
+{
+ writel(0, sfc->regbase + SFC_CTRL);
+ writel(0xFFFFFFFF, sfc->regbase + SFC_ICLR);
+ rockchip_sfc_irq_mask(sfc, 0xFFFFFFFF);
+ if (rockchip_sfc_get_version(sfc) >= SFC_VER_4)
+ writel(SFC_LEN_CTRL_TRB_SEL, sfc->regbase + SFC_LEN_CTRL);
+
+ return 0;
+}
+
+static int rockchip_sfc_wait_txfifo_ready(struct rockchip_sfc *sfc, u32 timeout_us)
+{
+ int ret = 0;
+ u32 status;
+
+ ret = readl_poll_timeout(sfc->regbase + SFC_FSR, status,
+ status & SFC_FSR_TXLV_MASK, 0,
+ timeout_us);
+ if (ret) {
+ dev_dbg(sfc->dev, "sfc wait tx fifo timeout\n");
+
+ return -ETIMEDOUT;
+ }
+
+ return (status & SFC_FSR_TXLV_MASK) >> SFC_FSR_TXLV_SHIFT;
+}
+
+static int rockchip_sfc_wait_rxfifo_ready(struct rockchip_sfc *sfc, u32 timeout_us)
+{
+ int ret = 0;
+ u32 status;
+
+ ret = readl_poll_timeout(sfc->regbase + SFC_FSR, status,
+ status & SFC_FSR_RXLV_MASK, 0,
+ timeout_us);
+ if (ret) {
+ dev_dbg(sfc->dev, "sfc wait rx fifo timeout\n");
+
+ return -ETIMEDOUT;
+ }
+
+ return (status & SFC_FSR_RXLV_MASK) >> SFC_FSR_RXLV_SHIFT;
+}
+
+static void rockchip_sfc_adjust_op_work(struct spi_mem_op *op)
+{
+ if (unlikely(op->dummy.nbytes && !op->addr.nbytes)) {
+ /*
+		 * The SFC cannot output DUMMY cycles right after the CMD cycles,
+		 * so treat them as ADDR cycles.
+ */
+ op->addr.nbytes = op->dummy.nbytes;
+ op->addr.buswidth = op->dummy.buswidth;
+ op->addr.val = 0xFFFFFFFFF;
+
+ op->dummy.nbytes = 0;
+ }
+}
+
+static int rockchip_sfc_xfer_setup(struct rockchip_sfc *sfc,
+ struct spi_mem *mem,
+ const struct spi_mem_op *op,
+ u32 len)
+{
+ u32 ctrl = 0, cmd = 0;
+
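+	/*
+	 * The CMD/ADDR/DATA bus-width fields encode the line count as
+	 * buswidth / 2: 0 = single, 1 = dual, 2 = quad.
+	 */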
+ /* set CMD */
+ cmd = op->cmd.opcode;
+ ctrl |= ((op->cmd.buswidth >> 1) << SFC_CTRL_CMD_BITS_SHIFT);
+
+ /* set ADDR */
+ if (op->addr.nbytes) {
+ if (op->addr.nbytes == 4) {
+ cmd |= SFC_CMD_ADDR_32BITS << SFC_CMD_ADDR_SHIFT;
+ } else if (op->addr.nbytes == 3) {
+ cmd |= SFC_CMD_ADDR_24BITS << SFC_CMD_ADDR_SHIFT;
+ } else {
+ cmd |= SFC_CMD_ADDR_XBITS << SFC_CMD_ADDR_SHIFT;
+ writel(op->addr.nbytes * 8 - 1, sfc->regbase + SFC_ABIT);
+ }
+
+ ctrl |= ((op->addr.buswidth >> 1) << SFC_CTRL_ADDR_BITS_SHIFT);
+ }
+
+ /* set DUMMY */
+ if (op->dummy.nbytes) {
+ if (op->dummy.buswidth == 4)
+ cmd |= op->dummy.nbytes * 2 << SFC_CMD_DUMMY_SHIFT;
+ else if (op->dummy.buswidth == 2)
+ cmd |= op->dummy.nbytes * 4 << SFC_CMD_DUMMY_SHIFT;
+ else
+ cmd |= op->dummy.nbytes * 8 << SFC_CMD_DUMMY_SHIFT;
+ }
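+	/*
+	 * i.e. the DUMMY field is a cycle count: nbytes * 8 / buswidth.
+	 */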
+
+ /* set DATA */
+ if (sfc->version >= SFC_VER_4) /* Clear it if no data to transfer */
+ writel(len, sfc->regbase + SFC_LEN_EXT);
+ else
+ cmd |= len << SFC_CMD_TRAN_BYTES_SHIFT;
+ if (len) {
+ if (op->data.dir == SPI_MEM_DATA_OUT)
+ cmd |= SFC_CMD_DIR_WR << SFC_CMD_DIR_SHIFT;
+
+ ctrl |= ((op->data.buswidth >> 1) << SFC_CTRL_DATA_BITS_SHIFT);
+ }
+ if (!len && op->addr.nbytes)
+ cmd |= SFC_CMD_DIR_WR << SFC_CMD_DIR_SHIFT;
+
+ /* set the Controller */
+ ctrl |= SFC_CTRL_PHASE_SEL_NEGETIVE;
+ cmd |= mem->spi->chip_select << SFC_CMD_CS_SHIFT;
+
+ dev_dbg(sfc->dev, "sfc addr.nbytes=%x(x%d) dummy.nbytes=%x(x%d)\n",
+ op->addr.nbytes, op->addr.buswidth,
+ op->dummy.nbytes, op->dummy.buswidth);
+ dev_dbg(sfc->dev, "sfc ctrl=%x cmd=%x addr=%llx len=%x\n",
+ ctrl, cmd, op->addr.val, len);
+
+ writel(ctrl, sfc->regbase + SFC_CTRL);
+ writel(cmd, sfc->regbase + SFC_CMD);
+ if (op->addr.nbytes)
+ writel(op->addr.val, sfc->regbase + SFC_ADDR);
+
+ return 0;
+}
+
+static int rockchip_sfc_write_fifo(struct rockchip_sfc *sfc, const u8 *buf, int len)
+{
+ u8 bytes = len & 0x3;
+ u32 dwords;
+ int tx_level;
+ u32 write_words;
+ u32 tmp = 0;
+
+ dwords = len >> 2;
+ while (dwords) {
+ tx_level = rockchip_sfc_wait_txfifo_ready(sfc, 1000);
+ if (tx_level < 0)
+ return tx_level;
+ write_words = min_t(u32, tx_level, dwords);
+ iowrite32_rep(sfc->regbase + SFC_DATA, buf, write_words);
+ buf += write_words << 2;
+ dwords -= write_words;
+ }
+
+ /* write the remaining non-word-aligned bytes */
+ if (bytes) {
+ tx_level = rockchip_sfc_wait_txfifo_ready(sfc, 1000);
+ if (tx_level < 0)
+ return tx_level;
+ memcpy(&tmp, buf, bytes);
+ writel(tmp, sfc->regbase + SFC_DATA);
+ }
+
+ return len;
+}
+
+static int rockchip_sfc_read_fifo(struct rockchip_sfc *sfc, u8 *buf, int len)
+{
+ u8 bytes = len & 0x3;
+ u32 dwords;
+ u8 read_words;
+ int rx_level;
+ int tmp;
+
+ /* word aligned access only */
+ dwords = len >> 2;
+ while (dwords) {
+ rx_level = rockchip_sfc_wait_rxfifo_ready(sfc, 1000);
+ if (rx_level < 0)
+ return rx_level;
+ read_words = min_t(u32, rx_level, dwords);
+ ioread32_rep(sfc->regbase + SFC_DATA, buf, read_words);
+ buf += read_words << 2;
+ dwords -= read_words;
+ }
+
+ /* read the remaining non-word-aligned bytes */
+ if (bytes) {
+ rx_level = rockchip_sfc_wait_rxfifo_ready(sfc, 1000);
+ if (rx_level < 0)
+ return rx_level;
+ tmp = readl(sfc->regbase + SFC_DATA);
+ memcpy(buf, &tmp, bytes);
+ }
+
+ return len;
+}
+
+static int rockchip_sfc_fifo_transfer_dma(struct rockchip_sfc *sfc, dma_addr_t dma_buf, size_t len)
+{
+ writel(0xFFFFFFFF, sfc->regbase + SFC_ICLR);
+ writel((u32)dma_buf, sfc->regbase + SFC_DMA_ADDR);
+ writel(SFC_DMA_TRIGGER_START, sfc->regbase + SFC_DMA_TRIGGER);
+
+ return len;
+}
+
+static int rockchip_sfc_xfer_data_poll(struct rockchip_sfc *sfc,
+ const struct spi_mem_op *op, u32 len)
+{
+ dev_dbg(sfc->dev, "sfc xfer_poll len=%x\n", len);
+
+ if (op->data.dir == SPI_MEM_DATA_OUT)
+ return rockchip_sfc_write_fifo(sfc, op->data.buf.out, len);
+ else
+ return rockchip_sfc_read_fifo(sfc, op->data.buf.in, len);
+}
+
+static int rockchip_sfc_xfer_data_dma(struct rockchip_sfc *sfc,
+ const struct spi_mem_op *op, u32 len)
+{
+ int ret;
+
+ dev_dbg(sfc->dev, "sfc xfer_dma len=%x\n", len);
+
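+ /* all DMA is staged through the coherent bounce buffer allocated at probe time */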
+ if (op->data.dir == SPI_MEM_DATA_OUT)
+ memcpy(sfc->buffer, op->data.buf.out, len);
+
+ ret = rockchip_sfc_fifo_transfer_dma(sfc, sfc->dma_buffer, len);
+ if (!wait_for_completion_timeout(&sfc->cp, msecs_to_jiffies(2000))) {
+ dev_err(sfc->dev, "DMA wait for transfer finish timeout\n");
+ ret = -ETIMEDOUT;
+ }
+ rockchip_sfc_irq_mask(sfc, SFC_IMR_DMA);
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ memcpy(op->data.buf.in, sfc->buffer, len);
+
+ return ret;
+}
+
+static int rockchip_sfc_xfer_done(struct rockchip_sfc *sfc, u32 timeout_us)
+{
+ int ret = 0;
+ u32 status;
+
+ ret = readl_poll_timeout(sfc->regbase + SFC_SR, status,
+ !(status & SFC_SR_IS_BUSY),
+ 20, timeout_us);
+ if (ret) {
+ dev_err(sfc->dev, "wait sfc idle timeout\n");
+ rockchip_sfc_reset(sfc);
+
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+static int rockchip_sfc_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ struct rockchip_sfc *sfc = spi_master_get_devdata(mem->spi->master);
+ u32 len = op->data.nbytes;
+ int ret;
+
+ if (unlikely(mem->spi->max_speed_hz != sfc->frequency)) {
+ ret = clk_set_rate(sfc->clk, mem->spi->max_speed_hz);
+ if (ret)
+ return ret;
+ sfc->frequency = mem->spi->max_speed_hz;
+ dev_dbg(sfc->dev, "set_freq=%dHz real_freq=%ldHz\n",
+ sfc->frequency, clk_get_rate(sfc->clk));
+ }
+
+ rockchip_sfc_adjust_op_work((struct spi_mem_op *)op);
+ rockchip_sfc_xfer_setup(sfc, mem, op, len);
+ if (len) {
+ if (likely(sfc->use_dma) && len >= SFC_DMA_TRANS_THRETHOLD) {
+ init_completion(&sfc->cp);
+ rockchip_sfc_irq_unmask(sfc, SFC_IMR_DMA);
+ ret = rockchip_sfc_xfer_data_dma(sfc, op, len);
+ } else {
+ ret = rockchip_sfc_xfer_data_poll(sfc, op, len);
+ }
+
+ if (ret != len) {
+ dev_err(sfc->dev, "xfer data failed ret %d dir %d\n", ret, op->data.dir);
+
+ return -EIO;
+ }
+ }
+
+ return rockchip_sfc_xfer_done(sfc, 100000);
+}
+
+static int rockchip_sfc_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
+{
+ struct rockchip_sfc *sfc = spi_master_get_devdata(mem->spi->master);
+
+ op->data.nbytes = min(op->data.nbytes, sfc->max_iosize);
+
+ return 0;
+}
+
+static const struct spi_controller_mem_ops rockchip_sfc_mem_ops = {
+ .exec_op = rockchip_sfc_exec_mem_op,
+ .adjust_op_size = rockchip_sfc_adjust_op_size,
+};
+
+static irqreturn_t rockchip_sfc_irq_handler(int irq, void *dev_id)
+{
+ struct rockchip_sfc *sfc = dev_id;
+ u32 reg;
+
+ reg = readl(sfc->regbase + SFC_RISR);
+
+ /* Clear interrupt */
+ writel_relaxed(reg, sfc->regbase + SFC_ICLR);
+
+ if (reg & SFC_RISR_DMA) {
+ complete(&sfc->cp);
+
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int rockchip_sfc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct spi_master *master;
+ struct resource *res;
+ struct rockchip_sfc *sfc;
+ int ret;
+
+ master = devm_spi_alloc_master(&pdev->dev, sizeof(*sfc));
+ if (!master)
+ return -ENOMEM;
+
+ master->flags = SPI_MASTER_HALF_DUPLEX;
+ master->mem_ops = &rockchip_sfc_mem_ops;
+ master->dev.of_node = pdev->dev.of_node;
+ master->mode_bits = SPI_TX_QUAD | SPI_TX_DUAL | SPI_RX_QUAD | SPI_RX_DUAL;
+ master->max_speed_hz = SFC_MAX_SPEED;
+ master->num_chipselect = SFC_MAX_CHIPSELECT_NUM;
+
+ sfc = spi_master_get_devdata(master);
+ sfc->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ sfc->regbase = devm_ioremap_resource(dev, res);
+ if (IS_ERR(sfc->regbase))
+ return PTR_ERR(sfc->regbase);
+
+ sfc->clk = devm_clk_get(&pdev->dev, "clk_sfc");
+ if (IS_ERR(sfc->clk)) {
+ dev_err(&pdev->dev, "Failed to get sfc interface clk\n");
+ return PTR_ERR(sfc->clk);
+ }
+
+ sfc->hclk = devm_clk_get(&pdev->dev, "hclk_sfc");
+ if (IS_ERR(sfc->hclk)) {
+ dev_err(&pdev->dev, "Failed to get sfc ahb clk\n");
+ return PTR_ERR(sfc->hclk);
+ }
+
+ sfc->use_dma = !of_property_read_bool(sfc->dev->of_node,
+ "rockchip,sfc-no-dma");
+
+ if (sfc->use_dma) {
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_warn(dev, "Unable to set dma mask\n");
+ return ret;
+ }
+
+ sfc->buffer = dmam_alloc_coherent(dev, SFC_MAX_IOSIZE_VER3,
+ &sfc->dma_buffer,
+ GFP_KERNEL);
+ if (!sfc->buffer)
+ return -ENOMEM;
+ }
+
+ ret = clk_prepare_enable(sfc->hclk);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to enable ahb clk\n");
+ goto err_hclk;
+ }
+
+ ret = clk_prepare_enable(sfc->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to enable interface clk\n");
+ goto err_clk;
+ }
+
+ /* Find the irq */
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ goto err_irq;
+
+ ret = devm_request_irq(dev, ret, rockchip_sfc_irq_handler,
+ 0, pdev->name, sfc);
+ if (ret) {
+ dev_err(dev, "Failed to request irq\n");
+
+ goto err_irq;
+ }
+
+ ret = rockchip_sfc_init(sfc);
+ if (ret)
+ goto err_irq;
+
+ sfc->max_iosize = rockchip_sfc_get_max_iosize(sfc);
+ sfc->version = rockchip_sfc_get_version(sfc);
+
+ ret = spi_register_master(master);
+ if (ret)
+ goto err_irq;
+
+ return 0;
+
+err_irq:
+ clk_disable_unprepare(sfc->clk);
+err_clk:
+ clk_disable_unprepare(sfc->hclk);
+err_hclk:
+ return ret;
+}
+
+static int rockchip_sfc_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct rockchip_sfc *sfc = platform_get_drvdata(pdev);
+
+ spi_unregister_master(master);
+
+ clk_disable_unprepare(sfc->clk);
+ clk_disable_unprepare(sfc->hclk);
+
+ return 0;
+}
+
+static const struct of_device_id rockchip_sfc_dt_ids[] = {
+ { .compatible = "rockchip,sfc"},
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, rockchip_sfc_dt_ids);
+
+static struct platform_driver rockchip_sfc_driver = {
+ .driver = {
+ .name = "rockchip-sfc",
+ .of_match_table = rockchip_sfc_dt_ids,
+ },
+ .probe = rockchip_sfc_probe,
+ .remove = rockchip_sfc_remove,
+};
+module_platform_driver(rockchip_sfc_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Rockchip Serial Flash Controller Driver");
+MODULE_AUTHOR("Shawn Lin <shawn.lin@rock-chips.com>");
+MODULE_AUTHOR("Chris Morgan <macromorgan@hotmail.com>");
+MODULE_AUTHOR("Jon Lin <Jon.lin@rock-chips.com>");
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
new file mode 100644
index 000000000..79242dc52
--- /dev/null
+++ b/drivers/spi/spi-rockchip.c
@@ -0,0 +1,1086 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd
+ * Author: Addy Ke <addy.ke@rock-chips.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/pm_runtime.h>
+#include <linux/scatterlist.h>
+
+#define DRIVER_NAME "rockchip-spi"
+
+#define ROCKCHIP_SPI_CLR_BITS(reg, bits) \
+ writel_relaxed(readl_relaxed(reg) & ~(bits), reg)
+#define ROCKCHIP_SPI_SET_BITS(reg, bits) \
+ writel_relaxed(readl_relaxed(reg) | (bits), reg)
+
+/* SPI register offsets */
+#define ROCKCHIP_SPI_CTRLR0 0x0000
+#define ROCKCHIP_SPI_CTRLR1 0x0004
+#define ROCKCHIP_SPI_SSIENR 0x0008
+#define ROCKCHIP_SPI_SER 0x000c
+#define ROCKCHIP_SPI_BAUDR 0x0010
+#define ROCKCHIP_SPI_TXFTLR 0x0014
+#define ROCKCHIP_SPI_RXFTLR 0x0018
+#define ROCKCHIP_SPI_TXFLR 0x001c
+#define ROCKCHIP_SPI_RXFLR 0x0020
+#define ROCKCHIP_SPI_SR 0x0024
+#define ROCKCHIP_SPI_IPR 0x0028
+#define ROCKCHIP_SPI_IMR 0x002c
+#define ROCKCHIP_SPI_ISR 0x0030
+#define ROCKCHIP_SPI_RISR 0x0034
+#define ROCKCHIP_SPI_ICR 0x0038
+#define ROCKCHIP_SPI_DMACR 0x003c
+#define ROCKCHIP_SPI_DMATDLR 0x0040
+#define ROCKCHIP_SPI_DMARDLR 0x0044
+#define ROCKCHIP_SPI_VERSION 0x0048
+#define ROCKCHIP_SPI_TXDR 0x0400
+#define ROCKCHIP_SPI_RXDR 0x0800
+
+/* Bit fields in CTRLR0 */
+#define CR0_DFS_OFFSET 0
+#define CR0_DFS_4BIT 0x0
+#define CR0_DFS_8BIT 0x1
+#define CR0_DFS_16BIT 0x2
+
+#define CR0_CFS_OFFSET 2
+
+#define CR0_SCPH_OFFSET 6
+
+#define CR0_SCPOL_OFFSET 7
+
+#define CR0_CSM_OFFSET 8
+#define CR0_CSM_KEEP 0x0
+/* ss_n is kept high for half an sclk_out cycle */
+#define CR0_CSM_HALF 0x1
+/* ss_n is kept high for one sclk_out cycle */
+#define CR0_CSM_ONE 0x2
+
+/* ss_n to sclk_out delay */
+#define CR0_SSD_OFFSET 10
+/*
+ * The period between ss_n active and
+ * sclk_out active is half an sclk_out cycle
+ */
+#define CR0_SSD_HALF 0x0
+/*
+ * The period between ss_n active and
+ * sclk_out active is one sclk_out cycle
+ */
+#define CR0_SSD_ONE 0x1
+
+#define CR0_EM_OFFSET 11
+#define CR0_EM_LITTLE 0x0
+#define CR0_EM_BIG 0x1
+
+#define CR0_FBM_OFFSET 12
+#define CR0_FBM_MSB 0x0
+#define CR0_FBM_LSB 0x1
+
+#define CR0_BHT_OFFSET 13
+#define CR0_BHT_16BIT 0x0
+#define CR0_BHT_8BIT 0x1
+
+#define CR0_RSD_OFFSET 14
+#define CR0_RSD_MAX 0x3
+
+#define CR0_FRF_OFFSET 16
+#define CR0_FRF_SPI 0x0
+#define CR0_FRF_SSP 0x1
+#define CR0_FRF_MICROWIRE 0x2
+
+#define CR0_XFM_OFFSET 18
+#define CR0_XFM_MASK (0x03 << CR0_XFM_OFFSET)
+#define CR0_XFM_TR 0x0
+#define CR0_XFM_TO 0x1
+#define CR0_XFM_RO 0x2
+
+#define CR0_OPM_OFFSET 20
+#define CR0_OPM_MASTER 0x0
+#define CR0_OPM_SLAVE 0x1
+
+#define CR0_SOI_OFFSET 23
+
+#define CR0_MTM_OFFSET 0x21
+
+/* Bit fields in SER, 2bit */
+#define SER_MASK 0x3
+
+/* Bit fields in BAUDR */
+#define BAUDR_SCKDV_MIN 2
+#define BAUDR_SCKDV_MAX 65534
+
+/* Bit fields in SR, 6bit */
+#define SR_MASK 0x3f
+#define SR_BUSY (1 << 0)
+#define SR_TF_FULL (1 << 1)
+#define SR_TF_EMPTY (1 << 2)
+#define SR_RF_EMPTY (1 << 3)
+#define SR_RF_FULL (1 << 4)
+#define SR_SLAVE_TX_BUSY (1 << 5)
+
+/* Bit fields in ISR, IMR, ISR, RISR, 5bit */
+#define INT_MASK 0x1f
+#define INT_TF_EMPTY (1 << 0)
+#define INT_TF_OVERFLOW (1 << 1)
+#define INT_RF_UNDERFLOW (1 << 2)
+#define INT_RF_OVERFLOW (1 << 3)
+#define INT_RF_FULL (1 << 4)
+#define INT_CS_INACTIVE (1 << 6)
+
+/* Bit fields in ICR, 4bit */
+#define ICR_MASK 0x0f
+#define ICR_ALL (1 << 0)
+#define ICR_RF_UNDERFLOW (1 << 1)
+#define ICR_RF_OVERFLOW (1 << 2)
+#define ICR_TF_OVERFLOW (1 << 3)
+
+/* Bit fields in DMACR */
+#define RF_DMA_EN (1 << 0)
+#define TF_DMA_EN (1 << 1)
+
+/* Driver state flags */
+#define RXDMA (1 << 0)
+#define TXDMA (1 << 1)
+
+/* sclk_out: the spi master internal logic in rk3x supports up to 50 MHz */
+#define MAX_SCLK_OUT 50000000U
+
+/*
+ * SPI_CTRLR1 is 16-bits, so we should support lengths of 0xffff + 1. However,
+ * the controller seems to hang when given 0x10000, so stick with this for now.
+ */
+#define ROCKCHIP_SPI_MAX_TRANLEN 0xffff
+
+/* 2 for native cs, 2 for cs-gpio */
+#define ROCKCHIP_SPI_MAX_CS_NUM 4
+#define ROCKCHIP_SPI_VER2_TYPE1 0x05EC0002
+#define ROCKCHIP_SPI_VER2_TYPE2 0x00110002
+
+#define ROCKCHIP_AUTOSUSPEND_TIMEOUT 2000
+
+struct rockchip_spi {
+ struct device *dev;
+
+ struct clk *spiclk;
+ struct clk *apb_pclk;
+
+ void __iomem *regs;
+ dma_addr_t dma_addr_rx;
+ dma_addr_t dma_addr_tx;
+
+ const void *tx;
+ void *rx;
+ unsigned int tx_left;
+ unsigned int rx_left;
+
+ atomic_t state;
+
+ /* depth of the FIFO buffer */
+ u32 fifo_len;
+ /* frequency of spiclk */
+ u32 freq;
+
+ u8 n_bytes;
+ u8 rsd;
+
+ bool cs_asserted[ROCKCHIP_SPI_MAX_CS_NUM];
+
+ bool slave_abort;
+ bool cs_inactive; /* spi slave transmission stops when cs goes inactive */
+ bool cs_high_supported; /* native CS supports active-high polarity */
+
+ struct spi_transfer *xfer; /* Store xfer temporarily */
+};
+
+static inline void spi_enable_chip(struct rockchip_spi *rs, bool enable)
+{
+ writel_relaxed((enable ? 1U : 0U), rs->regs + ROCKCHIP_SPI_SSIENR);
+}
+
+static inline void wait_for_tx_idle(struct rockchip_spi *rs, bool slave_mode)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(5);
+
+ do {
+ if (slave_mode) {
+ if (!(readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_SLAVE_TX_BUSY) &&
+ !((readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY)))
+ return;
+ } else {
+ if (!(readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY))
+ return;
+ }
+ } while (!time_after(jiffies, timeout));
+
+ dev_warn(rs->dev, "spi controller is in busy state!\n");
+}
+
+static u32 get_fifo_len(struct rockchip_spi *rs)
+{
+ u32 ver;
+
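+ /* infer the FIFO depth from the IP version register: the two known v2 variants have 64-word FIFOs, older IP is assumed to have 32 */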
+ ver = readl_relaxed(rs->regs + ROCKCHIP_SPI_VERSION);
+
+ switch (ver) {
+ case ROCKCHIP_SPI_VER2_TYPE1:
+ case ROCKCHIP_SPI_VER2_TYPE2:
+ return 64;
+ default:
+ return 32;
+ }
+}
+
+static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
+{
+ struct spi_controller *ctlr = spi->controller;
+ struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
+ bool cs_asserted = spi->mode & SPI_CS_HIGH ? enable : !enable;
+
+ /* Return immediately for no-op */
+ if (cs_asserted == rs->cs_asserted[spi->chip_select])
+ return;
+
+ if (cs_asserted) {
+ /* Keep things powered as long as CS is asserted */
+ pm_runtime_get_sync(rs->dev);
+
+ if (spi->cs_gpiod)
+ ROCKCHIP_SPI_SET_BITS(rs->regs + ROCKCHIP_SPI_SER, 1);
+ else
+ ROCKCHIP_SPI_SET_BITS(rs->regs + ROCKCHIP_SPI_SER, BIT(spi->chip_select));
+ } else {
+ if (spi->cs_gpiod)
+ ROCKCHIP_SPI_CLR_BITS(rs->regs + ROCKCHIP_SPI_SER, 1);
+ else
+ ROCKCHIP_SPI_CLR_BITS(rs->regs + ROCKCHIP_SPI_SER, BIT(spi->chip_select));
+
+ /* Drop reference from when we first asserted CS */
+ pm_runtime_put(rs->dev);
+ }
+
+ rs->cs_asserted[spi->chip_select] = cs_asserted;
+}
+
+static void rockchip_spi_handle_err(struct spi_controller *ctlr,
+ struct spi_message *msg)
+{
+ struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
+
+ /* stop running spi transfer
+ * this also flushes both rx and tx fifos
+ */
+ spi_enable_chip(rs, false);
+
+ /* make sure all interrupts are masked and status cleared */
+ writel_relaxed(0, rs->regs + ROCKCHIP_SPI_IMR);
+ writel_relaxed(0xffffffff, rs->regs + ROCKCHIP_SPI_ICR);
+
+ if (atomic_read(&rs->state) & TXDMA)
+ dmaengine_terminate_async(ctlr->dma_tx);
+
+ if (atomic_read(&rs->state) & RXDMA)
+ dmaengine_terminate_async(ctlr->dma_rx);
+}
+
+static void rockchip_spi_pio_writer(struct rockchip_spi *rs)
+{
+ u32 tx_free = rs->fifo_len - readl_relaxed(rs->regs + ROCKCHIP_SPI_TXFLR);
+ u32 words = min(rs->tx_left, tx_free);
+
+ rs->tx_left -= words;
+ for (; words; words--) {
+ u32 txw;
+
+ if (rs->n_bytes == 1)
+ txw = *(u8 *)rs->tx;
+ else
+ txw = *(u16 *)rs->tx;
+
+ writel_relaxed(txw, rs->regs + ROCKCHIP_SPI_TXDR);
+ rs->tx += rs->n_bytes;
+ }
+}
+
+static void rockchip_spi_pio_reader(struct rockchip_spi *rs)
+{
+ u32 words = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXFLR);
+ u32 rx_left = (rs->rx_left > words) ? rs->rx_left - words : 0;
+
+ /* the hardware doesn't allow us to change fifo threshold
+ * level while spi is enabled, so instead make sure to leave
+ * enough words in the rx fifo to get the last interrupt
+ * exactly when all words have been received
+ */
+ if (rx_left) {
+ u32 ftl = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXFTLR) + 1;
+
+ if (rx_left < ftl) {
+ rx_left = ftl;
+ words = rs->rx_left - rx_left;
+ }
+ }
+
+ rs->rx_left = rx_left;
+ for (; words; words--) {
+ u32 rxw = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXDR);
+
+ if (!rs->rx)
+ continue;
+
+ if (rs->n_bytes == 1)
+ *(u8 *)rs->rx = (u8)rxw;
+ else
+ *(u16 *)rs->rx = (u16)rxw;
+ rs->rx += rs->n_bytes;
+ }
+}
+
+static irqreturn_t rockchip_spi_isr(int irq, void *dev_id)
+{
+ struct spi_controller *ctlr = dev_id;
+ struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
+
+ /* When the CS-inactive interrupt fires, abort the SPI slave transfer */
+ if (rs->cs_inactive && readl_relaxed(rs->regs + ROCKCHIP_SPI_IMR) & INT_CS_INACTIVE) {
+ ctlr->slave_abort(ctlr);
+ writel_relaxed(0, rs->regs + ROCKCHIP_SPI_IMR);
+ writel_relaxed(0xffffffff, rs->regs + ROCKCHIP_SPI_ICR);
+
+ return IRQ_HANDLED;
+ }
+
+ if (rs->tx_left)
+ rockchip_spi_pio_writer(rs);
+
+ rockchip_spi_pio_reader(rs);
+ if (!rs->rx_left) {
+ spi_enable_chip(rs, false);
+ writel_relaxed(0, rs->regs + ROCKCHIP_SPI_IMR);
+ writel_relaxed(0xffffffff, rs->regs + ROCKCHIP_SPI_ICR);
+ spi_finalize_current_transfer(ctlr);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int rockchip_spi_prepare_irq(struct rockchip_spi *rs,
+ struct spi_controller *ctlr,
+ struct spi_transfer *xfer)
+{
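+ /* rx_left is tracked even for TX-only transfers: the PIO reader drains and discards the clocked-in words */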
+ rs->tx = xfer->tx_buf;
+ rs->rx = xfer->rx_buf;
+ rs->tx_left = rs->tx ? xfer->len / rs->n_bytes : 0;
+ rs->rx_left = xfer->len / rs->n_bytes;
+
+ writel_relaxed(0xffffffff, rs->regs + ROCKCHIP_SPI_ICR);
+
+ spi_enable_chip(rs, true);
+
+ if (rs->tx_left)
+ rockchip_spi_pio_writer(rs);
+
+ if (rs->cs_inactive)
+ writel_relaxed(INT_RF_FULL | INT_CS_INACTIVE, rs->regs + ROCKCHIP_SPI_IMR);
+ else
+ writel_relaxed(INT_RF_FULL, rs->regs + ROCKCHIP_SPI_IMR);
+
+ /* 1 means the transfer is in progress */
+ return 1;
+}
+
+static void rockchip_spi_dma_rxcb(void *data)
+{
+ struct spi_controller *ctlr = data;
+ struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
+ int state = atomic_fetch_andnot(RXDMA, &rs->state);
+
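+ /* if TX DMA is still in flight, let its callback finalize the transfer */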
+ if (state & TXDMA && !rs->slave_abort)
+ return;
+
+ if (rs->cs_inactive)
+ writel_relaxed(0, rs->regs + ROCKCHIP_SPI_IMR);
+
+ spi_enable_chip(rs, false);
+ spi_finalize_current_transfer(ctlr);
+}
+
+static void rockchip_spi_dma_txcb(void *data)
+{
+ struct spi_controller *ctlr = data;
+ struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
+ int state = atomic_fetch_andnot(TXDMA, &rs->state);
+
+ if (state & RXDMA && !rs->slave_abort)
+ return;
+
+ /* Wait until the TX FIFO data has been transmitted completely. */
+ wait_for_tx_idle(rs, ctlr->slave);
+
+ spi_enable_chip(rs, false);
+ spi_finalize_current_transfer(ctlr);
+}
+
+static u32 rockchip_spi_calc_burst_size(u32 data_len)
+{
+ u32 i;
+
+ /* burst size: 1, 2, 4, 8 */
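+ /* pick the largest burst (up to 8 words) that evenly divides data_len */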
+ for (i = 1; i < 8; i <<= 1) {
+ if (data_len & i)
+ break;
+ }
+
+ return i;
+}
+
+static int rockchip_spi_prepare_dma(struct rockchip_spi *rs,
+ struct spi_controller *ctlr, struct spi_transfer *xfer)
+{
+ struct dma_async_tx_descriptor *rxdesc, *txdesc;
+
+ atomic_set(&rs->state, 0);
+
+ rs->tx = xfer->tx_buf;
+ rs->rx = xfer->rx_buf;
+
+ rxdesc = NULL;
+ if (xfer->rx_buf) {
+ struct dma_slave_config rxconf = {
+ .direction = DMA_DEV_TO_MEM,
+ .src_addr = rs->dma_addr_rx,
+ .src_addr_width = rs->n_bytes,
+ .src_maxburst = rockchip_spi_calc_burst_size(xfer->len / rs->n_bytes),
+ };
+
+ dmaengine_slave_config(ctlr->dma_rx, &rxconf);
+
+ rxdesc = dmaengine_prep_slave_sg(
+ ctlr->dma_rx,
+ xfer->rx_sg.sgl, xfer->rx_sg.nents,
+ DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
+ if (!rxdesc)
+ return -EINVAL;
+
+ rxdesc->callback = rockchip_spi_dma_rxcb;
+ rxdesc->callback_param = ctlr;
+ }
+
+ txdesc = NULL;
+ if (xfer->tx_buf) {
+ struct dma_slave_config txconf = {
+ .direction = DMA_MEM_TO_DEV,
+ .dst_addr = rs->dma_addr_tx,
+ .dst_addr_width = rs->n_bytes,
+ .dst_maxburst = rs->fifo_len / 4,
+ };
+
+ dmaengine_slave_config(ctlr->dma_tx, &txconf);
+
+ txdesc = dmaengine_prep_slave_sg(
+ ctlr->dma_tx,
+ xfer->tx_sg.sgl, xfer->tx_sg.nents,
+ DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
+ if (!txdesc) {
+ if (rxdesc)
+ dmaengine_terminate_sync(ctlr->dma_rx);
+ return -EINVAL;
+ }
+
+ txdesc->callback = rockchip_spi_dma_txcb;
+ txdesc->callback_param = ctlr;
+ }
+
+ /* rx must be started before tx because SPI receives data while it transmits */
+ if (rxdesc) {
+ atomic_or(RXDMA, &rs->state);
+ ctlr->dma_rx->cookie = dmaengine_submit(rxdesc);
+ dma_async_issue_pending(ctlr->dma_rx);
+ }
+
+ if (rs->cs_inactive)
+ writel_relaxed(INT_CS_INACTIVE, rs->regs + ROCKCHIP_SPI_IMR);
+
+ spi_enable_chip(rs, true);
+
+ if (txdesc) {
+ atomic_or(TXDMA, &rs->state);
+ dmaengine_submit(txdesc);
+ dma_async_issue_pending(ctlr->dma_tx);
+ }
+
+ /* 1 means the transfer is in progress */
+ return 1;
+}
+
+static int rockchip_spi_config(struct rockchip_spi *rs,
+ struct spi_device *spi, struct spi_transfer *xfer,
+ bool use_dma, bool slave_mode)
+{
+ u32 cr0 = CR0_FRF_SPI << CR0_FRF_OFFSET
+ | CR0_BHT_8BIT << CR0_BHT_OFFSET
+ | CR0_SSD_ONE << CR0_SSD_OFFSET
+ | CR0_EM_BIG << CR0_EM_OFFSET;
+ u32 cr1;
+ u32 dmacr = 0;
+
+ if (slave_mode)
+ cr0 |= CR0_OPM_SLAVE << CR0_OPM_OFFSET;
+ rs->slave_abort = false;
+
+ cr0 |= rs->rsd << CR0_RSD_OFFSET;
+ cr0 |= (spi->mode & 0x3U) << CR0_SCPH_OFFSET;
+ if (spi->mode & SPI_LSB_FIRST)
+ cr0 |= CR0_FBM_LSB << CR0_FBM_OFFSET;
+ if (spi->mode & SPI_CS_HIGH)
+ cr0 |= BIT(spi->chip_select) << CR0_SOI_OFFSET;
+
+ if (xfer->rx_buf && xfer->tx_buf)
+ cr0 |= CR0_XFM_TR << CR0_XFM_OFFSET;
+ else if (xfer->rx_buf)
+ cr0 |= CR0_XFM_RO << CR0_XFM_OFFSET;
+ else if (use_dma)
+ cr0 |= CR0_XFM_TO << CR0_XFM_OFFSET;
+
+ switch (xfer->bits_per_word) {
+ case 4:
+ cr0 |= CR0_DFS_4BIT << CR0_DFS_OFFSET;
+ cr1 = xfer->len - 1;
+ break;
+ case 8:
+ cr0 |= CR0_DFS_8BIT << CR0_DFS_OFFSET;
+ cr1 = xfer->len - 1;
+ break;
+ case 16:
+ cr0 |= CR0_DFS_16BIT << CR0_DFS_OFFSET;
+ cr1 = xfer->len / 2 - 1;
+ break;
+ default:
+ /* we only whitelist 4, 8 and 16 bit words in
+ * ctlr->bits_per_word_mask, so this shouldn't
+ * happen
+ */
+ dev_err(rs->dev, "unknown bits per word: %d\n",
+ xfer->bits_per_word);
+ return -EINVAL;
+ }
+
+ if (use_dma) {
+ if (xfer->tx_buf)
+ dmacr |= TF_DMA_EN;
+ if (xfer->rx_buf)
+ dmacr |= RF_DMA_EN;
+ }
+
+ writel_relaxed(cr0, rs->regs + ROCKCHIP_SPI_CTRLR0);
+ writel_relaxed(cr1, rs->regs + ROCKCHIP_SPI_CTRLR1);
+
+ /* unfortunately setting the fifo threshold level to generate an
+ * interrupt exactly when the fifo is full doesn't seem to work,
+ * so we need the strict inequality here
+ */
+ if ((xfer->len / rs->n_bytes) < rs->fifo_len)
+ writel_relaxed(xfer->len / rs->n_bytes - 1, rs->regs + ROCKCHIP_SPI_RXFTLR);
+ else
+ writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_RXFTLR);
+
+ writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_DMATDLR);
+ writel_relaxed(rockchip_spi_calc_burst_size(xfer->len / rs->n_bytes) - 1,
+ rs->regs + ROCKCHIP_SPI_DMARDLR);
+ writel_relaxed(dmacr, rs->regs + ROCKCHIP_SPI_DMACR);
+
+ /* the hardware only supports an even clock divisor, so
+ * round divisor = spiclk / speed up to nearest even number
+ * so that the resulting speed is <= the requested speed
+ */
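+ /* e.g. a 99 MHz spiclk and a 40 MHz request yield a divisor of 4, i.e. 24.75 MHz on the wire */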
+ writel_relaxed(2 * DIV_ROUND_UP(rs->freq, 2 * xfer->speed_hz),
+ rs->regs + ROCKCHIP_SPI_BAUDR);
+
+ return 0;
+}
+
+static size_t rockchip_spi_max_transfer_size(struct spi_device *spi)
+{
+ return ROCKCHIP_SPI_MAX_TRANLEN;
+}
+
+static int rockchip_spi_slave_abort(struct spi_controller *ctlr)
+{
+ struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
+ u32 rx_fifo_left;
+ struct dma_tx_state state;
+ enum dma_status status;
+
+ /* Determine how far the rx DMA has progressed */
+ if (atomic_read(&rs->state) & RXDMA) {
+ dmaengine_pause(ctlr->dma_rx);
+ status = dmaengine_tx_status(ctlr->dma_rx, ctlr->dma_rx->cookie, &state);
+ if (status == DMA_ERROR) {
+ rs->rx = rs->xfer->rx_buf;
+ rs->xfer->len = 0;
+ rx_fifo_left = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXFLR);
+ for (; rx_fifo_left; rx_fifo_left--)
+ readl_relaxed(rs->regs + ROCKCHIP_SPI_RXDR);
+ goto out;
+ } else {
+ rs->rx += rs->xfer->len - rs->n_bytes * state.residue;
+ }
+ }
+
+ /* Drain the valid data left in the rx fifo and set rs->xfer->len to the real rx size */
+ if (rs->rx) {
+ rx_fifo_left = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXFLR);
+ for (; rx_fifo_left; rx_fifo_left--) {
+ u32 rxw = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXDR);
+
+ if (rs->n_bytes == 1)
+ *(u8 *)rs->rx = (u8)rxw;
+ else
+ *(u16 *)rs->rx = (u16)rxw;
+ rs->rx += rs->n_bytes;
+ }
+ rs->xfer->len = (unsigned int)(rs->rx - rs->xfer->rx_buf);
+ }
+
+out:
+ if (atomic_read(&rs->state) & RXDMA)
+ dmaengine_terminate_sync(ctlr->dma_rx);
+ if (atomic_read(&rs->state) & TXDMA)
+ dmaengine_terminate_sync(ctlr->dma_tx);
+ atomic_set(&rs->state, 0);
+ spi_enable_chip(rs, false);
+ rs->slave_abort = true;
+ spi_finalize_current_transfer(ctlr);
+
+ return 0;
+}
+
+static int rockchip_spi_transfer_one(
+ struct spi_controller *ctlr,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
+ int ret;
+ bool use_dma;
+
+ /* Zero length transfers won't trigger an interrupt on completion */
+ if (!xfer->len) {
+ spi_finalize_current_transfer(ctlr);
+ return 1;
+ }
+
+ WARN_ON(readl_relaxed(rs->regs + ROCKCHIP_SPI_SSIENR) &&
+ (readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY));
+
+ if (!xfer->tx_buf && !xfer->rx_buf) {
+ dev_err(rs->dev, "No buffer for transfer\n");
+ return -EINVAL;
+ }
+
+ if (xfer->len > ROCKCHIP_SPI_MAX_TRANLEN) {
+ dev_err(rs->dev, "Transfer is too long (%d)\n", xfer->len);
+ return -EINVAL;
+ }
+
+ rs->n_bytes = xfer->bits_per_word <= 8 ? 1 : 2;
+ rs->xfer = xfer;
+ use_dma = ctlr->can_dma ? ctlr->can_dma(ctlr, spi, xfer) : false;
+
+ ret = rockchip_spi_config(rs, spi, xfer, use_dma, ctlr->slave);
+ if (ret)
+ return ret;
+
+ if (use_dma)
+ return rockchip_spi_prepare_dma(rs, ctlr, xfer);
+
+ return rockchip_spi_prepare_irq(rs, ctlr, xfer);
+}
+
+static bool rockchip_spi_can_dma(struct spi_controller *ctlr,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
+ unsigned int bytes_per_word = xfer->bits_per_word <= 8 ? 1 : 2;
+
+ /* if the number of spi words to transfer is less than the fifo
+ * length we can just fill the fifo and wait for a single irq,
+ * so don't bother setting up dma
+ */
+ return xfer->len / bytes_per_word >= rs->fifo_len;
+}
+
+static int rockchip_spi_setup(struct spi_device *spi)
+{
+ struct rockchip_spi *rs = spi_controller_get_devdata(spi->controller);
+ u32 cr0;
+
+ if (!spi->cs_gpiod && (spi->mode & SPI_CS_HIGH) && !rs->cs_high_supported) {
+ dev_warn(&spi->dev, "setup: non GPIO CS can't be active-high\n");
+ return -EINVAL;
+ }
+
+ pm_runtime_get_sync(rs->dev);
+
+ cr0 = readl_relaxed(rs->regs + ROCKCHIP_SPI_CTRLR0);
+
+ cr0 &= ~(0x3 << CR0_SCPH_OFFSET);
+ cr0 |= ((spi->mode & 0x3) << CR0_SCPH_OFFSET);
+ if (spi->mode & SPI_CS_HIGH && spi->chip_select <= 1)
+ cr0 |= BIT(spi->chip_select) << CR0_SOI_OFFSET;
+ else if (spi->chip_select <= 1)
+ cr0 &= ~(BIT(spi->chip_select) << CR0_SOI_OFFSET);
+
+ writel_relaxed(cr0, rs->regs + ROCKCHIP_SPI_CTRLR0);
+
+ pm_runtime_put(rs->dev);
+
+ return 0;
+}
+
+static int rockchip_spi_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct rockchip_spi *rs;
+ struct spi_controller *ctlr;
+ struct resource *mem;
+ struct device_node *np = pdev->dev.of_node;
+ u32 rsd_nsecs, num_cs;
+ bool slave_mode;
+
+ slave_mode = of_property_read_bool(np, "spi-slave");
+
+ if (slave_mode)
+ ctlr = spi_alloc_slave(&pdev->dev,
+ sizeof(struct rockchip_spi));
+ else
+ ctlr = spi_alloc_master(&pdev->dev,
+ sizeof(struct rockchip_spi));
+
+ if (!ctlr)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ctlr);
+
+ rs = spi_controller_get_devdata(ctlr);
+ ctlr->slave = slave_mode;
+
+ /* Get basic io resource and map it */
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ rs->regs = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(rs->regs)) {
+ ret = PTR_ERR(rs->regs);
+ goto err_put_ctlr;
+ }
+
+ rs->apb_pclk = devm_clk_get(&pdev->dev, "apb_pclk");
+ if (IS_ERR(rs->apb_pclk)) {
+ dev_err(&pdev->dev, "Failed to get apb_pclk\n");
+ ret = PTR_ERR(rs->apb_pclk);
+ goto err_put_ctlr;
+ }
+
+ rs->spiclk = devm_clk_get(&pdev->dev, "spiclk");
+ if (IS_ERR(rs->spiclk)) {
+ dev_err(&pdev->dev, "Failed to get spi_pclk\n");
+ ret = PTR_ERR(rs->spiclk);
+ goto err_put_ctlr;
+ }
+
+ ret = clk_prepare_enable(rs->apb_pclk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to enable apb_pclk\n");
+ goto err_put_ctlr;
+ }
+
+ ret = clk_prepare_enable(rs->spiclk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to enable spi_clk\n");
+ goto err_disable_apbclk;
+ }
+
+ spi_enable_chip(rs, false);
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ goto err_disable_spiclk;
+
+ ret = devm_request_threaded_irq(&pdev->dev, ret, rockchip_spi_isr, NULL,
+ IRQF_ONESHOT, dev_name(&pdev->dev), ctlr);
+ if (ret)
+ goto err_disable_spiclk;
+
+ rs->dev = &pdev->dev;
+ rs->freq = clk_get_rate(rs->spiclk);
+
+ if (!of_property_read_u32(pdev->dev.of_node, "rx-sample-delay-ns",
+ &rsd_nsecs)) {
+ /* rx sample delay is expressed in parent clock cycles (max 3) */
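+ /* both terms are pre-shifted by 8 so that rsd_nsecs * freq stays within 32 bits */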
+ u32 rsd = DIV_ROUND_CLOSEST(rsd_nsecs * (rs->freq >> 8),
+ 1000000000 >> 8);
+ if (!rsd) {
+ dev_warn(rs->dev, "%u Hz are too slow to express %u ns delay\n",
+ rs->freq, rsd_nsecs);
+ } else if (rsd > CR0_RSD_MAX) {
+ rsd = CR0_RSD_MAX;
+ dev_warn(rs->dev, "%u Hz are too fast to express %u ns delay, clamping at %u ns\n",
+ rs->freq, rsd_nsecs,
+ CR0_RSD_MAX * 1000000000U / rs->freq);
+ }
+ rs->rsd = rsd;
+ }
+
+ rs->fifo_len = get_fifo_len(rs);
+ if (!rs->fifo_len) {
+ dev_err(&pdev->dev, "Failed to get fifo length\n");
+ ret = -EINVAL;
+ goto err_disable_spiclk;
+ }
+
+ pm_runtime_set_autosuspend_delay(&pdev->dev, ROCKCHIP_AUTOSUSPEND_TIMEOUT);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ ctlr->auto_runtime_pm = true;
+ ctlr->bus_num = pdev->id;
+ ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP | SPI_LSB_FIRST;
+ if (slave_mode) {
+ ctlr->mode_bits |= SPI_NO_CS;
+ ctlr->slave_abort = rockchip_spi_slave_abort;
+ } else {
+ ctlr->flags = SPI_MASTER_GPIO_SS;
+ ctlr->max_native_cs = ROCKCHIP_SPI_MAX_CS_NUM;
+ /*
+ * rk spi0 has two native chip selects, spi1..5 have only one;
+ * if num-cs is missing in the dts, default to 1
+ */
+ if (of_property_read_u32(np, "num-cs", &num_cs))
+ num_cs = 1;
+ ctlr->num_chipselect = num_cs;
+ ctlr->use_gpio_descriptors = true;
+ }
+ ctlr->dev.of_node = pdev->dev.of_node;
+ ctlr->bits_per_word_mask = SPI_BPW_MASK(16) | SPI_BPW_MASK(8) | SPI_BPW_MASK(4);
+ ctlr->min_speed_hz = rs->freq / BAUDR_SCKDV_MAX;
+ ctlr->max_speed_hz = min(rs->freq / BAUDR_SCKDV_MIN, MAX_SCLK_OUT);
+
+ ctlr->setup = rockchip_spi_setup;
+ ctlr->set_cs = rockchip_spi_set_cs;
+ ctlr->transfer_one = rockchip_spi_transfer_one;
+ ctlr->max_transfer_size = rockchip_spi_max_transfer_size;
+ ctlr->handle_err = rockchip_spi_handle_err;
+
+ ctlr->dma_tx = dma_request_chan(rs->dev, "tx");
+ if (IS_ERR(ctlr->dma_tx)) {
+ /* Check tx to see if we need to defer probing the driver */
+ if (PTR_ERR(ctlr->dma_tx) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto err_disable_pm_runtime;
+ }
+ dev_warn(rs->dev, "Failed to request TX DMA channel\n");
+ ctlr->dma_tx = NULL;
+ }
+
+ ctlr->dma_rx = dma_request_chan(rs->dev, "rx");
+ if (IS_ERR(ctlr->dma_rx)) {
+ if (PTR_ERR(ctlr->dma_rx) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto err_free_dma_tx;
+ }
+ dev_warn(rs->dev, "Failed to request RX DMA channel\n");
+ ctlr->dma_rx = NULL;
+ }
+
+ if (ctlr->dma_tx && ctlr->dma_rx) {
+ rs->dma_addr_tx = mem->start + ROCKCHIP_SPI_TXDR;
+ rs->dma_addr_rx = mem->start + ROCKCHIP_SPI_RXDR;
+ ctlr->can_dma = rockchip_spi_can_dma;
+ }
+
+ switch (readl_relaxed(rs->regs + ROCKCHIP_SPI_VERSION)) {
+ case ROCKCHIP_SPI_VER2_TYPE2:
+ rs->cs_high_supported = true;
+ ctlr->mode_bits |= SPI_CS_HIGH;
+ if (ctlr->can_dma && slave_mode)
+ rs->cs_inactive = true;
+ else
+ rs->cs_inactive = false;
+ break;
+ default:
+ rs->cs_inactive = false;
+ break;
+ }
+
+ ret = devm_spi_register_controller(&pdev->dev, ctlr);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register controller\n");
+ goto err_free_dma_rx;
+ }
+
+ return 0;
+
+err_free_dma_rx:
+ if (ctlr->dma_rx)
+ dma_release_channel(ctlr->dma_rx);
+err_free_dma_tx:
+ if (ctlr->dma_tx)
+ dma_release_channel(ctlr->dma_tx);
+err_disable_pm_runtime:
+ pm_runtime_disable(&pdev->dev);
+err_disable_spiclk:
+ clk_disable_unprepare(rs->spiclk);
+err_disable_apbclk:
+ clk_disable_unprepare(rs->apb_pclk);
+err_put_ctlr:
+ spi_controller_put(ctlr);
+
+ return ret;
+}
+
+static int rockchip_spi_remove(struct platform_device *pdev)
+{
+ struct spi_controller *ctlr = spi_controller_get(platform_get_drvdata(pdev));
+ struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
+
+ pm_runtime_get_sync(&pdev->dev);
+
+ clk_disable_unprepare(rs->spiclk);
+ clk_disable_unprepare(rs->apb_pclk);
+
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+
+ if (ctlr->dma_tx)
+ dma_release_channel(ctlr->dma_tx);
+ if (ctlr->dma_rx)
+ dma_release_channel(ctlr->dma_rx);
+
+ spi_controller_put(ctlr);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int rockchip_spi_suspend(struct device *dev)
+{
+ int ret;
+ struct spi_controller *ctlr = dev_get_drvdata(dev);
+ struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
+
+ ret = spi_controller_suspend(ctlr);
+ if (ret < 0)
+ return ret;
+
+ clk_disable_unprepare(rs->spiclk);
+ clk_disable_unprepare(rs->apb_pclk);
+
+ pinctrl_pm_select_sleep_state(dev);
+
+ return 0;
+}
+
+static int rockchip_spi_resume(struct device *dev)
+{
+ int ret;
+ struct spi_controller *ctlr = dev_get_drvdata(dev);
+ struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
+
+ pinctrl_pm_select_default_state(dev);
+
+ ret = clk_prepare_enable(rs->apb_pclk);
+ if (ret < 0)
+ return ret;
+
+ ret = clk_prepare_enable(rs->spiclk);
+ if (ret < 0)
+ clk_disable_unprepare(rs->apb_pclk);
+
+ ret = spi_controller_resume(ctlr);
+ if (ret < 0) {
+ clk_disable_unprepare(rs->spiclk);
+ clk_disable_unprepare(rs->apb_pclk);
+ }
+
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_PM
+static int rockchip_spi_runtime_suspend(struct device *dev)
+{
+ struct spi_controller *ctlr = dev_get_drvdata(dev);
+ struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
+
+ clk_disable_unprepare(rs->spiclk);
+ clk_disable_unprepare(rs->apb_pclk);
+
+ return 0;
+}
+
+static int rockchip_spi_runtime_resume(struct device *dev)
+{
+ int ret;
+ struct spi_controller *ctlr = dev_get_drvdata(dev);
+ struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
+
+ ret = clk_prepare_enable(rs->apb_pclk);
+ if (ret < 0)
+ return ret;
+
+ ret = clk_prepare_enable(rs->spiclk);
+ if (ret < 0)
+ clk_disable_unprepare(rs->apb_pclk);
+
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops rockchip_spi_pm = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(rockchip_spi_suspend, rockchip_spi_resume)
+ SET_RUNTIME_PM_OPS(rockchip_spi_runtime_suspend,
+ rockchip_spi_runtime_resume, NULL)
+};
+
+static const struct of_device_id rockchip_spi_dt_match[] = {
+ { .compatible = "rockchip,px30-spi", },
+ { .compatible = "rockchip,rk3036-spi", },
+ { .compatible = "rockchip,rk3066-spi", },
+ { .compatible = "rockchip,rk3188-spi", },
+ { .compatible = "rockchip,rk3228-spi", },
+ { .compatible = "rockchip,rk3288-spi", },
+ { .compatible = "rockchip,rk3308-spi", },
+ { .compatible = "rockchip,rk3328-spi", },
+ { .compatible = "rockchip,rk3368-spi", },
+ { .compatible = "rockchip,rk3399-spi", },
+ { .compatible = "rockchip,rv1108-spi", },
+ { .compatible = "rockchip,rv1126-spi", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, rockchip_spi_dt_match);
+
+static struct platform_driver rockchip_spi_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .pm = &rockchip_spi_pm,
+ .of_match_table = of_match_ptr(rockchip_spi_dt_match),
+ },
+ .probe = rockchip_spi_probe,
+ .remove = rockchip_spi_remove,
+};
+
+module_platform_driver(rockchip_spi_driver);
+
+MODULE_AUTHOR("Addy Ke <addy.ke@rock-chips.com>");
+MODULE_DESCRIPTION("ROCKCHIP SPI Controller Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-rpc-if.c b/drivers/spi/spi-rpc-if.c
new file mode 100644
index 000000000..24ec1c83f
--- /dev/null
+++ b/drivers/spi/spi-rpc-if.c
@@ -0,0 +1,216 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// RPC-IF SPI/QSPI/Octa driver
+//
+// Copyright (C) 2018 ~ 2019 Renesas Solutions Corp.
+// Copyright (C) 2019 Macronix International Co., Ltd.
+// Copyright (C) 2019 - 2020 Cogent Embedded, Inc.
+//
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+
+#include <memory/renesas-rpc-if.h>
+
+#include <asm/unaligned.h>
+
+static void rpcif_spi_mem_prepare(struct spi_device *spi_dev,
+ const struct spi_mem_op *spi_op,
+ u64 *offs, size_t *len)
+{
+ struct rpcif *rpc = spi_controller_get_devdata(spi_dev->controller);
+ struct rpcif_op rpc_op = { };
+
+ rpc_op.cmd.opcode = spi_op->cmd.opcode;
+ rpc_op.cmd.buswidth = spi_op->cmd.buswidth;
+
+ if (spi_op->addr.nbytes) {
+ rpc_op.addr.buswidth = spi_op->addr.buswidth;
+ rpc_op.addr.nbytes = spi_op->addr.nbytes;
+ rpc_op.addr.val = spi_op->addr.val;
+ }
+
+ if (spi_op->dummy.nbytes) {
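+ /* spi-mem expresses the dummy phase in bytes; RPC-IF expects bus clock cycles */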
+ rpc_op.dummy.buswidth = spi_op->dummy.buswidth;
+ rpc_op.dummy.ncycles = spi_op->dummy.nbytes * 8 /
+ spi_op->dummy.buswidth;
+ }
+
+ if (spi_op->data.nbytes || (offs && len)) {
+ rpc_op.data.buswidth = spi_op->data.buswidth;
+ rpc_op.data.nbytes = spi_op->data.nbytes;
+ switch (spi_op->data.dir) {
+ case SPI_MEM_DATA_IN:
+ rpc_op.data.dir = RPCIF_DATA_IN;
+ rpc_op.data.buf.in = spi_op->data.buf.in;
+ break;
+ case SPI_MEM_DATA_OUT:
+ rpc_op.data.dir = RPCIF_DATA_OUT;
+ rpc_op.data.buf.out = spi_op->data.buf.out;
+ break;
+ case SPI_MEM_NO_DATA:
+ rpc_op.data.dir = RPCIF_NO_DATA;
+ break;
+ }
+ } else {
+ rpc_op.data.dir = RPCIF_NO_DATA;
+ }
+
+ rpcif_prepare(rpc, &rpc_op, offs, len);
+}
+
+static bool rpcif_spi_mem_supports_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ if (!spi_mem_default_supports_op(mem, op))
+ return false;
+
+ if (op->data.buswidth > 4 || op->addr.buswidth > 4 ||
+ op->dummy.buswidth > 4 || op->cmd.buswidth > 4 ||
+ op->addr.nbytes > 4)
+ return false;
+
+ return true;
+}
+
+static ssize_t rpcif_spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc,
+ u64 offs, size_t len, void *buf)
+{
+ struct rpcif *rpc =
+ spi_controller_get_devdata(desc->mem->spi->controller);
+
+ if (offs + desc->info.offset + len > U32_MAX)
+ return -EINVAL;
+
+ rpcif_spi_mem_prepare(desc->mem->spi, &desc->info.op_tmpl, &offs, &len);
+
+ return rpcif_dirmap_read(rpc, offs, len, buf);
+}
+
+static int rpcif_spi_mem_dirmap_create(struct spi_mem_dirmap_desc *desc)
+{
+ struct rpcif *rpc =
+ spi_controller_get_devdata(desc->mem->spi->controller);
+
+ if (desc->info.offset + desc->info.length > U32_MAX)
+ return -ENOTSUPP;
+
+ if (!rpcif_spi_mem_supports_op(desc->mem, &desc->info.op_tmpl))
+ return -ENOTSUPP;
+
+ if (!rpc->dirmap && desc->info.op_tmpl.data.dir == SPI_MEM_DATA_IN)
+ return -ENOTSUPP;
+
+ if (desc->info.op_tmpl.data.dir == SPI_MEM_DATA_OUT)
+ return -ENOTSUPP;
+
+ return 0;
+}
+
+static int rpcif_spi_mem_exec_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ struct rpcif *rpc =
+ spi_controller_get_devdata(mem->spi->controller);
+
+ rpcif_spi_mem_prepare(mem->spi, op, NULL, NULL);
+
+ return rpcif_manual_xfer(rpc);
+}
+
+static const struct spi_controller_mem_ops rpcif_spi_mem_ops = {
+ .supports_op = rpcif_spi_mem_supports_op,
+ .exec_op = rpcif_spi_mem_exec_op,
+ .dirmap_create = rpcif_spi_mem_dirmap_create,
+ .dirmap_read = rpcif_spi_mem_dirmap_read,
+};
+
+static int rpcif_spi_probe(struct platform_device *pdev)
+{
+ struct device *parent = pdev->dev.parent;
+ struct spi_controller *ctlr;
+ struct rpcif *rpc;
+ int error;
+
+ ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*rpc));
+ if (!ctlr)
+ return -ENOMEM;
+
+ rpc = spi_controller_get_devdata(ctlr);
+ error = rpcif_sw_init(rpc, parent);
+ if (error)
+ return error;
+
+ platform_set_drvdata(pdev, ctlr);
+
+ ctlr->dev.of_node = parent->of_node;
+
+ rpcif_enable_rpm(rpc);
+
+ ctlr->num_chipselect = 1;
+ ctlr->mem_ops = &rpcif_spi_mem_ops;
+
+ ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
+ ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_TX_QUAD | SPI_RX_QUAD;
+ ctlr->flags = SPI_CONTROLLER_HALF_DUPLEX;
+
+ error = rpcif_hw_init(rpc, false);
+ if (error)
+ goto out_disable_rpm;
+
+ error = spi_register_controller(ctlr);
+ if (error) {
+ dev_err(&pdev->dev, "spi_register_controller failed\n");
+ goto out_disable_rpm;
+ }
+
+ return 0;
+
+out_disable_rpm:
+ rpcif_disable_rpm(rpc);
+ return error;
+}
+
+static int rpcif_spi_remove(struct platform_device *pdev)
+{
+ struct spi_controller *ctlr = platform_get_drvdata(pdev);
+ struct rpcif *rpc = spi_controller_get_devdata(ctlr);
+
+ spi_unregister_controller(ctlr);
+ rpcif_disable_rpm(rpc);
+
+ return 0;
+}
+
+static int __maybe_unused rpcif_spi_suspend(struct device *dev)
+{
+ struct spi_controller *ctlr = dev_get_drvdata(dev);
+
+ return spi_controller_suspend(ctlr);
+}
+
+static int __maybe_unused rpcif_spi_resume(struct device *dev)
+{
+ struct spi_controller *ctlr = dev_get_drvdata(dev);
+
+ return spi_controller_resume(ctlr);
+}
+
+static SIMPLE_DEV_PM_OPS(rpcif_spi_pm_ops, rpcif_spi_suspend, rpcif_spi_resume);
+
+static struct platform_driver rpcif_spi_driver = {
+ .probe = rpcif_spi_probe,
+ .remove = rpcif_spi_remove,
+ .driver = {
+ .name = "rpc-if-spi",
+#ifdef CONFIG_PM_SLEEP
+ .pm = &rpcif_spi_pm_ops,
+#endif
+ },
+};
+module_platform_driver(rpcif_spi_driver);
+
+MODULE_DESCRIPTION("Renesas RPC-IF SPI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
new file mode 100644
index 000000000..411b1307b
--- /dev/null
+++ b/drivers/spi/spi-rspi.c
@@ -0,0 +1,1455 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SH RSPI driver
+ *
+ * Copyright (C) 2012, 2013 Renesas Solutions Corp.
+ * Copyright (C) 2014 Glider bvba
+ *
+ * Based on spi-sh.c:
+ * Copyright (C) 2011 Renesas Solutions Corp.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/sh_dma.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/rspi.h>
+#include <linux/spinlock.h>
+
+#define RSPI_SPCR 0x00 /* Control Register */
+#define RSPI_SSLP 0x01 /* Slave Select Polarity Register */
+#define RSPI_SPPCR 0x02 /* Pin Control Register */
+#define RSPI_SPSR 0x03 /* Status Register */
+#define RSPI_SPDR 0x04 /* Data Register */
+#define RSPI_SPSCR 0x08 /* Sequence Control Register */
+#define RSPI_SPSSR 0x09 /* Sequence Status Register */
+#define RSPI_SPBR 0x0a /* Bit Rate Register */
+#define RSPI_SPDCR 0x0b /* Data Control Register */
+#define RSPI_SPCKD 0x0c /* Clock Delay Register */
+#define RSPI_SSLND 0x0d /* Slave Select Negation Delay Register */
+#define RSPI_SPND 0x0e /* Next-Access Delay Register */
+#define RSPI_SPCR2 0x0f /* Control Register 2 (SH only) */
+#define RSPI_SPCMD0 0x10 /* Command Register 0 */
+#define RSPI_SPCMD1 0x12 /* Command Register 1 */
+#define RSPI_SPCMD2 0x14 /* Command Register 2 */
+#define RSPI_SPCMD3 0x16 /* Command Register 3 */
+#define RSPI_SPCMD4 0x18 /* Command Register 4 */
+#define RSPI_SPCMD5 0x1a /* Command Register 5 */
+#define RSPI_SPCMD6 0x1c /* Command Register 6 */
+#define RSPI_SPCMD7 0x1e /* Command Register 7 */
+#define RSPI_SPCMD(i) (RSPI_SPCMD0 + (i) * 2)
+#define RSPI_NUM_SPCMD 8
+#define RSPI_RZ_NUM_SPCMD 4
+#define QSPI_NUM_SPCMD 4
+
+/* RSPI on RZ only */
+#define RSPI_SPBFCR 0x20 /* Buffer Control Register */
+#define RSPI_SPBFDR 0x22 /* Buffer Data Count Setting Register */
+
+/* QSPI only */
+#define QSPI_SPBFCR 0x18 /* Buffer Control Register */
+#define QSPI_SPBDCR 0x1a /* Buffer Data Count Register */
+#define QSPI_SPBMUL0 0x1c /* Transfer Data Length Multiplier Setting Register 0 */
+#define QSPI_SPBMUL1 0x20 /* Transfer Data Length Multiplier Setting Register 1 */
+#define QSPI_SPBMUL2 0x24 /* Transfer Data Length Multiplier Setting Register 2 */
+#define QSPI_SPBMUL3 0x28 /* Transfer Data Length Multiplier Setting Register 3 */
+#define QSPI_SPBMUL(i) (QSPI_SPBMUL0 + (i) * 4)
+
+/* SPCR - Control Register */
+#define SPCR_SPRIE 0x80 /* Receive Interrupt Enable */
+#define SPCR_SPE 0x40 /* Function Enable */
+#define SPCR_SPTIE 0x20 /* Transmit Interrupt Enable */
+#define SPCR_SPEIE 0x10 /* Error Interrupt Enable */
+#define SPCR_MSTR 0x08 /* Master/Slave Mode Select */
+#define SPCR_MODFEN 0x04 /* Mode Fault Error Detection Enable */
+/* RSPI on SH only */
+#define SPCR_TXMD 0x02 /* TX Only Mode (vs. Full Duplex) */
+#define SPCR_SPMS 0x01 /* 3-wire Mode (vs. 4-wire) */
+/* QSPI on R-Car Gen2 only */
+#define SPCR_WSWAP 0x02 /* Word Swap of read-data for DMAC */
+#define SPCR_BSWAP 0x01 /* Byte Swap of read-data for DMAC */
+
+/* SSLP - Slave Select Polarity Register */
+#define SSLP_SSLP(i) BIT(i) /* SSLi Signal Polarity Setting */
+
+/* SPPCR - Pin Control Register */
+#define SPPCR_MOIFE 0x20 /* MOSI Idle Value Fixing Enable */
+#define SPPCR_MOIFV 0x10 /* MOSI Idle Fixed Value */
+#define SPPCR_SPOM 0x04
+#define SPPCR_SPLP2 0x02 /* Loopback Mode 2 (non-inverting) */
+#define SPPCR_SPLP 0x01 /* Loopback Mode (inverting) */
+
+#define SPPCR_IO3FV 0x04 /* Single-/Dual-SPI Mode IO3 Output Fixed Value */
+#define SPPCR_IO2FV 0x04 /* Single-/Dual-SPI Mode IO2 Output Fixed Value */
+
+/* SPSR - Status Register */
+#define SPSR_SPRF 0x80 /* Receive Buffer Full Flag */
+#define SPSR_TEND 0x40 /* Transmit End */
+#define SPSR_SPTEF 0x20 /* Transmit Buffer Empty Flag */
+#define SPSR_PERF 0x08 /* Parity Error Flag */
+#define SPSR_MODF 0x04 /* Mode Fault Error Flag */
+#define SPSR_IDLNF 0x02 /* RSPI Idle Flag */
+#define SPSR_OVRF 0x01 /* Overrun Error Flag (RSPI only) */
+
+/* SPSCR - Sequence Control Register */
+#define SPSCR_SPSLN_MASK 0x07 /* Sequence Length Specification */
+
+/* SPSSR - Sequence Status Register */
+#define SPSSR_SPECM_MASK 0x70 /* Command Error Mask */
+#define SPSSR_SPCP_MASK 0x07 /* Command Pointer Mask */
+
+/* SPDCR - Data Control Register */
+#define SPDCR_TXDMY 0x80 /* Dummy Data Transmission Enable */
+#define SPDCR_SPLW1 0x40 /* Access Width Specification (RZ) */
+#define SPDCR_SPLW0 0x20 /* Access Width Specification (RZ) */
+#define SPDCR_SPLLWORD (SPDCR_SPLW1 | SPDCR_SPLW0)
+#define SPDCR_SPLWORD SPDCR_SPLW1
+#define SPDCR_SPLBYTE SPDCR_SPLW0
+#define SPDCR_SPLW 0x20 /* Access Width Specification (SH) */
+#define SPDCR_SPRDTD 0x10 /* Receive Transmit Data Select (SH) */
+#define SPDCR_SLSEL1 0x08
+#define SPDCR_SLSEL0 0x04
+#define SPDCR_SLSEL_MASK 0x0c /* SSL1 Output Select (SH) */
+#define SPDCR_SPFC1 0x02
+#define SPDCR_SPFC0 0x01
+#define SPDCR_SPFC_MASK 0x03 /* Frame Count Setting (1-4) (SH) */
+
+/* SPCKD - Clock Delay Register */
+#define SPCKD_SCKDL_MASK 0x07 /* Clock Delay Setting (1-8) */
+
+/* SSLND - Slave Select Negation Delay Register */
+#define SSLND_SLNDL_MASK 0x07 /* SSL Negation Delay Setting (1-8) */
+
+/* SPND - Next-Access Delay Register */
+#define SPND_SPNDL_MASK 0x07 /* Next-Access Delay Setting (1-8) */
+
+/* SPCR2 - Control Register 2 */
+#define SPCR2_PTE 0x08 /* Parity Self-Test Enable */
+#define SPCR2_SPIE 0x04 /* Idle Interrupt Enable */
+#define SPCR2_SPOE 0x02 /* Odd Parity Enable (vs. Even) */
+#define SPCR2_SPPE 0x01 /* Parity Enable */
+
+/* SPCMDn - Command Registers */
+#define SPCMD_SCKDEN 0x8000 /* Clock Delay Setting Enable */
+#define SPCMD_SLNDEN 0x4000 /* SSL Negation Delay Setting Enable */
+#define SPCMD_SPNDEN 0x2000 /* Next-Access Delay Enable */
+#define SPCMD_LSBF 0x1000 /* LSB First */
+#define SPCMD_SPB_MASK 0x0f00 /* Data Length Setting */
+#define SPCMD_SPB_8_TO_16(bit) (((bit - 1) << 8) & SPCMD_SPB_MASK)
+#define SPCMD_SPB_8BIT 0x0000 /* QSPI only */
+#define SPCMD_SPB_16BIT 0x0100
+#define SPCMD_SPB_20BIT 0x0000
+#define SPCMD_SPB_24BIT 0x0100
+#define SPCMD_SPB_32BIT 0x0200
+#define SPCMD_SSLKP 0x0080 /* SSL Signal Level Keeping */
+#define SPCMD_SPIMOD_MASK 0x0060 /* SPI Operating Mode (QSPI only) */
+#define SPCMD_SPIMOD1 0x0040
+#define SPCMD_SPIMOD0 0x0020
+#define SPCMD_SPIMOD_SINGLE 0
+#define SPCMD_SPIMOD_DUAL SPCMD_SPIMOD0
+#define SPCMD_SPIMOD_QUAD SPCMD_SPIMOD1
+#define SPCMD_SPRW 0x0010 /* SPI Read/Write Access (Dual/Quad) */
+#define SPCMD_SSLA(i) ((i) << 4) /* SSL Assert Signal Setting */
+#define SPCMD_BRDV_MASK 0x000c /* Bit Rate Division Setting */
+#define SPCMD_BRDV(brdv) ((brdv) << 2)
+#define SPCMD_CPOL 0x0002 /* Clock Polarity Setting */
+#define SPCMD_CPHA 0x0001 /* Clock Phase Setting */
+
+/* SPBFCR - Buffer Control Register */
+#define SPBFCR_TXRST 0x80 /* Transmit Buffer Data Reset */
+#define SPBFCR_RXRST 0x40 /* Receive Buffer Data Reset */
+#define SPBFCR_TXTRG_MASK 0x30 /* Transmit Buffer Data Triggering Number */
+#define SPBFCR_RXTRG_MASK 0x07 /* Receive Buffer Data Triggering Number */
+/* QSPI on R-Car Gen2 */
+#define SPBFCR_TXTRG_1B 0x00 /* 31 bytes (1 byte available) */
+#define SPBFCR_TXTRG_32B 0x30 /* 0 byte (32 bytes available) */
+#define SPBFCR_RXTRG_1B 0x00 /* 1 byte (31 bytes available) */
+#define SPBFCR_RXTRG_32B 0x07 /* 32 bytes (0 byte available) */
+
+#define QSPI_BUFFER_SIZE 32u
+
+struct rspi_data {
+ void __iomem *addr;
+ u32 speed_hz;
+ struct spi_controller *ctlr;
+ struct platform_device *pdev;
+ wait_queue_head_t wait;
+ spinlock_t lock; /* Protects RMW-access to RSPI_SSLP */
+ struct clk *clk;
+ u16 spcmd;
+ u8 spsr;
+ u8 sppcr;
+ int rx_irq, tx_irq;
+ const struct spi_ops *ops;
+
+ unsigned dma_callbacked:1;
+ unsigned byte_access:1;
+};
+
+static void rspi_write8(const struct rspi_data *rspi, u8 data, u16 offset)
+{
+ iowrite8(data, rspi->addr + offset);
+}
+
+static void rspi_write16(const struct rspi_data *rspi, u16 data, u16 offset)
+{
+ iowrite16(data, rspi->addr + offset);
+}
+
+static void rspi_write32(const struct rspi_data *rspi, u32 data, u16 offset)
+{
+ iowrite32(data, rspi->addr + offset);
+}
+
+static u8 rspi_read8(const struct rspi_data *rspi, u16 offset)
+{
+ return ioread8(rspi->addr + offset);
+}
+
+static u16 rspi_read16(const struct rspi_data *rspi, u16 offset)
+{
+ return ioread16(rspi->addr + offset);
+}
+
+static void rspi_write_data(const struct rspi_data *rspi, u16 data)
+{
+ if (rspi->byte_access)
+ rspi_write8(rspi, data, RSPI_SPDR);
+ else /* 16 bit */
+ rspi_write16(rspi, data, RSPI_SPDR);
+}
+
+static u16 rspi_read_data(const struct rspi_data *rspi)
+{
+ if (rspi->byte_access)
+ return rspi_read8(rspi, RSPI_SPDR);
+ else /* 16 bit */
+ return rspi_read16(rspi, RSPI_SPDR);
+}
+
+/* SoC-variant-specific operations and parameters */
+struct spi_ops {
+ int (*set_config_register)(struct rspi_data *rspi, int access_size);
+ int (*transfer_one)(struct spi_controller *ctlr,
+ struct spi_device *spi, struct spi_transfer *xfer);
+ u16 extra_mode_bits;
+ u16 min_div;
+ u16 max_div;
+ u16 flags;
+ u16 fifo_size;
+ u8 num_hw_ss;
+};
+
+static void rspi_set_rate(struct rspi_data *rspi)
+{
+ unsigned long clksrc;
+ int brdv = 0, spbr;
+
+ clksrc = clk_get_rate(rspi->clk);
+ spbr = DIV_ROUND_UP(clksrc, 2 * rspi->speed_hz) - 1;
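+ /* SPBR is only 8 bits wide; fold any excess ratio into BRDV, each step of which halves the rate again */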
+ while (spbr > 255 && brdv < 3) {
+ brdv++;
+ spbr = DIV_ROUND_UP(spbr + 1, 2) - 1;
+ }
+
+ rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
+ rspi->spcmd |= SPCMD_BRDV(brdv);
+ rspi->speed_hz = DIV_ROUND_UP(clksrc, (2U << brdv) * (spbr + 1));
+}
+
+/*
+ * functions for RSPI on legacy SH
+ */
+static int rspi_set_config_register(struct rspi_data *rspi, int access_size)
+{
+ /* Sets output mode, MOSI signal, and (optionally) loopback */
+ rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);
+
+ /* Sets transfer bit rate */
+ rspi_set_rate(rspi);
+
+ /* Disable dummy transmission, set 16-bit word access, 1 frame */
+ rspi_write8(rspi, 0, RSPI_SPDCR);
+ rspi->byte_access = 0;
+
+ /* Sets RSPCK, SSL, next-access delay value */
+ rspi_write8(rspi, 0x00, RSPI_SPCKD);
+ rspi_write8(rspi, 0x00, RSPI_SSLND);
+ rspi_write8(rspi, 0x00, RSPI_SPND);
+
+ /* Sets parity, interrupt mask */
+ rspi_write8(rspi, 0x00, RSPI_SPCR2);
+
+ /* Resets sequencer */
+ rspi_write8(rspi, 0, RSPI_SPSCR);
+ rspi->spcmd |= SPCMD_SPB_8_TO_16(access_size);
+ rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
+
+ /* Sets RSPI mode */
+ rspi_write8(rspi, SPCR_MSTR, RSPI_SPCR);
+
+ return 0;
+}
+
+/*
+ * functions for RSPI on RZ
+ */
+static int rspi_rz_set_config_register(struct rspi_data *rspi, int access_size)
+{
+ /* Sets output mode, MOSI signal, and (optionally) loopback */
+ rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);
+
+ /* Sets transfer bit rate */
+ rspi_set_rate(rspi);
+
+ /* Disable dummy transmission, set byte access */
+ rspi_write8(rspi, SPDCR_SPLBYTE, RSPI_SPDCR);
+ rspi->byte_access = 1;
+
+ /* Sets RSPCK, SSL, next-access delay value */
+ rspi_write8(rspi, 0x00, RSPI_SPCKD);
+ rspi_write8(rspi, 0x00, RSPI_SSLND);
+ rspi_write8(rspi, 0x00, RSPI_SPND);
+
+ /* Resets sequencer */
+ rspi_write8(rspi, 0, RSPI_SPSCR);
+ rspi->spcmd |= SPCMD_SPB_8_TO_16(access_size);
+ rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
+
+ /* Sets RSPI mode */
+ rspi_write8(rspi, SPCR_MSTR, RSPI_SPCR);
+
+ return 0;
+}
+
+/*
+ * functions for QSPI
+ */
+static int qspi_set_config_register(struct rspi_data *rspi, int access_size)
+{
+ unsigned long clksrc;
+ int brdv = 0, spbr;
+
+ /* Sets output mode, MOSI signal, and (optionally) loopback */
+ rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);
+
+ /* Sets transfer bit rate */
+ clksrc = clk_get_rate(rspi->clk);
+ if (rspi->speed_hz >= clksrc) {
+ spbr = 0;
+ rspi->speed_hz = clksrc;
+ } else {
+ spbr = DIV_ROUND_UP(clksrc, 2 * rspi->speed_hz);
+ while (spbr > 255 && brdv < 3) {
+ brdv++;
+ spbr = DIV_ROUND_UP(spbr, 2);
+ }
+ spbr = clamp(spbr, 0, 255);
+ rspi->speed_hz = DIV_ROUND_UP(clksrc, (2U << brdv) * spbr);
+ }
+ rspi_write8(rspi, spbr, RSPI_SPBR);
+ rspi->spcmd |= SPCMD_BRDV(brdv);
+
+ /* Disable dummy transmission, set byte access */
+ rspi_write8(rspi, 0, RSPI_SPDCR);
+ rspi->byte_access = 1;
+
+ /* Sets RSPCK, SSL, next-access delay value */
+ rspi_write8(rspi, 0x00, RSPI_SPCKD);
+ rspi_write8(rspi, 0x00, RSPI_SSLND);
+ rspi_write8(rspi, 0x00, RSPI_SPND);
+
+ /* Data Length Setting */
+ if (access_size == 8)
+ rspi->spcmd |= SPCMD_SPB_8BIT;
+ else if (access_size == 16)
+ rspi->spcmd |= SPCMD_SPB_16BIT;
+ else
+ rspi->spcmd |= SPCMD_SPB_32BIT;
+
+ rspi->spcmd |= SPCMD_SCKDEN | SPCMD_SLNDEN | SPCMD_SPNDEN;
+
+ /* Resets transfer data length */
+ rspi_write32(rspi, 0, QSPI_SPBMUL0);
+
+ /* Resets transmit and receive buffer */
+ rspi_write8(rspi, SPBFCR_TXRST | SPBFCR_RXRST, QSPI_SPBFCR);
+ /* Sets buffer to allow normal operation */
+ rspi_write8(rspi, 0x00, QSPI_SPBFCR);
+
+ /* Resets sequencer */
+ rspi_write8(rspi, 0, RSPI_SPSCR);
+ rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
+
+ /* Sets RSPI mode */
+ rspi_write8(rspi, SPCR_MSTR, RSPI_SPCR);
+
+ return 0;
+}
+
+static void qspi_update(const struct rspi_data *rspi, u8 mask, u8 val, u8 reg)
+{
+ u8 data;
+
+ data = rspi_read8(rspi, reg);
+ data &= ~mask;
+ data |= (val & mask);
+ rspi_write8(rspi, data, reg);
+}
+
+static unsigned int qspi_set_send_trigger(struct rspi_data *rspi,
+ unsigned int len)
+{
+ unsigned int n;
+
+ n = min(len, QSPI_BUFFER_SIZE);
+
+ if (len >= QSPI_BUFFER_SIZE) {
+ /* sets triggering number to 32 bytes */
+ qspi_update(rspi, SPBFCR_TXTRG_MASK,
+ SPBFCR_TXTRG_32B, QSPI_SPBFCR);
+ } else {
+ /* sets triggering number to 1 byte */
+ qspi_update(rspi, SPBFCR_TXTRG_MASK,
+ SPBFCR_TXTRG_1B, QSPI_SPBFCR);
+ }
+
+ return n;
+}
+
+static int qspi_set_receive_trigger(struct rspi_data *rspi, unsigned int len)
+{
+ unsigned int n;
+
+ n = min(len, QSPI_BUFFER_SIZE);
+
+ if (len >= QSPI_BUFFER_SIZE) {
+ /* sets triggering number to 32 bytes */
+ qspi_update(rspi, SPBFCR_RXTRG_MASK,
+ SPBFCR_RXTRG_32B, QSPI_SPBFCR);
+ } else {
+ /* sets triggering number to 1 byte */
+ qspi_update(rspi, SPBFCR_RXTRG_MASK,
+ SPBFCR_RXTRG_1B, QSPI_SPBFCR);
+ }
+ return n;
+}
+
+static void rspi_enable_irq(const struct rspi_data *rspi, u8 enable)
+{
+ rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | enable, RSPI_SPCR);
+}
+
+static void rspi_disable_irq(const struct rspi_data *rspi, u8 disable)
+{
+ rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~disable, RSPI_SPCR);
+}
+
+static int rspi_wait_for_interrupt(struct rspi_data *rspi, u8 wait_mask,
+ u8 enable_bit)
+{
+ int ret;
+
+ rspi->spsr = rspi_read8(rspi, RSPI_SPSR);
+ if (rspi->spsr & wait_mask)
+ return 0;
+
+ rspi_enable_irq(rspi, enable_bit);
+ ret = wait_event_timeout(rspi->wait, rspi->spsr & wait_mask, HZ);
+ if (ret == 0 && !(rspi->spsr & wait_mask))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static inline int rspi_wait_for_tx_empty(struct rspi_data *rspi)
+{
+ return rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE);
+}
+
+static inline int rspi_wait_for_rx_full(struct rspi_data *rspi)
+{
+ return rspi_wait_for_interrupt(rspi, SPSR_SPRF, SPCR_SPRIE);
+}
+
+static int rspi_data_out(struct rspi_data *rspi, u8 data)
+{
+ int error = rspi_wait_for_tx_empty(rspi);
+ if (error < 0) {
+ dev_err(&rspi->ctlr->dev, "transmit timeout\n");
+ return error;
+ }
+ rspi_write_data(rspi, data);
+ return 0;
+}
+
+static int rspi_data_in(struct rspi_data *rspi)
+{
+ int error;
+ u8 data;
+
+ error = rspi_wait_for_rx_full(rspi);
+ if (error < 0) {
+ dev_err(&rspi->ctlr->dev, "receive timeout\n");
+ return error;
+ }
+ data = rspi_read_data(rspi);
+ return data;
+}
+
+static int rspi_pio_transfer(struct rspi_data *rspi, const u8 *tx, u8 *rx,
+ unsigned int n)
+{
+ while (n-- > 0) {
+ if (tx) {
+ int ret = rspi_data_out(rspi, *tx++);
+ if (ret < 0)
+ return ret;
+ }
+ if (rx) {
+ int ret = rspi_data_in(rspi);
+ if (ret < 0)
+ return ret;
+ *rx++ = ret;
+ }
+ }
+
+ return 0;
+}
+
+static void rspi_dma_complete(void *arg)
+{
+ struct rspi_data *rspi = arg;
+
+ rspi->dma_callbacked = 1;
+ wake_up_interruptible(&rspi->wait);
+}
+
+static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
+ struct sg_table *rx)
+{
+ struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL;
+ u8 irq_mask = 0;
+ unsigned int other_irq = 0;
+ dma_cookie_t cookie;
+ int ret;
+
+ /* First prepare and submit the DMA request(s), as this may fail */
+ if (rx) {
+ desc_rx = dmaengine_prep_slave_sg(rspi->ctlr->dma_rx, rx->sgl,
+ rx->nents, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc_rx) {
+ ret = -EAGAIN;
+ goto no_dma_rx;
+ }
+
+ desc_rx->callback = rspi_dma_complete;
+ desc_rx->callback_param = rspi;
+ cookie = dmaengine_submit(desc_rx);
+ if (dma_submit_error(cookie)) {
+ ret = cookie;
+ goto no_dma_rx;
+ }
+
+ irq_mask |= SPCR_SPRIE;
+ }
+
+ if (tx) {
+ desc_tx = dmaengine_prep_slave_sg(rspi->ctlr->dma_tx, tx->sgl,
+ tx->nents, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc_tx) {
+ ret = -EAGAIN;
+ goto no_dma_tx;
+ }
+
+ if (rx) {
+ /* No callback */
+ desc_tx->callback = NULL;
+ } else {
+ desc_tx->callback = rspi_dma_complete;
+ desc_tx->callback_param = rspi;
+ }
+ cookie = dmaengine_submit(desc_tx);
+ if (dma_submit_error(cookie)) {
+ ret = cookie;
+ goto no_dma_tx;
+ }
+
+ irq_mask |= SPCR_SPTIE;
+ }
+
+ /*
+ * The DMAC needs SPxIE set, but with SPxIE set the IRQ routine would
+ * be called too, so this driver disables the IRQ lines during DMA.
+ */
+ if (tx)
+ disable_irq(other_irq = rspi->tx_irq);
+ if (rx && rspi->rx_irq != other_irq)
+ disable_irq(rspi->rx_irq);
+
+ rspi_enable_irq(rspi, irq_mask);
+ rspi->dma_callbacked = 0;
+
+ /* Now start DMA */
+ if (rx)
+ dma_async_issue_pending(rspi->ctlr->dma_rx);
+ if (tx)
+ dma_async_issue_pending(rspi->ctlr->dma_tx);
+
+ ret = wait_event_interruptible_timeout(rspi->wait,
+ rspi->dma_callbacked, HZ);
+ if (ret > 0 && rspi->dma_callbacked) {
+ ret = 0;
+ if (tx)
+ dmaengine_synchronize(rspi->ctlr->dma_tx);
+ if (rx)
+ dmaengine_synchronize(rspi->ctlr->dma_rx);
+ } else {
+ if (!ret) {
+ dev_err(&rspi->ctlr->dev, "DMA timeout\n");
+ ret = -ETIMEDOUT;
+ }
+ if (tx)
+ dmaengine_terminate_sync(rspi->ctlr->dma_tx);
+ if (rx)
+ dmaengine_terminate_sync(rspi->ctlr->dma_rx);
+ }
+
+ rspi_disable_irq(rspi, irq_mask);
+
+ if (tx)
+ enable_irq(rspi->tx_irq);
+ if (rx && rspi->rx_irq != other_irq)
+ enable_irq(rspi->rx_irq);
+
+ return ret;
+
+no_dma_tx:
+ if (rx)
+ dmaengine_terminate_sync(rspi->ctlr->dma_rx);
+no_dma_rx:
+ if (ret == -EAGAIN) {
+ dev_warn_once(&rspi->ctlr->dev,
+ "DMA not available, falling back to PIO\n");
+ }
+ return ret;
+}
+
+static void rspi_receive_init(const struct rspi_data *rspi)
+{
+ u8 spsr;
+
+ spsr = rspi_read8(rspi, RSPI_SPSR);
+ if (spsr & SPSR_SPRF)
+ rspi_read_data(rspi); /* dummy read */
+ if (spsr & SPSR_OVRF)
+ rspi_write8(rspi, rspi_read8(rspi, RSPI_SPSR) & ~SPSR_OVRF,
+ RSPI_SPSR);
+}
+
+static void rspi_rz_receive_init(const struct rspi_data *rspi)
+{
+ rspi_receive_init(rspi);
+ rspi_write8(rspi, SPBFCR_TXRST | SPBFCR_RXRST, RSPI_SPBFCR);
+ rspi_write8(rspi, 0, RSPI_SPBFCR);
+}
+
+static void qspi_receive_init(const struct rspi_data *rspi)
+{
+ u8 spsr;
+
+ spsr = rspi_read8(rspi, RSPI_SPSR);
+ if (spsr & SPSR_SPRF)
+ rspi_read_data(rspi); /* dummy read */
+ rspi_write8(rspi, SPBFCR_TXRST | SPBFCR_RXRST, QSPI_SPBFCR);
+ rspi_write8(rspi, 0, QSPI_SPBFCR);
+}
+
+static bool __rspi_can_dma(const struct rspi_data *rspi,
+ const struct spi_transfer *xfer)
+{
+ return xfer->len > rspi->ops->fifo_size;
+}
+
+static bool rspi_can_dma(struct spi_controller *ctlr, struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
+
+ return __rspi_can_dma(rspi, xfer);
+}
+
+static int rspi_dma_check_then_transfer(struct rspi_data *rspi,
+ struct spi_transfer *xfer)
+{
+ if (!rspi->ctlr->can_dma || !__rspi_can_dma(rspi, xfer))
+ return -EAGAIN;
+
+ /* rx_buf can be NULL on RSPI on SH in TX-only Mode */
+ return rspi_dma_transfer(rspi, &xfer->tx_sg,
+ xfer->rx_buf ? &xfer->rx_sg : NULL);
+}
+
+static int rspi_common_transfer(struct rspi_data *rspi,
+ struct spi_transfer *xfer)
+{
+ int ret;
+
+ xfer->effective_speed_hz = rspi->speed_hz;
+
+ ret = rspi_dma_check_then_transfer(rspi, xfer);
+ if (ret != -EAGAIN)
+ return ret;
+
+ ret = rspi_pio_transfer(rspi, xfer->tx_buf, xfer->rx_buf, xfer->len);
+ if (ret < 0)
+ return ret;
+
+ /* Wait for the last transmission */
+ rspi_wait_for_tx_empty(rspi);
+
+ return 0;
+}
+
+static int rspi_transfer_one(struct spi_controller *ctlr,
+ struct spi_device *spi, struct spi_transfer *xfer)
+{
+ struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
+ u8 spcr;
+
+ spcr = rspi_read8(rspi, RSPI_SPCR);
+ if (xfer->rx_buf) {
+ rspi_receive_init(rspi);
+ spcr &= ~SPCR_TXMD;
+ } else {
+ spcr |= SPCR_TXMD;
+ }
+ rspi_write8(rspi, spcr, RSPI_SPCR);
+
+ return rspi_common_transfer(rspi, xfer);
+}
+
+static int rspi_rz_transfer_one(struct spi_controller *ctlr,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
+
+ rspi_rz_receive_init(rspi);
+
+ return rspi_common_transfer(rspi, xfer);
+}
+
+static int qspi_trigger_transfer_out_in(struct rspi_data *rspi, const u8 *tx,
+ u8 *rx, unsigned int len)
+{
+ unsigned int i, n;
+ int ret;
+
+ while (len > 0) {
+ n = qspi_set_send_trigger(rspi, len);
+ qspi_set_receive_trigger(rspi, len);
+ ret = rspi_wait_for_tx_empty(rspi);
+ if (ret < 0) {
+ dev_err(&rspi->ctlr->dev, "transmit timeout\n");
+ return ret;
+ }
+ for (i = 0; i < n; i++)
+ rspi_write_data(rspi, *tx++);
+
+ ret = rspi_wait_for_rx_full(rspi);
+ if (ret < 0) {
+ dev_err(&rspi->ctlr->dev, "receive timeout\n");
+ return ret;
+ }
+ for (i = 0; i < n; i++)
+ *rx++ = rspi_read_data(rspi);
+
+ len -= n;
+ }
+
+ return 0;
+}
+
+static int qspi_transfer_out_in(struct rspi_data *rspi,
+ struct spi_transfer *xfer)
+{
+ int ret;
+
+ qspi_receive_init(rspi);
+
+ ret = rspi_dma_check_then_transfer(rspi, xfer);
+ if (ret != -EAGAIN)
+ return ret;
+
+ return qspi_trigger_transfer_out_in(rspi, xfer->tx_buf,
+ xfer->rx_buf, xfer->len);
+}
+
+static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer)
+{
+ const u8 *tx = xfer->tx_buf;
+ unsigned int n = xfer->len;
+ unsigned int i, len;
+ int ret;
+
+ if (rspi->ctlr->can_dma && __rspi_can_dma(rspi, xfer)) {
+ ret = rspi_dma_transfer(rspi, &xfer->tx_sg, NULL);
+ if (ret != -EAGAIN)
+ return ret;
+ }
+
+ while (n > 0) {
+ len = qspi_set_send_trigger(rspi, n);
+ ret = rspi_wait_for_tx_empty(rspi);
+ if (ret < 0) {
+ dev_err(&rspi->ctlr->dev, "transmit timeout\n");
+ return ret;
+ }
+ for (i = 0; i < len; i++)
+ rspi_write_data(rspi, *tx++);
+
+ n -= len;
+ }
+
+ /* Wait for the last transmission */
+ rspi_wait_for_tx_empty(rspi);
+
+ return 0;
+}
+
+static int qspi_transfer_in(struct rspi_data *rspi, struct spi_transfer *xfer)
+{
+ u8 *rx = xfer->rx_buf;
+ unsigned int n = xfer->len;
+ unsigned int i, len;
+ int ret;
+
+ if (rspi->ctlr->can_dma && __rspi_can_dma(rspi, xfer)) {
+ ret = rspi_dma_transfer(rspi, NULL, &xfer->rx_sg);
+ if (ret != -EAGAIN)
+ return ret;
+ }
+
+ while (n > 0) {
+ len = qspi_set_receive_trigger(rspi, n);
+ ret = rspi_wait_for_rx_full(rspi);
+ if (ret < 0) {
+ dev_err(&rspi->ctlr->dev, "receive timeout\n");
+ return ret;
+ }
+ for (i = 0; i < len; i++)
+ *rx++ = rspi_read_data(rspi);
+
+ n -= len;
+ }
+
+ return 0;
+}
+
+static int qspi_transfer_one(struct spi_controller *ctlr,
+ struct spi_device *spi, struct spi_transfer *xfer)
+{
+ struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
+
+ xfer->effective_speed_hz = rspi->speed_hz;
+ if (spi->mode & SPI_LOOP) {
+ return qspi_transfer_out_in(rspi, xfer);
+ } else if (xfer->tx_nbits > SPI_NBITS_SINGLE) {
+ /* Quad or Dual SPI Write */
+ return qspi_transfer_out(rspi, xfer);
+ } else if (xfer->rx_nbits > SPI_NBITS_SINGLE) {
+ /* Quad or Dual SPI Read */
+ return qspi_transfer_in(rspi, xfer);
+ } else {
+ /* Single SPI Transfer */
+ return qspi_transfer_out_in(rspi, xfer);
+ }
+}
+
+static u16 qspi_transfer_mode(const struct spi_transfer *xfer)
+{
+ if (xfer->tx_buf)
+ switch (xfer->tx_nbits) {
+ case SPI_NBITS_QUAD:
+ return SPCMD_SPIMOD_QUAD;
+ case SPI_NBITS_DUAL:
+ return SPCMD_SPIMOD_DUAL;
+ default:
+ return 0;
+ }
+ if (xfer->rx_buf)
+ switch (xfer->rx_nbits) {
+ case SPI_NBITS_QUAD:
+ return SPCMD_SPIMOD_QUAD | SPCMD_SPRW;
+ case SPI_NBITS_DUAL:
+ return SPCMD_SPIMOD_DUAL | SPCMD_SPRW;
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
+static int qspi_setup_sequencer(struct rspi_data *rspi,
+ const struct spi_message *msg)
+{
+ const struct spi_transfer *xfer;
+ unsigned int i = 0, len = 0;
+ u16 current_mode = 0xffff, mode;
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ mode = qspi_transfer_mode(xfer);
+ if (mode == current_mode) {
+ len += xfer->len;
+ continue;
+ }
+
+ /* Transfer mode change */
+ if (i) {
+ /* Set transfer data length of previous transfer */
+ rspi_write32(rspi, len, QSPI_SPBMUL(i - 1));
+ }
+
+ if (i >= QSPI_NUM_SPCMD) {
+ dev_err(&msg->spi->dev,
+ "Too many different transfer modes");
+ return -EINVAL;
+ }
+
+ /* Program transfer mode for this transfer */
+ rspi_write16(rspi, rspi->spcmd | mode, RSPI_SPCMD(i));
+ current_mode = mode;
+ len = xfer->len;
+ i++;
+ }
+ if (i) {
+ /* Set final transfer data length and sequence length */
+ rspi_write32(rspi, len, QSPI_SPBMUL(i - 1));
+ rspi_write8(rspi, i - 1, RSPI_SPSCR);
+ }
+
+ return 0;
+}
+
+static int rspi_setup(struct spi_device *spi)
+{
+ struct rspi_data *rspi = spi_controller_get_devdata(spi->controller);
+ u8 sslp;
+
+ if (spi->cs_gpiod)
+ return 0;
+
+ pm_runtime_get_sync(&rspi->pdev->dev);
+ spin_lock_irq(&rspi->lock);
+
+ sslp = rspi_read8(rspi, RSPI_SSLP);
+ if (spi->mode & SPI_CS_HIGH)
+ sslp |= SSLP_SSLP(spi->chip_select);
+ else
+ sslp &= ~SSLP_SSLP(spi->chip_select);
+ rspi_write8(rspi, sslp, RSPI_SSLP);
+
+ spin_unlock_irq(&rspi->lock);
+ pm_runtime_put(&rspi->pdev->dev);
+ return 0;
+}
+
+static int rspi_prepare_message(struct spi_controller *ctlr,
+ struct spi_message *msg)
+{
+ struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
+ struct spi_device *spi = msg->spi;
+ const struct spi_transfer *xfer;
+ int ret;
+
+ /*
+ * As the Bit Rate Register must not be changed while the device is
+ * active, all transfers in a message must use the same bit rate.
+ * In theory, the sequencer could be enabled, and each Command Register
+ * could divide the base bit rate by a different value.
+ * However, most RSPI variants do not have Transfer Data Length
+ * Multiplier Setting Registers, so each sequence step would be limited
+ * to a single word, making this feature unsuitable for large
+ * transfers, which would gain most from it.
+ */
+ rspi->speed_hz = spi->max_speed_hz;
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ if (xfer->speed_hz < rspi->speed_hz)
+ rspi->speed_hz = xfer->speed_hz;
+ }
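+ /*
+ * E.g. a message with one transfer requesting 5 MHz and another
+ * requesting 2 MHz is clocked at 2 MHz throughout (example values).
+ */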
+
+ rspi->spcmd = SPCMD_SSLKP;
+ if (spi->mode & SPI_CPOL)
+ rspi->spcmd |= SPCMD_CPOL;
+ if (spi->mode & SPI_CPHA)
+ rspi->spcmd |= SPCMD_CPHA;
+ if (spi->mode & SPI_LSB_FIRST)
+ rspi->spcmd |= SPCMD_LSBF;
+
+ /* Configure slave signal to assert */
+ rspi->spcmd |= SPCMD_SSLA(spi->cs_gpiod ? rspi->ctlr->unused_native_cs
+ : spi->chip_select);
+
+ /* CMOS output mode and MOSI signal from previous transfer */
+ rspi->sppcr = 0;
+ if (spi->mode & SPI_LOOP)
+ rspi->sppcr |= SPPCR_SPLP;
+
+ rspi->ops->set_config_register(rspi, 8);
+
+ if (msg->spi->mode &
+ (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)) {
+ /* Setup sequencer for messages with multiple transfer modes */
+ ret = qspi_setup_sequencer(rspi, msg);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Enable SPI function in master mode */
+ rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_SPE, RSPI_SPCR);
+ return 0;
+}
+
+static int rspi_unprepare_message(struct spi_controller *ctlr,
+ struct spi_message *msg)
+{
+ struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
+
+ /* Disable SPI function */
+ rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_SPE, RSPI_SPCR);
+
+ /* Reset sequencer for Single SPI Transfers */
+ rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
+ rspi_write8(rspi, 0, RSPI_SPSCR);
+ return 0;
+}
+
+static irqreturn_t rspi_irq_mux(int irq, void *_sr)
+{
+ struct rspi_data *rspi = _sr;
+ u8 spsr;
+ irqreturn_t ret = IRQ_NONE;
+ u8 disable_irq = 0;
+
+ rspi->spsr = spsr = rspi_read8(rspi, RSPI_SPSR);
+ if (spsr & SPSR_SPRF)
+ disable_irq |= SPCR_SPRIE;
+ if (spsr & SPSR_SPTEF)
+ disable_irq |= SPCR_SPTIE;
+
+ if (disable_irq) {
+ ret = IRQ_HANDLED;
+ rspi_disable_irq(rspi, disable_irq);
+ wake_up(&rspi->wait);
+ }
+
+ return ret;
+}
+
+static irqreturn_t rspi_irq_rx(int irq, void *_sr)
+{
+ struct rspi_data *rspi = _sr;
+ u8 spsr;
+
+ rspi->spsr = spsr = rspi_read8(rspi, RSPI_SPSR);
+ if (spsr & SPSR_SPRF) {
+ rspi_disable_irq(rspi, SPCR_SPRIE);
+ wake_up(&rspi->wait);
+ return IRQ_HANDLED;
+ }
+
+ return 0;
+}
+
+static irqreturn_t rspi_irq_tx(int irq, void *_sr)
+{
+ struct rspi_data *rspi = _sr;
+ u8 spsr;
+
+ rspi->spsr = spsr = rspi_read8(rspi, RSPI_SPSR);
+ if (spsr & SPSR_SPTEF) {
+ rspi_disable_irq(rspi, SPCR_SPTIE);
+ wake_up(&rspi->wait);
+ return IRQ_HANDLED;
+ }
+
+ return 0;
+}
+
+static struct dma_chan *rspi_request_dma_chan(struct device *dev,
+ enum dma_transfer_direction dir,
+ unsigned int id,
+ dma_addr_t port_addr)
+{
+ dma_cap_mask_t mask;
+ struct dma_chan *chan;
+ struct dma_slave_config cfg;
+ int ret;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ chan = dma_request_slave_channel_compat(mask, shdma_chan_filter,
+ (void *)(unsigned long)id, dev,
+ dir == DMA_MEM_TO_DEV ? "tx" : "rx");
+ if (!chan) {
+ dev_warn(dev, "dma_request_slave_channel_compat failed\n");
+ return NULL;
+ }
+
+ memset(&cfg, 0, sizeof(cfg));
+ cfg.dst_addr = port_addr + RSPI_SPDR;
+ cfg.src_addr = port_addr + RSPI_SPDR;
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ cfg.direction = dir;
+
+ ret = dmaengine_slave_config(chan, &cfg);
+ if (ret) {
+ dev_warn(dev, "dmaengine_slave_config failed %d\n", ret);
+ dma_release_channel(chan);
+ return NULL;
+ }
+
+ return chan;
+}
+
+static int rspi_request_dma(struct device *dev, struct spi_controller *ctlr,
+ const struct resource *res)
+{
+ const struct rspi_plat_data *rspi_pd = dev_get_platdata(dev);
+ unsigned int dma_tx_id, dma_rx_id;
+
+ if (dev->of_node) {
+ /* In the OF case we will get the slave IDs from the DT */
+ dma_tx_id = 0;
+ dma_rx_id = 0;
+ } else if (rspi_pd && rspi_pd->dma_tx_id && rspi_pd->dma_rx_id) {
+ dma_tx_id = rspi_pd->dma_tx_id;
+ dma_rx_id = rspi_pd->dma_rx_id;
+ } else {
+ /* No DMA channel information available; not an error, use PIO. */
+ return 0;
+ }
+
+ ctlr->dma_tx = rspi_request_dma_chan(dev, DMA_MEM_TO_DEV, dma_tx_id,
+ res->start);
+ if (!ctlr->dma_tx)
+ return -ENODEV;
+
+ ctlr->dma_rx = rspi_request_dma_chan(dev, DMA_DEV_TO_MEM, dma_rx_id,
+ res->start);
+ if (!ctlr->dma_rx) {
+ dma_release_channel(ctlr->dma_tx);
+ ctlr->dma_tx = NULL;
+ return -ENODEV;
+ }
+
+ ctlr->can_dma = rspi_can_dma;
+ dev_info(dev, "DMA available");
+ return 0;
+}
+
+static void rspi_release_dma(struct spi_controller *ctlr)
+{
+ if (ctlr->dma_tx)
+ dma_release_channel(ctlr->dma_tx);
+ if (ctlr->dma_rx)
+ dma_release_channel(ctlr->dma_rx);
+}
+
+static int rspi_remove(struct platform_device *pdev)
+{
+ struct rspi_data *rspi = platform_get_drvdata(pdev);
+
+ rspi_release_dma(rspi->ctlr);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static const struct spi_ops rspi_ops = {
+ .set_config_register = rspi_set_config_register,
+ .transfer_one = rspi_transfer_one,
+ .min_div = 2,
+ .max_div = 4096,
+ .flags = SPI_CONTROLLER_MUST_TX,
+ .fifo_size = 8,
+ .num_hw_ss = 2,
+};
+
+static const struct spi_ops rspi_rz_ops = {
+ .set_config_register = rspi_rz_set_config_register,
+ .transfer_one = rspi_rz_transfer_one,
+ .min_div = 2,
+ .max_div = 4096,
+ .flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX,
+ .fifo_size = 8, /* 8 for TX, 32 for RX */
+ .num_hw_ss = 1,
+};
+
+static const struct spi_ops qspi_ops = {
+ .set_config_register = qspi_set_config_register,
+ .transfer_one = qspi_transfer_one,
+ .extra_mode_bits = SPI_TX_DUAL | SPI_TX_QUAD |
+ SPI_RX_DUAL | SPI_RX_QUAD,
+ .min_div = 1,
+ .max_div = 4080,
+ .flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX,
+ .fifo_size = 32,
+ .num_hw_ss = 1,
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id rspi_of_match[] = {
+ /* RSPI on legacy SH */
+ { .compatible = "renesas,rspi", .data = &rspi_ops },
+ /* RSPI on RZ/A1H */
+ { .compatible = "renesas,rspi-rz", .data = &rspi_rz_ops },
+ /* QSPI on R-Car Gen2 */
+ { .compatible = "renesas,qspi", .data = &qspi_ops },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, rspi_of_match);
+
+static void rspi_reset_control_assert(void *data)
+{
+ reset_control_assert(data);
+}
+
+static int rspi_parse_dt(struct device *dev, struct spi_controller *ctlr)
+{
+ struct reset_control *rstc;
+ u32 num_cs;
+ int error;
+
+ /* Parse DT properties */
+ error = of_property_read_u32(dev->of_node, "num-cs", &num_cs);
+ if (error) {
+ dev_err(dev, "of_property_read_u32 num-cs failed %d\n", error);
+ return error;
+ }
+
+ ctlr->num_chipselect = num_cs;
+
+ rstc = devm_reset_control_get_optional_exclusive(dev, NULL);
+ if (IS_ERR(rstc))
+ return dev_err_probe(dev, PTR_ERR(rstc),
+ "failed to get reset ctrl\n");
+
+ error = reset_control_deassert(rstc);
+ if (error) {
+ dev_err(dev, "failed to deassert reset %d\n", error);
+ return error;
+ }
+
+ error = devm_add_action_or_reset(dev, rspi_reset_control_assert, rstc);
+ if (error) {
+ dev_err(dev, "failed to register assert devm action, %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+#else
+#define rspi_of_match NULL
+static inline int rspi_parse_dt(struct device *dev, struct spi_controller *ctlr)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_OF */
+
+static int rspi_request_irq(struct device *dev, unsigned int irq,
+ irq_handler_t handler, const char *suffix,
+ void *dev_id)
+{
+ const char *name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s",
+ dev_name(dev), suffix);
+ if (!name)
+ return -ENOMEM;
+
+ return devm_request_irq(dev, irq, handler, 0, name, dev_id);
+}
+
+static int rspi_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct spi_controller *ctlr;
+ struct rspi_data *rspi;
+ int ret;
+ const struct rspi_plat_data *rspi_pd;
+ const struct spi_ops *ops;
+ unsigned long clksrc;
+
+ ctlr = spi_alloc_master(&pdev->dev, sizeof(struct rspi_data));
+ if (ctlr == NULL)
+ return -ENOMEM;
+
+ ops = of_device_get_match_data(&pdev->dev);
+ if (ops) {
+ ret = rspi_parse_dt(&pdev->dev, ctlr);
+ if (ret)
+ goto error1;
+ } else {
+ ops = (struct spi_ops *)pdev->id_entry->driver_data;
+ rspi_pd = dev_get_platdata(&pdev->dev);
+ if (rspi_pd && rspi_pd->num_chipselect)
+ ctlr->num_chipselect = rspi_pd->num_chipselect;
+ else
+ ctlr->num_chipselect = 2; /* default */
+ }
+
+ rspi = spi_controller_get_devdata(ctlr);
+ platform_set_drvdata(pdev, rspi);
+ rspi->ops = ops;
+ rspi->ctlr = ctlr;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ rspi->addr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(rspi->addr)) {
+ ret = PTR_ERR(rspi->addr);
+ goto error1;
+ }
+
+ rspi->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(rspi->clk)) {
+ dev_err(&pdev->dev, "cannot get clock\n");
+ ret = PTR_ERR(rspi->clk);
+ goto error1;
+ }
+
+ rspi->pdev = pdev;
+ pm_runtime_enable(&pdev->dev);
+
+ init_waitqueue_head(&rspi->wait);
+ spin_lock_init(&rspi->lock);
+
+ ctlr->bus_num = pdev->id;
+ ctlr->setup = rspi_setup;
+ ctlr->auto_runtime_pm = true;
+ ctlr->transfer_one = ops->transfer_one;
+ ctlr->prepare_message = rspi_prepare_message;
+ ctlr->unprepare_message = rspi_unprepare_message;
+ ctlr->mode_bits = SPI_CPHA | SPI_CPOL | SPI_CS_HIGH | SPI_LSB_FIRST |
+ SPI_LOOP | ops->extra_mode_bits;
+ clksrc = clk_get_rate(rspi->clk);
+ ctlr->min_speed_hz = DIV_ROUND_UP(clksrc, ops->max_div);
+ ctlr->max_speed_hz = DIV_ROUND_UP(clksrc, ops->min_div);
+ ctlr->flags = ops->flags;
+ ctlr->dev.of_node = pdev->dev.of_node;
+ ctlr->use_gpio_descriptors = true;
+ ctlr->max_native_cs = rspi->ops->num_hw_ss;
+
+ ret = platform_get_irq_byname_optional(pdev, "rx");
+ if (ret < 0) {
+ ret = platform_get_irq_byname_optional(pdev, "mux");
+ if (ret < 0)
+ ret = platform_get_irq(pdev, 0);
+ if (ret >= 0)
+ rspi->rx_irq = rspi->tx_irq = ret;
+ } else {
+ rspi->rx_irq = ret;
+ ret = platform_get_irq_byname(pdev, "tx");
+ if (ret >= 0)
+ rspi->tx_irq = ret;
+ }
+
+ if (rspi->rx_irq == rspi->tx_irq) {
+ /* Single multiplexed interrupt */
+ ret = rspi_request_irq(&pdev->dev, rspi->rx_irq, rspi_irq_mux,
+ "mux", rspi);
+ } else {
+ /* Multi-interrupt mode, only SPRI and SPTI are used */
+ ret = rspi_request_irq(&pdev->dev, rspi->rx_irq, rspi_irq_rx,
+ "rx", rspi);
+ if (!ret)
+ ret = rspi_request_irq(&pdev->dev, rspi->tx_irq,
+ rspi_irq_tx, "tx", rspi);
+ }
+ if (ret < 0) {
+ dev_err(&pdev->dev, "request_irq error\n");
+ goto error2;
+ }
+
+ ret = rspi_request_dma(&pdev->dev, ctlr, res);
+ if (ret < 0)
+ dev_warn(&pdev->dev, "DMA not available, using PIO\n");
+
+ ret = devm_spi_register_controller(&pdev->dev, ctlr);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "devm_spi_register_controller error.\n");
+ goto error3;
+ }
+
+ dev_info(&pdev->dev, "probed\n");
+
+ return 0;
+
+error3:
+ rspi_release_dma(ctlr);
+error2:
+ pm_runtime_disable(&pdev->dev);
+error1:
+ spi_controller_put(ctlr);
+
+ return ret;
+}
+
+static const struct platform_device_id spi_driver_ids[] = {
+ { "rspi", (kernel_ulong_t)&rspi_ops },
+ {},
+};
+
+MODULE_DEVICE_TABLE(platform, spi_driver_ids);
+
+#ifdef CONFIG_PM_SLEEP
+static int rspi_suspend(struct device *dev)
+{
+ struct rspi_data *rspi = dev_get_drvdata(dev);
+
+ return spi_controller_suspend(rspi->ctlr);
+}
+
+static int rspi_resume(struct device *dev)
+{
+ struct rspi_data *rspi = dev_get_drvdata(dev);
+
+ return spi_controller_resume(rspi->ctlr);
+}
+
+static SIMPLE_DEV_PM_OPS(rspi_pm_ops, rspi_suspend, rspi_resume);
+#define DEV_PM_OPS &rspi_pm_ops
+#else
+#define DEV_PM_OPS NULL
+#endif /* CONFIG_PM_SLEEP */
+
+static struct platform_driver rspi_driver = {
+ .probe = rspi_probe,
+ .remove = rspi_remove,
+ .id_table = spi_driver_ids,
+ .driver = {
+ .name = "renesas_spi",
+ .pm = DEV_PM_OPS,
+ .of_match_table = of_match_ptr(rspi_of_match),
+ },
+};
+module_platform_driver(rspi_driver);
+
+MODULE_DESCRIPTION("Renesas RSPI bus driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Yoshihiro Shimoda");
diff --git a/drivers/spi/spi-s3c24xx-regs.h b/drivers/spi/spi-s3c24xx-regs.h
new file mode 100644
index 000000000..f51464ab5
--- /dev/null
+++ b/drivers/spi/spi-s3c24xx-regs.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2004 Fetron GmbH
+ *
+ * S3C2410 SPI register definition
+ */
+
+#ifndef __SPI_S3C2410_H
+#define __SPI_S3C2410_H
+
+#define S3C2410_SPCON (0x00)
+
+#define S3C2410_SPCON_SMOD_DMA (2 << 5) /* DMA mode */
+#define S3C2410_SPCON_SMOD_INT (1 << 5) /* interrupt mode */
+#define S3C2410_SPCON_SMOD_POLL (0 << 5) /* polling mode */
+#define S3C2410_SPCON_ENSCK (1 << 4) /* Enable SCK */
+#define S3C2410_SPCON_MSTR (1 << 3) /* Master:1, Slave:0 select */
+#define S3C2410_SPCON_CPOL_HIGH (1 << 2) /* Clock polarity select */
+#define S3C2410_SPCON_CPOL_LOW (0 << 2) /* Clock polarity select */
+
+#define S3C2410_SPCON_CPHA_FMTB (1 << 1) /* Clock Phase Select */
+#define S3C2410_SPCON_CPHA_FMTA (0 << 1) /* Clock Phase Select */
+
+#define S3C2410_SPSTA (0x04)
+
+#define S3C2410_SPSTA_DCOL (1 << 2) /* Data Collision Error */
+#define S3C2410_SPSTA_MULD (1 << 1) /* Multi Master Error */
+#define S3C2410_SPSTA_READY (1 << 0) /* Data Tx/Rx ready */
+#define S3C2412_SPSTA_READY_ORG (1 << 3)
+
+#define S3C2410_SPPIN (0x08)
+
+#define S3C2410_SPPIN_ENMUL (1 << 2) /* Multi Master Error detect */
+#define S3C2410_SPPIN_RESERVED (1 << 1)
+#define S3C2410_SPPIN_KEEP (1 << 0) /* Master Out keep */
+
+#define S3C2410_SPPRE (0x0C)
+#define S3C2410_SPTDAT (0x10)
+#define S3C2410_SPRDAT (0x14)
+
+#endif /* __SPI_S3C2410_H */
diff --git a/drivers/spi/spi-s3c24xx.c b/drivers/spi/spi-s3c24xx.c
new file mode 100644
index 000000000..ef25b5e93
--- /dev/null
+++ b/drivers/spi/spi-s3c24xx.c
@@ -0,0 +1,596 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2006 Ben Dooks
+ * Copyright 2006-2009 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+*/
+
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+#include <linux/spi/s3c24xx.h>
+#include <linux/spi/s3c24xx-fiq.h>
+#include <linux/module.h>
+
+#include <asm/fiq.h>
+
+#include "spi-s3c24xx-regs.h"
+
+/**
+ * struct s3c24xx_spi_devstate - per device data
+ * @hz: Last frequency calculated for @sppre field.
+ * @mode: Last mode setting for the @spcon field.
+ * @spcon: Value to write to the SPCON register.
+ * @sppre: Value to write to the SPPRE register.
+ */
+struct s3c24xx_spi_devstate {
+ unsigned int hz;
+ unsigned int mode;
+ u8 spcon;
+ u8 sppre;
+};
+
+enum spi_fiq_mode {
+ FIQ_MODE_NONE = 0,
+ FIQ_MODE_TX = 1,
+ FIQ_MODE_RX = 2,
+ FIQ_MODE_TXRX = 3,
+};
+
+struct s3c24xx_spi {
+ /* bitbang has to be first */
+ struct spi_bitbang bitbang;
+ struct completion done;
+
+ void __iomem *regs;
+ int irq;
+ int len;
+ int count;
+
+ struct fiq_handler fiq_handler;
+ enum spi_fiq_mode fiq_mode;
+ unsigned char fiq_inuse;
+ unsigned char fiq_claimed;
+
+ /* data buffers */
+ const unsigned char *tx;
+ unsigned char *rx;
+
+ struct clk *clk;
+ struct spi_master *master;
+ struct spi_device *curdev;
+ struct device *dev;
+ struct s3c2410_spi_info *pdata;
+};
+
+#define SPCON_DEFAULT (S3C2410_SPCON_MSTR | S3C2410_SPCON_SMOD_INT)
+#define SPPIN_DEFAULT (S3C2410_SPPIN_KEEP)
+
+static inline struct s3c24xx_spi *to_hw(struct spi_device *sdev)
+{
+ return spi_master_get_devdata(sdev->master);
+}
+
+static void s3c24xx_spi_chipsel(struct spi_device *spi, int value)
+{
+ struct s3c24xx_spi_devstate *cs = spi->controller_state;
+ struct s3c24xx_spi *hw = to_hw(spi);
+
+ /* change the chipselect state and the state of the spi engine clock */
+
+ switch (value) {
+ case BITBANG_CS_INACTIVE:
+ writeb(cs->spcon, hw->regs + S3C2410_SPCON);
+ break;
+
+ case BITBANG_CS_ACTIVE:
+ writeb(cs->spcon | S3C2410_SPCON_ENSCK,
+ hw->regs + S3C2410_SPCON);
+ break;
+ }
+}
+
+static int s3c24xx_spi_update_state(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct s3c24xx_spi *hw = to_hw(spi);
+ struct s3c24xx_spi_devstate *cs = spi->controller_state;
+ unsigned int hz;
+ unsigned int div;
+ unsigned long clk;
+
+ hz = t ? t->speed_hz : spi->max_speed_hz;
+
+ if (!hz)
+ hz = spi->max_speed_hz;
+
+ if (spi->mode != cs->mode) {
+ u8 spcon = SPCON_DEFAULT | S3C2410_SPCON_ENSCK;
+
+ if (spi->mode & SPI_CPHA)
+ spcon |= S3C2410_SPCON_CPHA_FMTB;
+
+ if (spi->mode & SPI_CPOL)
+ spcon |= S3C2410_SPCON_CPOL_HIGH;
+
+ cs->mode = spi->mode;
+ cs->spcon = spcon;
+ }
+
+ if (cs->hz != hz) {
+ clk = clk_get_rate(hw->clk);
+ div = DIV_ROUND_UP(clk, hz * 2) - 1;
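+ /*
+ * The resulting rate is clk / (2 * (div + 1)), as reported by the
+ * dev_dbg() below.  For example (assumed numbers), with a 66 MHz
+ * PCLK and a 1 MHz request, div = DIV_ROUND_UP(66 MHz, 2 MHz) - 1
+ * = 32, giving exactly 66 MHz / (2 * 33) = 1 MHz.
+ */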
+
+ if (div > 255)
+ div = 255;
+
+ dev_dbg(&spi->dev, "pre-scaler=%d (wanted %d, got %ld)\n",
+ div, hz, clk / (2 * (div + 1)));
+
+ cs->hz = hz;
+ cs->sppre = div;
+ }
+
+ return 0;
+}
+
+static int s3c24xx_spi_setupxfer(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct s3c24xx_spi_devstate *cs = spi->controller_state;
+ struct s3c24xx_spi *hw = to_hw(spi);
+ int ret;
+
+ ret = s3c24xx_spi_update_state(spi, t);
+ if (!ret)
+ writeb(cs->sppre, hw->regs + S3C2410_SPPRE);
+
+ return ret;
+}
+
+static int s3c24xx_spi_setup(struct spi_device *spi)
+{
+ struct s3c24xx_spi_devstate *cs = spi->controller_state;
+ struct s3c24xx_spi *hw = to_hw(spi);
+ int ret;
+
+ /* allocate settings on the first call */
+ if (!cs) {
+ cs = devm_kzalloc(&spi->dev,
+ sizeof(struct s3c24xx_spi_devstate),
+ GFP_KERNEL);
+ if (!cs)
+ return -ENOMEM;
+
+ cs->spcon = SPCON_DEFAULT;
+ cs->hz = -1;
+ spi->controller_state = cs;
+ }
+
+ /* initialise the state from the device */
+ ret = s3c24xx_spi_update_state(spi, NULL);
+ if (ret)
+ return ret;
+
+ mutex_lock(&hw->bitbang.lock);
+ if (!hw->bitbang.busy) {
+ hw->bitbang.chipselect(spi, BITBANG_CS_INACTIVE);
+ /* need to ndelay for 0.5 clocktick ? */
+ }
+ mutex_unlock(&hw->bitbang.lock);
+
+ return 0;
+}
+
+static inline unsigned int hw_txbyte(struct s3c24xx_spi *hw, int count)
+{
+ return hw->tx ? hw->tx[count] : 0;
+}
+
+#ifdef CONFIG_SPI_S3C24XX_FIQ
+/* Support for FIQ based pseudo-DMA to improve the transfer speed.
+ *
+ * This code uses the assembly helper in spi_s3c24xx_spi.S which is
+ * used by the FIQ core to move data between main memory and the peripheral
+ * block. Since this is code running on the processor, there is no problem
+ * with cache coherency of the buffers, so we can use any buffer we like.
+ */
+
+/**
+ * struct spi_fiq_code - FIQ code and header
+ * @length: The length of the code fragment, excluding this header.
+ * @ack_offset: The offset from @data to the word to place the IRQ ACK bit at.
+ * @data: The code itself to install as a FIQ handler.
+ */
+struct spi_fiq_code {
+ u32 length;
+ u32 ack_offset;
+ u8 data[];
+};
+
+/**
+ * s3c24xx_spi_tryfiq - attempt to claim and setup FIQ for transfer
+ * @hw: The hardware state.
+ *
+ * Claim the FIQ handler (only one can be active at any one time) and
+ * then setup the correct transfer code for this transfer.
+ *
+ * This call updates all the necessary state information if successful,
+ * so the caller does not need to do anything more than start the transfer
+ * as normal, since the IRQ will have been re-routed to the FIQ handler.
+*/
+static void s3c24xx_spi_tryfiq(struct s3c24xx_spi *hw)
+{
+ struct pt_regs regs;
+ enum spi_fiq_mode mode;
+ struct spi_fiq_code *code;
+ u32 *ack_ptr = NULL;
+ int ret;
+
+ if (!hw->fiq_claimed) {
+ /* try and claim fiq if we haven't got it, and if not
+ * then return and simply use another transfer method */
+
+ ret = claim_fiq(&hw->fiq_handler);
+ if (ret)
+ return;
+ }
+
+ if (hw->tx && !hw->rx)
+ mode = FIQ_MODE_TX;
+ else if (hw->rx && !hw->tx)
+ mode = FIQ_MODE_RX;
+ else
+ mode = FIQ_MODE_TXRX;
+
+ regs.uregs[fiq_rspi] = (long)hw->regs;
+ regs.uregs[fiq_rrx] = (long)hw->rx;
+ regs.uregs[fiq_rtx] = (long)hw->tx + 1;
+ regs.uregs[fiq_rcount] = hw->len - 1;
+
+ set_fiq_regs(&regs);
+
+ if (hw->fiq_mode != mode) {
+ hw->fiq_mode = mode;
+
+ switch (mode) {
+ case FIQ_MODE_TX:
+ code = &s3c24xx_spi_fiq_tx;
+ break;
+ case FIQ_MODE_RX:
+ code = &s3c24xx_spi_fiq_rx;
+ break;
+ case FIQ_MODE_TXRX:
+ code = &s3c24xx_spi_fiq_txrx;
+ break;
+ default:
+ code = NULL;
+ }
+
+ BUG_ON(!code);
+
+ ack_ptr = (u32 *)&code->data[code->ack_offset];
+ set_fiq_handler(&code->data, code->length);
+ }
+
+ s3c24xx_set_fiq(hw->irq, ack_ptr, true);
+
+ hw->fiq_mode = mode;
+ hw->fiq_inuse = 1;
+}
+
+/**
+ * s3c24xx_spi_fiqop - FIQ core code callback
+ * @pw: Data registered with the handler
+ * @release: Whether this is a release or a return.
+ *
+ * Called by the FIQ code when another module wants to use the FIQ, so
+ * return whether we are currently using this or not and then update our
+ * internal state.
+ */
+static int s3c24xx_spi_fiqop(void *pw, int release)
+{
+ struct s3c24xx_spi *hw = pw;
+ int ret = 0;
+
+ if (release) {
+ if (hw->fiq_inuse)
+ ret = -EBUSY;
+
+ /* note, we do not need to unroute the FIQ, as the FIQ
+ * vector code de-routes it to signal the end of transfer */
+
+ hw->fiq_mode = FIQ_MODE_NONE;
+ hw->fiq_claimed = 0;
+ } else {
+ hw->fiq_claimed = 1;
+ }
+
+ return ret;
+}
+
+/**
+ * s3c24xx_spi_initfiq - setup the information for the FIQ core
+ * @hw: The hardware state.
+ *
+ * Setup the fiq_handler block to pass to the FIQ core.
+ */
+static inline void s3c24xx_spi_initfiq(struct s3c24xx_spi *hw)
+{
+ hw->fiq_handler.dev_id = hw;
+ hw->fiq_handler.name = dev_name(hw->dev);
+ hw->fiq_handler.fiq_op = s3c24xx_spi_fiqop;
+}
+
+/**
+ * s3c24xx_spi_usefiq - return if we should be using FIQ.
+ * @hw: The hardware state.
+ *
+ * Return true if the platform data allows this channel to use the
+ * FIQ.
+ */
+static inline bool s3c24xx_spi_usefiq(struct s3c24xx_spi *hw)
+{
+ return hw->pdata->use_fiq;
+}
+
+/**
+ * s3c24xx_spi_usingfiq - return if channel is using FIQ
+ * @spi: The hardware state.
+ *
+ * Return whether the channel is currently using the FIQ (separate from
+ * whether the FIQ is claimed).
+ */
+static inline bool s3c24xx_spi_usingfiq(struct s3c24xx_spi *spi)
+{
+ return spi->fiq_inuse;
+}
+#else
+
+static inline void s3c24xx_spi_initfiq(struct s3c24xx_spi *s) { }
+static inline void s3c24xx_spi_tryfiq(struct s3c24xx_spi *s) { }
+static inline bool s3c24xx_spi_usefiq(struct s3c24xx_spi *s) { return false; }
+static inline bool s3c24xx_spi_usingfiq(struct s3c24xx_spi *s) { return false; }
+
+#endif /* CONFIG_SPI_S3C24XX_FIQ */
+
+static int s3c24xx_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct s3c24xx_spi *hw = to_hw(spi);
+
+ hw->tx = t->tx_buf;
+ hw->rx = t->rx_buf;
+ hw->len = t->len;
+ hw->count = 0;
+
+ init_completion(&hw->done);
+
+ hw->fiq_inuse = 0;
+ if (s3c24xx_spi_usefiq(hw) && t->len >= 3)
+ s3c24xx_spi_tryfiq(hw);
+
+ /* send the first byte */
+ writeb(hw_txbyte(hw, 0), hw->regs + S3C2410_SPTDAT);
+
+ wait_for_completion(&hw->done);
+ return hw->count;
+}
+
+static irqreturn_t s3c24xx_spi_irq(int irq, void *dev)
+{
+ struct s3c24xx_spi *hw = dev;
+ unsigned int spsta = readb(hw->regs + S3C2410_SPSTA);
+ unsigned int count = hw->count;
+
+ if (spsta & S3C2410_SPSTA_DCOL) {
+ dev_dbg(hw->dev, "data-collision\n");
+ complete(&hw->done);
+ goto irq_done;
+ }
+
+ if (!(spsta & S3C2410_SPSTA_READY)) {
+ dev_dbg(hw->dev, "spi not ready for tx?\n");
+ complete(&hw->done);
+ goto irq_done;
+ }
+
+ if (!s3c24xx_spi_usingfiq(hw)) {
+ hw->count++;
+
+ if (hw->rx)
+ hw->rx[count] = readb(hw->regs + S3C2410_SPRDAT);
+
+ count++;
+
+ if (count < hw->len)
+ writeb(hw_txbyte(hw, count), hw->regs + S3C2410_SPTDAT);
+ else
+ complete(&hw->done);
+ } else {
+ hw->count = hw->len;
+ hw->fiq_inuse = 0;
+
+ if (hw->rx)
+ hw->rx[hw->len-1] = readb(hw->regs + S3C2410_SPRDAT);
+
+ complete(&hw->done);
+ }
+
+ irq_done:
+ return IRQ_HANDLED;
+}
+
+static void s3c24xx_spi_initialsetup(struct s3c24xx_spi *hw)
+{
+ /* for the moment, permanently enable the clock */
+
+ clk_enable(hw->clk);
+
+ /* program defaults into the registers */
+
+ writeb(0xff, hw->regs + S3C2410_SPPRE);
+ writeb(SPPIN_DEFAULT, hw->regs + S3C2410_SPPIN);
+ writeb(SPCON_DEFAULT, hw->regs + S3C2410_SPCON);
+}
+
+static int s3c24xx_spi_probe(struct platform_device *pdev)
+{
+ struct s3c2410_spi_info *pdata;
+ struct s3c24xx_spi *hw;
+ struct spi_master *master;
+ int err = 0;
+
+ master = devm_spi_alloc_master(&pdev->dev, sizeof(struct s3c24xx_spi));
+ if (master == NULL) {
+ dev_err(&pdev->dev, "No memory for spi_master\n");
+ return -ENOMEM;
+ }
+
+ hw = spi_master_get_devdata(master);
+
+ hw->master = master;
+ hw->pdata = pdata = dev_get_platdata(&pdev->dev);
+ hw->dev = &pdev->dev;
+
+ if (pdata == NULL) {
+ dev_err(&pdev->dev, "No platform data supplied\n");
+ return -ENOENT;
+ }
+
+ platform_set_drvdata(pdev, hw);
+ init_completion(&hw->done);
+
+ /* initialise fiq handler */
+
+ s3c24xx_spi_initfiq(hw);
+
+ /* setup the master state. */
+
+ /* the spi->mode bits understood by this driver: */
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+
+ master->num_chipselect = hw->pdata->num_cs;
+ master->bus_num = pdata->bus_num;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ /* we need to call the local chipselect callback */
+ master->flags = SPI_MASTER_GPIO_SS;
+ master->use_gpio_descriptors = true;
+
+ /* setup the state for the bitbang driver */
+
+ hw->bitbang.master = hw->master;
+ hw->bitbang.setup_transfer = s3c24xx_spi_setupxfer;
+ hw->bitbang.chipselect = s3c24xx_spi_chipsel;
+ hw->bitbang.txrx_bufs = s3c24xx_spi_txrx;
+
+ hw->master->setup = s3c24xx_spi_setup;
+
+ dev_dbg(hw->dev, "bitbang at %p\n", &hw->bitbang);
+
+ /* find and map our resources */
+ hw->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(hw->regs))
+ return PTR_ERR(hw->regs);
+
+ hw->irq = platform_get_irq(pdev, 0);
+ if (hw->irq < 0)
+ return -ENOENT;
+
+ err = devm_request_irq(&pdev->dev, hw->irq, s3c24xx_spi_irq, 0,
+ pdev->name, hw);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot claim IRQ\n");
+ return err;
+ }
+
+ hw->clk = devm_clk_get(&pdev->dev, "spi");
+ if (IS_ERR(hw->clk)) {
+ dev_err(&pdev->dev, "No clock for device\n");
+ return PTR_ERR(hw->clk);
+ }
+
+ s3c24xx_spi_initialsetup(hw);
+
+ /* register our spi controller */
+
+ err = spi_bitbang_start(&hw->bitbang);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register SPI master\n");
+ goto err_register;
+ }
+
+ return 0;
+
+ err_register:
+ clk_disable(hw->clk);
+
+ return err;
+}
+
+static int s3c24xx_spi_remove(struct platform_device *dev)
+{
+ struct s3c24xx_spi *hw = platform_get_drvdata(dev);
+
+ spi_bitbang_stop(&hw->bitbang);
+ clk_disable(hw->clk);
+ spi_master_put(hw->master);
+ return 0;
+}
+
+
+#ifdef CONFIG_PM
+
+static int s3c24xx_spi_suspend(struct device *dev)
+{
+ struct s3c24xx_spi *hw = dev_get_drvdata(dev);
+ int ret;
+
+ ret = spi_master_suspend(hw->master);
+ if (ret)
+ return ret;
+
+ clk_disable(hw->clk);
+ return 0;
+}
+
+static int s3c24xx_spi_resume(struct device *dev)
+{
+ struct s3c24xx_spi *hw = dev_get_drvdata(dev);
+
+ s3c24xx_spi_initialsetup(hw);
+ return spi_master_resume(hw->master);
+}
+
+static const struct dev_pm_ops s3c24xx_spi_pmops = {
+ .suspend = s3c24xx_spi_suspend,
+ .resume = s3c24xx_spi_resume,
+};
+
+#define S3C24XX_SPI_PMOPS &s3c24xx_spi_pmops
+#else
+#define S3C24XX_SPI_PMOPS NULL
+#endif /* CONFIG_PM */
+
+MODULE_ALIAS("platform:s3c2410-spi");
+static struct platform_driver s3c24xx_spi_driver = {
+ .probe = s3c24xx_spi_probe,
+ .remove = s3c24xx_spi_remove,
+ .driver = {
+ .name = "s3c2410-spi",
+ .pm = S3C24XX_SPI_PMOPS,
+ },
+};
+module_platform_driver(s3c24xx_spi_driver);
+
+MODULE_DESCRIPTION("S3C24XX SPI Driver");
+MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
new file mode 100644
index 000000000..1480df7b4
--- /dev/null
+++ b/drivers/spi/spi-s3c64xx.c
@@ -0,0 +1,1545 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright (c) 2009 Samsung Electronics Co., Ltd.
+// Jaswinder Singh <jassi.brar@samsung.com>
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/spi/spi.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include <linux/platform_data/spi-s3c64xx.h>
+
+#define MAX_SPI_PORTS 12
+#define S3C64XX_SPI_QUIRK_POLL (1 << 0)
+#define S3C64XX_SPI_QUIRK_CS_AUTO (1 << 1)
+#define AUTOSUSPEND_TIMEOUT 2000
+
+/* Registers and bit-fields */
+
+#define S3C64XX_SPI_CH_CFG 0x00
+#define S3C64XX_SPI_CLK_CFG 0x04
+#define S3C64XX_SPI_MODE_CFG 0x08
+#define S3C64XX_SPI_CS_REG 0x0C
+#define S3C64XX_SPI_INT_EN 0x10
+#define S3C64XX_SPI_STATUS 0x14
+#define S3C64XX_SPI_TX_DATA 0x18
+#define S3C64XX_SPI_RX_DATA 0x1C
+#define S3C64XX_SPI_PACKET_CNT 0x20
+#define S3C64XX_SPI_PENDING_CLR 0x24
+#define S3C64XX_SPI_SWAP_CFG 0x28
+#define S3C64XX_SPI_FB_CLK 0x2C
+
+#define S3C64XX_SPI_CH_HS_EN (1<<6) /* High Speed Enable */
+#define S3C64XX_SPI_CH_SW_RST (1<<5)
+#define S3C64XX_SPI_CH_SLAVE (1<<4)
+#define S3C64XX_SPI_CPOL_L (1<<3)
+#define S3C64XX_SPI_CPHA_B (1<<2)
+#define S3C64XX_SPI_CH_RXCH_ON (1<<1)
+#define S3C64XX_SPI_CH_TXCH_ON (1<<0)
+
+#define S3C64XX_SPI_CLKSEL_SRCMSK (3<<9)
+#define S3C64XX_SPI_CLKSEL_SRCSHFT 9
+#define S3C64XX_SPI_ENCLK_ENABLE (1<<8)
+#define S3C64XX_SPI_PSR_MASK 0xff
+
+#define S3C64XX_SPI_MODE_CH_TSZ_BYTE (0<<29)
+#define S3C64XX_SPI_MODE_CH_TSZ_HALFWORD (1<<29)
+#define S3C64XX_SPI_MODE_CH_TSZ_WORD (2<<29)
+#define S3C64XX_SPI_MODE_CH_TSZ_MASK (3<<29)
+#define S3C64XX_SPI_MODE_BUS_TSZ_BYTE (0<<17)
+#define S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD (1<<17)
+#define S3C64XX_SPI_MODE_BUS_TSZ_WORD (2<<17)
+#define S3C64XX_SPI_MODE_BUS_TSZ_MASK (3<<17)
+#define S3C64XX_SPI_MODE_SELF_LOOPBACK (1<<3)
+#define S3C64XX_SPI_MODE_RXDMA_ON (1<<2)
+#define S3C64XX_SPI_MODE_TXDMA_ON (1<<1)
+#define S3C64XX_SPI_MODE_4BURST (1<<0)
+
+#define S3C64XX_SPI_CS_NSC_CNT_2 (2<<4)
+#define S3C64XX_SPI_CS_AUTO (1<<1)
+#define S3C64XX_SPI_CS_SIG_INACT (1<<0)
+
+#define S3C64XX_SPI_INT_TRAILING_EN (1<<6)
+#define S3C64XX_SPI_INT_RX_OVERRUN_EN (1<<5)
+#define S3C64XX_SPI_INT_RX_UNDERRUN_EN (1<<4)
+#define S3C64XX_SPI_INT_TX_OVERRUN_EN (1<<3)
+#define S3C64XX_SPI_INT_TX_UNDERRUN_EN (1<<2)
+#define S3C64XX_SPI_INT_RX_FIFORDY_EN (1<<1)
+#define S3C64XX_SPI_INT_TX_FIFORDY_EN (1<<0)
+
+#define S3C64XX_SPI_ST_RX_OVERRUN_ERR (1<<5)
+#define S3C64XX_SPI_ST_RX_UNDERRUN_ERR (1<<4)
+#define S3C64XX_SPI_ST_TX_OVERRUN_ERR (1<<3)
+#define S3C64XX_SPI_ST_TX_UNDERRUN_ERR (1<<2)
+#define S3C64XX_SPI_ST_RX_FIFORDY (1<<1)
+#define S3C64XX_SPI_ST_TX_FIFORDY (1<<0)
+
+#define S3C64XX_SPI_PACKET_CNT_EN (1<<16)
+#define S3C64XX_SPI_PACKET_CNT_MASK GENMASK(15, 0)
+
+#define S3C64XX_SPI_PND_TX_UNDERRUN_CLR (1<<4)
+#define S3C64XX_SPI_PND_TX_OVERRUN_CLR (1<<3)
+#define S3C64XX_SPI_PND_RX_UNDERRUN_CLR (1<<2)
+#define S3C64XX_SPI_PND_RX_OVERRUN_CLR (1<<1)
+#define S3C64XX_SPI_PND_TRAILING_CLR (1<<0)
+
+#define S3C64XX_SPI_SWAP_RX_HALF_WORD (1<<7)
+#define S3C64XX_SPI_SWAP_RX_BYTE (1<<6)
+#define S3C64XX_SPI_SWAP_RX_BIT (1<<5)
+#define S3C64XX_SPI_SWAP_RX_EN (1<<4)
+#define S3C64XX_SPI_SWAP_TX_HALF_WORD (1<<3)
+#define S3C64XX_SPI_SWAP_TX_BYTE (1<<2)
+#define S3C64XX_SPI_SWAP_TX_BIT (1<<1)
+#define S3C64XX_SPI_SWAP_TX_EN (1<<0)
+
+#define S3C64XX_SPI_FBCLK_MSK (3<<0)
+
+#define FIFO_LVL_MASK(i) ((i)->port_conf->fifo_lvl_mask[i->port_id])
+#define S3C64XX_SPI_ST_TX_DONE(v, i) (((v) & \
+ (1 << (i)->port_conf->tx_st_done)) ? 1 : 0)
+#define TX_FIFO_LVL(v, i) (((v) >> 6) & FIFO_LVL_MASK(i))
+#define RX_FIFO_LVL(v, i) (((v) >> (i)->port_conf->rx_lvl_offset) & \
+ FIFO_LVL_MASK(i))
+
+#define S3C64XX_SPI_MAX_TRAILCNT 0x3ff
+#define S3C64XX_SPI_TRAILCNT_OFF 19
+
+#define S3C64XX_SPI_TRAILCNT S3C64XX_SPI_MAX_TRAILCNT
+
+#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
+#define is_polling(x) (x->port_conf->quirks & S3C64XX_SPI_QUIRK_POLL)
+
+#define RXBUSY (1<<2)
+#define TXBUSY (1<<3)
+
+struct s3c64xx_spi_dma_data {
+ struct dma_chan *ch;
+ dma_cookie_t cookie;
+ enum dma_transfer_direction direction;
+};
+
+/**
+ * struct s3c64xx_spi_port_config - SPI Controller hardware info
+ * @fifo_lvl_mask: Bit-mask for {TX|RX}_FIFO_LVL bits in SPI_STATUS register.
+ * @rx_lvl_offset: Bit offset of RX_FIFO_LVL bits in SPI_STATUS register.
+ * @tx_st_done: Bit offset of TX_DONE bit in SPI_STATUS register.
+ * @clk_div: Internal clock divider
+ * @quirks: Bitmask of known quirks
+ * @high_speed: True, if the controller supports HIGH_SPEED_EN bit.
+ * @clk_from_cmu: True, if the controller does not include a clock mux and
+ * prescaler unit.
+ * @clk_ioclk: True if clock is present on this device
+ * @has_loopback: True if loopback mode can be supported
+ *
+ * The Samsung s3c64xx SPI controllers are used on various Samsung SoCs and
+ * differ in some aspects, such as the size of the FIFO and the SPI bus clock
+ * setup. Such differences are described by this structure, which is passed
+ * to the driver as driver data.
+ */
+struct s3c64xx_spi_port_config {
+ int fifo_lvl_mask[MAX_SPI_PORTS];
+ int rx_lvl_offset;
+ int tx_st_done;
+ int quirks;
+ int clk_div;
+ bool high_speed;
+ bool clk_from_cmu;
+ bool clk_ioclk;
+ bool has_loopback;
+};
+
+/**
+ * struct s3c64xx_spi_driver_data - Runtime info holder for SPI driver.
+ * @clk: Pointer to the spi clock.
+ * @src_clk: Pointer to the clock used to generate SPI signals.
+ * @ioclk: Pointer to the i/o clock between master and slave
+ * @pdev: Pointer to device's platform device data
+ * @master: Pointer to the SPI Protocol master.
+ * @cntrlr_info: Platform specific data for the controller this driver manages.
+ * @lock: Controller specific lock.
+ * @state: Set of FLAGS to indicate status.
+ * @sfr_start: BUS address of SPI controller regs.
+ * @regs: Pointer to ioremap'ed controller registers.
+ * @xfer_completion: To indicate completion of xfer task.
+ * @cur_mode: Stores the active configuration of the controller.
+ * @cur_bpw: Stores the active bits per word settings.
+ * @cur_speed: Current clock speed
+ * @rx_dma: Local receive DMA data (e.g. chan and direction)
+ * @tx_dma: Local transmit DMA data (e.g. chan and direction)
+ * @port_conf: Local SPI port configuration data
+ * @port_id: Port identification number
+ */
+struct s3c64xx_spi_driver_data {
+ void __iomem *regs;
+ struct clk *clk;
+ struct clk *src_clk;
+ struct clk *ioclk;
+ struct platform_device *pdev;
+ struct spi_master *master;
+ struct s3c64xx_spi_info *cntrlr_info;
+ spinlock_t lock;
+ unsigned long sfr_start;
+ struct completion xfer_completion;
+ unsigned state;
+ unsigned cur_mode, cur_bpw;
+ unsigned cur_speed;
+ struct s3c64xx_spi_dma_data rx_dma;
+ struct s3c64xx_spi_dma_data tx_dma;
+ const struct s3c64xx_spi_port_config *port_conf;
+ unsigned int port_id;
+};
+
+static void s3c64xx_flush_fifo(struct s3c64xx_spi_driver_data *sdd)
+{
+ void __iomem *regs = sdd->regs;
+ unsigned long loops;
+ u32 val;
+
+ writel(0, regs + S3C64XX_SPI_PACKET_CNT);
+
+ val = readl(regs + S3C64XX_SPI_CH_CFG);
+ val &= ~(S3C64XX_SPI_CH_RXCH_ON | S3C64XX_SPI_CH_TXCH_ON);
+ writel(val, regs + S3C64XX_SPI_CH_CFG);
+
+ val = readl(regs + S3C64XX_SPI_CH_CFG);
+ val |= S3C64XX_SPI_CH_SW_RST;
+ val &= ~S3C64XX_SPI_CH_HS_EN;
+ writel(val, regs + S3C64XX_SPI_CH_CFG);
+
+ /* Flush TxFIFO*/
+ loops = msecs_to_loops(1);
+ do {
+ val = readl(regs + S3C64XX_SPI_STATUS);
+ } while (TX_FIFO_LVL(val, sdd) && loops--);
+
+ if (loops == 0)
+ dev_warn(&sdd->pdev->dev, "Timed out flushing TX FIFO\n");
+
+ /* Flush RxFIFO*/
+ loops = msecs_to_loops(1);
+ do {
+ val = readl(regs + S3C64XX_SPI_STATUS);
+ if (RX_FIFO_LVL(val, sdd))
+ readl(regs + S3C64XX_SPI_RX_DATA);
+ else
+ break;
+ } while (loops--);
+
+ if (loops == 0)
+ dev_warn(&sdd->pdev->dev, "Timed out flushing RX FIFO\n");
+
+ val = readl(regs + S3C64XX_SPI_CH_CFG);
+ val &= ~S3C64XX_SPI_CH_SW_RST;
+ writel(val, regs + S3C64XX_SPI_CH_CFG);
+
+ val = readl(regs + S3C64XX_SPI_MODE_CFG);
+ val &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
+ writel(val, regs + S3C64XX_SPI_MODE_CFG);
+}
+
+static void s3c64xx_spi_dmacb(void *data)
+{
+ struct s3c64xx_spi_driver_data *sdd;
+ struct s3c64xx_spi_dma_data *dma = data;
+ unsigned long flags;
+
+ if (dma->direction == DMA_DEV_TO_MEM)
+ sdd = container_of(data,
+ struct s3c64xx_spi_driver_data, rx_dma);
+ else
+ sdd = container_of(data,
+ struct s3c64xx_spi_driver_data, tx_dma);
+
+ spin_lock_irqsave(&sdd->lock, flags);
+
+ if (dma->direction == DMA_DEV_TO_MEM) {
+ sdd->state &= ~RXBUSY;
+ if (!(sdd->state & TXBUSY))
+ complete(&sdd->xfer_completion);
+ } else {
+ sdd->state &= ~TXBUSY;
+ if (!(sdd->state & RXBUSY))
+ complete(&sdd->xfer_completion);
+ }
+
+ spin_unlock_irqrestore(&sdd->lock, flags);
+}
+
+static int prepare_dma(struct s3c64xx_spi_dma_data *dma,
+ struct sg_table *sgt)
+{
+ struct s3c64xx_spi_driver_data *sdd;
+ struct dma_slave_config config;
+ struct dma_async_tx_descriptor *desc;
+ int ret;
+
+ memset(&config, 0, sizeof(config));
+
+ if (dma->direction == DMA_DEV_TO_MEM) {
+ sdd = container_of((void *)dma,
+ struct s3c64xx_spi_driver_data, rx_dma);
+ config.direction = dma->direction;
+ config.src_addr = sdd->sfr_start + S3C64XX_SPI_RX_DATA;
+ config.src_addr_width = sdd->cur_bpw / 8;
+ config.src_maxburst = 1;
+ dmaengine_slave_config(dma->ch, &config);
+ } else {
+ sdd = container_of((void *)dma,
+ struct s3c64xx_spi_driver_data, tx_dma);
+ config.direction = dma->direction;
+ config.dst_addr = sdd->sfr_start + S3C64XX_SPI_TX_DATA;
+ config.dst_addr_width = sdd->cur_bpw / 8;
+ config.dst_maxburst = 1;
+ dmaengine_slave_config(dma->ch, &config);
+ }
+
+ desc = dmaengine_prep_slave_sg(dma->ch, sgt->sgl, sgt->nents,
+ dma->direction, DMA_PREP_INTERRUPT);
+ if (!desc) {
+ dev_err(&sdd->pdev->dev, "unable to prepare %s scatterlist",
+ dma->direction == DMA_DEV_TO_MEM ? "rx" : "tx");
+ return -ENOMEM;
+ }
+
+ desc->callback = s3c64xx_spi_dmacb;
+ desc->callback_param = dma;
+
+ dma->cookie = dmaengine_submit(desc);
+ ret = dma_submit_error(dma->cookie);
+ if (ret) {
+ dev_err(&sdd->pdev->dev, "DMA submission failed");
+ return -EIO;
+ }
+
+ dma_async_issue_pending(dma->ch);
+ return 0;
+}
+
+static void s3c64xx_spi_set_cs(struct spi_device *spi, bool enable)
+{
+ struct s3c64xx_spi_driver_data *sdd =
+ spi_master_get_devdata(spi->master);
+
+ if (sdd->cntrlr_info->no_cs)
+ return;
+
+ if (enable) {
+ if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO)) {
+ writel(0, sdd->regs + S3C64XX_SPI_CS_REG);
+ } else {
+ u32 ssel = readl(sdd->regs + S3C64XX_SPI_CS_REG);
+
+ ssel |= (S3C64XX_SPI_CS_AUTO |
+ S3C64XX_SPI_CS_NSC_CNT_2);
+ writel(ssel, sdd->regs + S3C64XX_SPI_CS_REG);
+ }
+ } else {
+ if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO))
+ writel(S3C64XX_SPI_CS_SIG_INACT,
+ sdd->regs + S3C64XX_SPI_CS_REG);
+ }
+}
+
+static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
+{
+ struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
+
+ if (is_polling(sdd))
+ return 0;
+
+ /* Requests DMA channels */
+ sdd->rx_dma.ch = dma_request_chan(&sdd->pdev->dev, "rx");
+ if (IS_ERR(sdd->rx_dma.ch)) {
+ dev_err(&sdd->pdev->dev, "Failed to get RX DMA channel\n");
+ sdd->rx_dma.ch = NULL;
+ return 0;
+ }
+
+ sdd->tx_dma.ch = dma_request_chan(&sdd->pdev->dev, "tx");
+ if (IS_ERR(sdd->tx_dma.ch)) {
+ dev_err(&sdd->pdev->dev, "Failed to get TX DMA channel\n");
+ dma_release_channel(sdd->rx_dma.ch);
+ sdd->tx_dma.ch = NULL;
+ sdd->rx_dma.ch = NULL;
+ return 0;
+ }
+
+ spi->dma_rx = sdd->rx_dma.ch;
+ spi->dma_tx = sdd->tx_dma.ch;
+
+ return 0;
+}
+
+static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi)
+{
+ struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
+
+ if (is_polling(sdd))
+ return 0;
+
+ /* Releases DMA channels if they are allocated */
+ if (sdd->rx_dma.ch && sdd->tx_dma.ch) {
+ dma_release_channel(sdd->rx_dma.ch);
+ dma_release_channel(sdd->tx_dma.ch);
+ sdd->rx_dma.ch = NULL;
+ sdd->tx_dma.ch = NULL;
+ }
+
+ return 0;
+}
+
+static bool s3c64xx_spi_can_dma(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
+
+ if (sdd->rx_dma.ch && sdd->tx_dma.ch) {
+ return xfer->len > (FIFO_LVL_MASK(sdd) >> 1) + 1;
+ } else {
+ return false;
+ }
+
+}
+
+static int s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
+ struct spi_transfer *xfer, int dma_mode)
+{
+ void __iomem *regs = sdd->regs;
+ u32 modecfg, chcfg;
+ int ret = 0;
+
+ modecfg = readl(regs + S3C64XX_SPI_MODE_CFG);
+ modecfg &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
+
+ chcfg = readl(regs + S3C64XX_SPI_CH_CFG);
+ chcfg &= ~S3C64XX_SPI_CH_TXCH_ON;
+
+ if (dma_mode) {
+ chcfg &= ~S3C64XX_SPI_CH_RXCH_ON;
+ } else {
+ /* Always shift data into the RX FIFO, even if the xfer is TX
+ * only; this lets the PCKT_CNT value generate exactly the
+ * number of clocks needed.
+ */
+ chcfg |= S3C64XX_SPI_CH_RXCH_ON;
+ writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
+ | S3C64XX_SPI_PACKET_CNT_EN,
+ regs + S3C64XX_SPI_PACKET_CNT);
+ }
+
+ if (xfer->tx_buf != NULL) {
+ sdd->state |= TXBUSY;
+ chcfg |= S3C64XX_SPI_CH_TXCH_ON;
+ if (dma_mode) {
+ modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
+ ret = prepare_dma(&sdd->tx_dma, &xfer->tx_sg);
+ } else {
+ switch (sdd->cur_bpw) {
+ case 32:
+ iowrite32_rep(regs + S3C64XX_SPI_TX_DATA,
+ xfer->tx_buf, xfer->len / 4);
+ break;
+ case 16:
+ iowrite16_rep(regs + S3C64XX_SPI_TX_DATA,
+ xfer->tx_buf, xfer->len / 2);
+ break;
+ default:
+ iowrite8_rep(regs + S3C64XX_SPI_TX_DATA,
+ xfer->tx_buf, xfer->len);
+ break;
+ }
+ }
+ }
+
+ if (xfer->rx_buf != NULL) {
+ sdd->state |= RXBUSY;
+
+ if (sdd->port_conf->high_speed && sdd->cur_speed >= 30000000UL
+ && !(sdd->cur_mode & SPI_CPHA))
+ chcfg |= S3C64XX_SPI_CH_HS_EN;
+
+ if (dma_mode) {
+ modecfg |= S3C64XX_SPI_MODE_RXDMA_ON;
+ chcfg |= S3C64XX_SPI_CH_RXCH_ON;
+ writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
+ | S3C64XX_SPI_PACKET_CNT_EN,
+ regs + S3C64XX_SPI_PACKET_CNT);
+ ret = prepare_dma(&sdd->rx_dma, &xfer->rx_sg);
+ }
+ }
+
+ if (ret)
+ return ret;
+
+ writel(modecfg, regs + S3C64XX_SPI_MODE_CFG);
+ writel(chcfg, regs + S3C64XX_SPI_CH_CFG);
+
+ return 0;
+}
+
+static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
+ int timeout_ms)
+{
+ void __iomem *regs = sdd->regs;
+ unsigned long val = 1;
+ u32 status;
+
+ /* max fifo depth available */
+ u32 max_fifo = (FIFO_LVL_MASK(sdd) >> 1) + 1;
+
+ if (timeout_ms)
+ val = msecs_to_loops(timeout_ms);
+
+ do {
+ status = readl(regs + S3C64XX_SPI_STATUS);
+ } while (RX_FIFO_LVL(status, sdd) < max_fifo && --val);
+
+ /* return the actual received data length */
+ return RX_FIFO_LVL(status, sdd);
+}
+
+static int s3c64xx_wait_for_dma(struct s3c64xx_spi_driver_data *sdd,
+ struct spi_transfer *xfer)
+{
+ void __iomem *regs = sdd->regs;
+ unsigned long val;
+ u32 status;
+ int ms;
+
+ /* millisecs to xfer 'len' bytes @ 'cur_speed' */
+ ms = xfer->len * 8 * 1000 / sdd->cur_speed;
+ ms += 30; /* some tolerance */
+ ms = max(ms, 100); /* minimum timeout */
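+ /*
+ * Worked example (assumed numbers): a 64 KiB transfer at 1 MHz
+ * needs about 524288 bits / 1 MHz ~= 524 ms on the wire, so the
+ * timeout becomes 524 + 30 = 554 ms.  Transfers shorter than
+ * ~70 ms of wire time are rounded up to the 100 ms floor.
+ */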
+
+ val = msecs_to_jiffies(ms) + 10;
+ val = wait_for_completion_timeout(&sdd->xfer_completion, val);
+
+ /*
+ * If the previous xfer completed within the timeout, proceed;
+ * otherwise return -EIO.
+ * A TX DMA completes as soon as the data has been written to
+ * the FIFO, without waiting for the actual bus transmission to
+ * finish.  An RX DMA only completes once the DMA has read the
+ * data back out of the FIFO, which requires the bus transmission
+ * to finish, so no extra wait is needed when the xfer involved RX.
+ */
+ if (val && !xfer->rx_buf) {
+ val = msecs_to_loops(10);
+ status = readl(regs + S3C64XX_SPI_STATUS);
+ while ((TX_FIFO_LVL(status, sdd)
+ || !S3C64XX_SPI_ST_TX_DONE(status, sdd))
+ && --val) {
+ cpu_relax();
+ status = readl(regs + S3C64XX_SPI_STATUS);
+ }
+
+ }
+
+ /* If timed out while checking rx/tx status return error */
+ if (!val)
+ return -EIO;
+
+ return 0;
+}
+
+static int s3c64xx_wait_for_pio(struct s3c64xx_spi_driver_data *sdd,
+ struct spi_transfer *xfer)
+{
+ void __iomem *regs = sdd->regs;
+ unsigned long val;
+ u32 status;
+ int loops;
+ u32 cpy_len;
+ u8 *buf;
+ int ms;
+
+ /* millisecs to xfer 'len' bytes @ 'cur_speed' */
+ ms = xfer->len * 8 * 1000 / sdd->cur_speed;
+ ms += 10; /* some tolerance */
+
+ val = msecs_to_loops(ms);
+ do {
+ status = readl(regs + S3C64XX_SPI_STATUS);
+ } while (RX_FIFO_LVL(status, sdd) < xfer->len && --val);
+
+ if (!val)
+ return -EIO;
+
+ /* If it was only Tx */
+ if (!xfer->rx_buf) {
+ sdd->state &= ~TXBUSY;
+ return 0;
+ }
+
+ /*
+ * If the receive length is bigger than the controller FIFO
+ * size, calculate how many times the FIFO must be read:
+ * loops = length / max FIFO size (derived from the FIFO mask).
+ * For any size smaller than the FIFO size, the code below is
+ * executed at least once.
+ */
+ loops = xfer->len / ((FIFO_LVL_MASK(sdd) >> 1) + 1);
+ buf = xfer->rx_buf;
+ do {
+ /* wait for data to be received in the fifo */
+ cpy_len = s3c64xx_spi_wait_for_timeout(sdd,
+ (loops ? ms : 0));
+
+ switch (sdd->cur_bpw) {
+ case 32:
+ ioread32_rep(regs + S3C64XX_SPI_RX_DATA,
+ buf, cpy_len / 4);
+ break;
+ case 16:
+ ioread16_rep(regs + S3C64XX_SPI_RX_DATA,
+ buf, cpy_len / 2);
+ break;
+ default:
+ ioread8_rep(regs + S3C64XX_SPI_RX_DATA,
+ buf, cpy_len);
+ break;
+ }
+
+ buf = buf + cpy_len;
+ } while (loops--);
+ sdd->state &= ~RXBUSY;
+
+ return 0;
+}
+
+static int s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
+{
+ void __iomem *regs = sdd->regs;
+ int ret;
+ u32 val;
+ int div = sdd->port_conf->clk_div;
+
+ /* Disable Clock */
+ if (!sdd->port_conf->clk_from_cmu) {
+ val = readl(regs + S3C64XX_SPI_CLK_CFG);
+ val &= ~S3C64XX_SPI_ENCLK_ENABLE;
+ writel(val, regs + S3C64XX_SPI_CLK_CFG);
+ }
+
+ /* Set Polarity and Phase */
+ val = readl(regs + S3C64XX_SPI_CH_CFG);
+ val &= ~(S3C64XX_SPI_CH_SLAVE |
+ S3C64XX_SPI_CPOL_L |
+ S3C64XX_SPI_CPHA_B);
+
+ if (sdd->cur_mode & SPI_CPOL)
+ val |= S3C64XX_SPI_CPOL_L;
+
+ if (sdd->cur_mode & SPI_CPHA)
+ val |= S3C64XX_SPI_CPHA_B;
+
+ writel(val, regs + S3C64XX_SPI_CH_CFG);
+
+ /* Set Channel & DMA Mode */
+ val = readl(regs + S3C64XX_SPI_MODE_CFG);
+ val &= ~(S3C64XX_SPI_MODE_BUS_TSZ_MASK
+ | S3C64XX_SPI_MODE_CH_TSZ_MASK);
+
+ switch (sdd->cur_bpw) {
+ case 32:
+ val |= S3C64XX_SPI_MODE_BUS_TSZ_WORD;
+ val |= S3C64XX_SPI_MODE_CH_TSZ_WORD;
+ break;
+ case 16:
+ val |= S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD;
+ val |= S3C64XX_SPI_MODE_CH_TSZ_HALFWORD;
+ break;
+ default:
+ val |= S3C64XX_SPI_MODE_BUS_TSZ_BYTE;
+ val |= S3C64XX_SPI_MODE_CH_TSZ_BYTE;
+ break;
+ }
+
+ if ((sdd->cur_mode & SPI_LOOP) && sdd->port_conf->has_loopback)
+ val |= S3C64XX_SPI_MODE_SELF_LOOPBACK;
+ else
+ val &= ~S3C64XX_SPI_MODE_SELF_LOOPBACK;
+
+ writel(val, regs + S3C64XX_SPI_MODE_CFG);
+
+ if (sdd->port_conf->clk_from_cmu) {
+ ret = clk_set_rate(sdd->src_clk, sdd->cur_speed * div);
+ if (ret)
+ return ret;
+ sdd->cur_speed = clk_get_rate(sdd->src_clk) / div;
+ } else {
+ /* Configure Clock */
+ val = readl(regs + S3C64XX_SPI_CLK_CFG);
+ val &= ~S3C64XX_SPI_PSR_MASK;
+ val |= ((clk_get_rate(sdd->src_clk) / sdd->cur_speed / div - 1)
+ & S3C64XX_SPI_PSR_MASK);
+ writel(val, regs + S3C64XX_SPI_CLK_CFG);
+
+ /* Enable Clock */
+ val = readl(regs + S3C64XX_SPI_CLK_CFG);
+ val |= S3C64XX_SPI_ENCLK_ENABLE;
+ writel(val, regs + S3C64XX_SPI_CLK_CFG);
+ }
+
+ return 0;
+}
+
+#define XFER_DMAADDR_INVALID DMA_BIT_MASK(32)
+
+static int s3c64xx_spi_prepare_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
+ struct spi_device *spi = msg->spi;
+ struct s3c64xx_spi_csinfo *cs = spi->controller_data;
+
+ /* Configure feedback delay */
+ if (!cs)
+ /* No delay if not defined */
+ writel(0, sdd->regs + S3C64XX_SPI_FB_CLK);
+ else
+ writel(cs->fb_delay & 0x3, sdd->regs + S3C64XX_SPI_FB_CLK);
+
+ return 0;
+}
+
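+/* DMA transfers are limited by the packet count field; PIO transfers are not */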
+static size_t s3c64xx_spi_max_transfer_size(struct spi_device *spi)
+{
+ struct spi_controller *ctlr = spi->controller;
+
+ return ctlr->can_dma ? S3C64XX_SPI_PACKET_CNT_MASK : SIZE_MAX;
+}
+
+static int s3c64xx_spi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
+ const unsigned int fifo_len = (FIFO_LVL_MASK(sdd) >> 1) + 1;
+ const void *tx_buf = NULL;
+ void *rx_buf = NULL;
+ int target_len = 0, origin_len = 0;
+ int use_dma = 0;
+ int status;
+ u32 speed;
+ u8 bpw;
+ unsigned long flags;
+
+ reinit_completion(&sdd->xfer_completion);
+
+ /* Only BPW and Speed may change across transfers */
+ bpw = xfer->bits_per_word;
+ speed = xfer->speed_hz;
+
+ if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) {
+ sdd->cur_bpw = bpw;
+ sdd->cur_speed = speed;
+ sdd->cur_mode = spi->mode;
+ status = s3c64xx_spi_config(sdd);
+ if (status)
+ return status;
+ }
+
+ if (!is_polling(sdd) && (xfer->len > fifo_len) &&
+ sdd->rx_dma.ch && sdd->tx_dma.ch) {
+ use_dma = 1;
+
+ } else if (xfer->len > fifo_len) {
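+ /*
+ * Polling mode with a transfer larger than the FIFO: save the
+ * original buffers/length and process the transfer below in
+ * FIFO-sized chunks.
+ */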
+ tx_buf = xfer->tx_buf;
+ rx_buf = xfer->rx_buf;
+ origin_len = xfer->len;
+
+ target_len = xfer->len;
+ if (xfer->len > fifo_len)
+ xfer->len = fifo_len;
+ }
+
+ do {
+ spin_lock_irqsave(&sdd->lock, flags);
+
+ /* Clear busy flags; only the work still to be done will set them */
+ sdd->state &= ~RXBUSY;
+ sdd->state &= ~TXBUSY;
+
+ /* Start the signals */
+ s3c64xx_spi_set_cs(spi, true);
+
+ status = s3c64xx_enable_datapath(sdd, xfer, use_dma);
+
+ spin_unlock_irqrestore(&sdd->lock, flags);
+
+ if (status) {
+ dev_err(&spi->dev, "failed to enable data path for transfer: %d\n", status);
+ break;
+ }
+
+ if (use_dma)
+ status = s3c64xx_wait_for_dma(sdd, xfer);
+ else
+ status = s3c64xx_wait_for_pio(sdd, xfer);
+
+ if (status) {
+ dev_err(&spi->dev,
+ "I/O Error: rx-%d tx-%d rx-%c tx-%c len-%d dma-%d res-(%d)\n",
+ xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0,
+ (sdd->state & RXBUSY) ? 'f' : 'p',
+ (sdd->state & TXBUSY) ? 'f' : 'p',
+ xfer->len, use_dma ? 1 : 0, status);
+
+ if (use_dma) {
+ struct dma_tx_state s;
+
+ if (xfer->tx_buf && (sdd->state & TXBUSY)) {
+ dmaengine_pause(sdd->tx_dma.ch);
+ dmaengine_tx_status(sdd->tx_dma.ch, sdd->tx_dma.cookie, &s);
+ dmaengine_terminate_all(sdd->tx_dma.ch);
+ dev_err(&spi->dev, "TX residue: %d\n", s.residue);
+
+ }
+ if (xfer->rx_buf && (sdd->state & RXBUSY)) {
+ dmaengine_pause(sdd->rx_dma.ch);
+ dmaengine_tx_status(sdd->rx_dma.ch, sdd->rx_dma.cookie, &s);
+ dmaengine_terminate_all(sdd->rx_dma.ch);
+ dev_err(&spi->dev, "RX residue: %d\n", s.residue);
+ }
+ }
+ } else {
+ s3c64xx_flush_fifo(sdd);
+ }
+ if (target_len > 0) {
+ target_len -= xfer->len;
+
+ if (xfer->tx_buf)
+ xfer->tx_buf += xfer->len;
+
+ if (xfer->rx_buf)
+ xfer->rx_buf += xfer->len;
+
+ if (target_len > fifo_len)
+ xfer->len = fifo_len;
+ else
+ xfer->len = target_len;
+ }
+ } while (target_len > 0);
+
+ if (origin_len) {
+ /* Restore original xfer buffers and length */
+ xfer->tx_buf = tx_buf;
+ xfer->rx_buf = rx_buf;
+ xfer->len = origin_len;
+ }
+
+ return status;
+}
+
+static struct s3c64xx_spi_csinfo *s3c64xx_get_slave_ctrldata(
+ struct spi_device *spi)
+{
+ struct s3c64xx_spi_csinfo *cs;
+ struct device_node *slave_np, *data_np = NULL;
+ u32 fb_delay = 0;
+
+ slave_np = spi->dev.of_node;
+ if (!slave_np) {
+ dev_err(&spi->dev, "device node not found\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ cs = kzalloc(sizeof(*cs), GFP_KERNEL);
+ if (!cs)
+ return ERR_PTR(-ENOMEM);
+
+ data_np = of_get_child_by_name(slave_np, "controller-data");
+ if (!data_np) {
+ dev_info(&spi->dev, "feedback delay set to default (0)\n");
+ return cs;
+ }
+
+ of_property_read_u32(data_np, "samsung,spi-feedback-delay", &fb_delay);
+ cs->fb_delay = fb_delay;
+ of_node_put(data_np);
+ return cs;
+}
+
+/*
+ * Here we only check the validity of requested configuration
+ * and save the configuration in a local data-structure.
+ * The controller is actually configured only just before we
+ * get a message to transfer.
+ */
+static int s3c64xx_spi_setup(struct spi_device *spi)
+{
+ struct s3c64xx_spi_csinfo *cs = spi->controller_data;
+ struct s3c64xx_spi_driver_data *sdd;
+ int err;
+ int div;
+
+ sdd = spi_master_get_devdata(spi->master);
+ if (spi->dev.of_node) {
+ cs = s3c64xx_get_slave_ctrldata(spi);
+ spi->controller_data = cs;
+ }
+
+ /* NULL is fine, we just avoid using the FB delay (=0) */
+ if (IS_ERR(cs)) {
+ dev_err(&spi->dev, "No CS for SPI(%d)\n", spi->chip_select);
+ return -ENODEV;
+ }
+
+ if (!spi_get_ctldata(spi))
+ spi_set_ctldata(spi, cs);
+
+ pm_runtime_get_sync(&sdd->pdev->dev);
+
+ div = sdd->port_conf->clk_div;
+
+ /* Check if we can provide the requested rate */
+ if (!sdd->port_conf->clk_from_cmu) {
+ u32 psr, speed;
+
+ /* Max possible */
+ speed = clk_get_rate(sdd->src_clk) / div / (0 + 1);
+
+ if (spi->max_speed_hz > speed)
+ spi->max_speed_hz = speed;
+
+ psr = clk_get_rate(sdd->src_clk) / div / spi->max_speed_hz - 1;
+ psr &= S3C64XX_SPI_PSR_MASK;
+ if (psr == S3C64XX_SPI_PSR_MASK)
+ psr--;
+
+ speed = clk_get_rate(sdd->src_clk) / div / (psr + 1);
+ if (spi->max_speed_hz < speed) {
+ if (psr+1 < S3C64XX_SPI_PSR_MASK) {
+ psr++;
+ } else {
+ err = -EINVAL;
+ goto setup_exit;
+ }
+ }
+
+ speed = clk_get_rate(sdd->src_clk) / div / (psr + 1);
+ if (spi->max_speed_hz >= speed) {
+ spi->max_speed_hz = speed;
+ } else {
+ dev_err(&spi->dev, "Can't set %dHz transfer speed\n",
+ spi->max_speed_hz);
+ err = -EINVAL;
+ goto setup_exit;
+ }
+ }
+
+ pm_runtime_mark_last_busy(&sdd->pdev->dev);
+ pm_runtime_put_autosuspend(&sdd->pdev->dev);
+ s3c64xx_spi_set_cs(spi, false);
+
+ return 0;
+
+setup_exit:
+ pm_runtime_mark_last_busy(&sdd->pdev->dev);
+ pm_runtime_put_autosuspend(&sdd->pdev->dev);
+ /* setup() returns with device de-selected */
+ s3c64xx_spi_set_cs(spi, false);
+
+ spi_set_ctldata(spi, NULL);
+
+ /* This was dynamically allocated on the DT path */
+ if (spi->dev.of_node)
+ kfree(cs);
+
+ return err;
+}
+
+static void s3c64xx_spi_cleanup(struct spi_device *spi)
+{
+ struct s3c64xx_spi_csinfo *cs = spi_get_ctldata(spi);
+
+ /* This was dynamically allocated on the DT path */
+ if (spi->dev.of_node)
+ kfree(cs);
+
+ spi_set_ctldata(spi, NULL);
+}
+
+static irqreturn_t s3c64xx_spi_irq(int irq, void *data)
+{
+ struct s3c64xx_spi_driver_data *sdd = data;
+ struct spi_master *spi = sdd->master;
+ unsigned int val, clr = 0;
+
+ val = readl(sdd->regs + S3C64XX_SPI_STATUS);
+
+ if (val & S3C64XX_SPI_ST_RX_OVERRUN_ERR) {
+ clr = S3C64XX_SPI_PND_RX_OVERRUN_CLR;
+ dev_err(&spi->dev, "RX overrun\n");
+ }
+ if (val & S3C64XX_SPI_ST_RX_UNDERRUN_ERR) {
+ clr |= S3C64XX_SPI_PND_RX_UNDERRUN_CLR;
+ dev_err(&spi->dev, "RX underrun\n");
+ }
+ if (val & S3C64XX_SPI_ST_TX_OVERRUN_ERR) {
+ clr |= S3C64XX_SPI_PND_TX_OVERRUN_CLR;
+ dev_err(&spi->dev, "TX overrun\n");
+ }
+ if (val & S3C64XX_SPI_ST_TX_UNDERRUN_ERR) {
+ clr |= S3C64XX_SPI_PND_TX_UNDERRUN_CLR;
+ dev_err(&spi->dev, "TX underrun\n");
+ }
+
+ /* Clear the pending irq by setting and then clearing it */
+ writel(clr, sdd->regs + S3C64XX_SPI_PENDING_CLR);
+ writel(0, sdd->regs + S3C64XX_SPI_PENDING_CLR);
+
+ return IRQ_HANDLED;
+}
+
+static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd)
+{
+ struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
+ void __iomem *regs = sdd->regs;
+ unsigned int val;
+
+ sdd->cur_speed = 0;
+
+ if (sci->no_cs)
+ writel(0, sdd->regs + S3C64XX_SPI_CS_REG);
+ else if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO))
+ writel(S3C64XX_SPI_CS_SIG_INACT, sdd->regs + S3C64XX_SPI_CS_REG);
+
+ /* Disable Interrupts - we use Polling if not DMA mode */
+ writel(0, regs + S3C64XX_SPI_INT_EN);
+
+ if (!sdd->port_conf->clk_from_cmu)
+ writel(sci->src_clk_nr << S3C64XX_SPI_CLKSEL_SRCSHFT,
+ regs + S3C64XX_SPI_CLK_CFG);
+ writel(0, regs + S3C64XX_SPI_MODE_CFG);
+ writel(0, regs + S3C64XX_SPI_PACKET_CNT);
+
+ /* Clear any pending IRQ bits by setting and then clearing them */
+ val = S3C64XX_SPI_PND_RX_OVERRUN_CLR |
+ S3C64XX_SPI_PND_RX_UNDERRUN_CLR |
+ S3C64XX_SPI_PND_TX_OVERRUN_CLR |
+ S3C64XX_SPI_PND_TX_UNDERRUN_CLR;
+ writel(val, regs + S3C64XX_SPI_PENDING_CLR);
+ writel(0, regs + S3C64XX_SPI_PENDING_CLR);
+
+ writel(0, regs + S3C64XX_SPI_SWAP_CFG);
+
+ val = readl(regs + S3C64XX_SPI_MODE_CFG);
+ val &= ~S3C64XX_SPI_MODE_4BURST;
+ val &= ~(S3C64XX_SPI_MAX_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
+ val |= (S3C64XX_SPI_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
+ writel(val, regs + S3C64XX_SPI_MODE_CFG);
+
+ s3c64xx_flush_fifo(sdd);
+}
+
+#ifdef CONFIG_OF
+static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
+{
+ struct s3c64xx_spi_info *sci;
+ u32 temp;
+
+ sci = devm_kzalloc(dev, sizeof(*sci), GFP_KERNEL);
+ if (!sci)
+ return ERR_PTR(-ENOMEM);
+
+ if (of_property_read_u32(dev->of_node, "samsung,spi-src-clk", &temp)) {
+ dev_warn(dev, "spi bus clock parent not specified, using clock at index 0 as parent\n");
+ sci->src_clk_nr = 0;
+ } else {
+ sci->src_clk_nr = temp;
+ }
+
+ if (of_property_read_u32(dev->of_node, "num-cs", &temp)) {
+ dev_warn(dev, "number of chip select lines not specified, assuming 1 chip select line\n");
+ sci->num_cs = 1;
+ } else {
+ sci->num_cs = temp;
+ }
+
+ sci->no_cs = of_property_read_bool(dev->of_node, "no-cs-readback");
+
+ return sci;
+}
+#else
+static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
+{
+ return dev_get_platdata(dev);
+}
+#endif
+
+static inline const struct s3c64xx_spi_port_config *s3c64xx_spi_get_port_config(
+ struct platform_device *pdev)
+{
+#ifdef CONFIG_OF
+ if (pdev->dev.of_node)
+ return of_device_get_match_data(&pdev->dev);
+#endif
+ return (const struct s3c64xx_spi_port_config *)platform_get_device_id(pdev)->driver_data;
+}
+
+static int s3c64xx_spi_probe(struct platform_device *pdev)
+{
+ struct resource *mem_res;
+ struct s3c64xx_spi_driver_data *sdd;
+ struct s3c64xx_spi_info *sci = dev_get_platdata(&pdev->dev);
+ struct spi_master *master;
+ int ret, irq;
+ char clk_name[16];
+
+ if (!sci && pdev->dev.of_node) {
+ sci = s3c64xx_spi_parse_dt(&pdev->dev);
+ if (IS_ERR(sci))
+ return PTR_ERR(sci);
+ }
+
+ if (!sci) {
+ dev_err(&pdev->dev, "platform_data missing!\n");
+ return -ENODEV;
+ }
+
+ mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (mem_res == NULL) {
+ dev_err(&pdev->dev, "Unable to get SPI MEM resource\n");
+ return -ENXIO;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_warn(&pdev->dev, "Failed to get IRQ: %d\n", irq);
+ return irq;
+ }
+
+ master = spi_alloc_master(&pdev->dev,
+ sizeof(struct s3c64xx_spi_driver_data));
+ if (master == NULL) {
+ dev_err(&pdev->dev, "Unable to allocate SPI Master\n");
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, master);
+
+ sdd = spi_master_get_devdata(master);
+ sdd->port_conf = s3c64xx_spi_get_port_config(pdev);
+ sdd->master = master;
+ sdd->cntrlr_info = sci;
+ sdd->pdev = pdev;
+ sdd->sfr_start = mem_res->start;
+ if (pdev->dev.of_node) {
+ ret = of_alias_get_id(pdev->dev.of_node, "spi");
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to get alias id, errno %d\n",
+ ret);
+ goto err_deref_master;
+ }
+ sdd->port_id = ret;
+ } else {
+ sdd->port_id = pdev->id;
+ }
+
+ sdd->cur_bpw = 8;
+
+ sdd->tx_dma.direction = DMA_MEM_TO_DEV;
+ sdd->rx_dma.direction = DMA_DEV_TO_MEM;
+
+ master->dev.of_node = pdev->dev.of_node;
+ master->bus_num = sdd->port_id;
+ master->setup = s3c64xx_spi_setup;
+ master->cleanup = s3c64xx_spi_cleanup;
+ master->prepare_transfer_hardware = s3c64xx_spi_prepare_transfer;
+ master->unprepare_transfer_hardware = s3c64xx_spi_unprepare_transfer;
+ master->prepare_message = s3c64xx_spi_prepare_message;
+ master->transfer_one = s3c64xx_spi_transfer_one;
+ master->max_transfer_size = s3c64xx_spi_max_transfer_size;
+ master->num_chipselect = sci->num_cs;
+ master->use_gpio_descriptors = true;
+ master->dma_alignment = 8;
+ master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
+ SPI_BPW_MASK(8);
+ /* the spi->mode bits understood by this driver: */
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ if (sdd->port_conf->has_loopback)
+ master->mode_bits |= SPI_LOOP;
+ master->auto_runtime_pm = true;
+ if (!is_polling(sdd))
+ master->can_dma = s3c64xx_spi_can_dma;
+
+ sdd->regs = devm_ioremap_resource(&pdev->dev, mem_res);
+ if (IS_ERR(sdd->regs)) {
+ ret = PTR_ERR(sdd->regs);
+ goto err_deref_master;
+ }
+
+ if (sci->cfg_gpio && sci->cfg_gpio()) {
+ dev_err(&pdev->dev, "Unable to config gpio\n");
+ ret = -EBUSY;
+ goto err_deref_master;
+ }
+
+ /* Setup clocks */
+ sdd->clk = devm_clk_get(&pdev->dev, "spi");
+ if (IS_ERR(sdd->clk)) {
+ dev_err(&pdev->dev, "Unable to acquire clock 'spi'\n");
+ ret = PTR_ERR(sdd->clk);
+ goto err_deref_master;
+ }
+
+ ret = clk_prepare_enable(sdd->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Couldn't enable clock 'spi'\n");
+ goto err_deref_master;
+ }
+
+ sprintf(clk_name, "spi_busclk%d", sci->src_clk_nr);
+ sdd->src_clk = devm_clk_get(&pdev->dev, clk_name);
+ if (IS_ERR(sdd->src_clk)) {
+ dev_err(&pdev->dev,
+ "Unable to acquire clock '%s'\n", clk_name);
+ ret = PTR_ERR(sdd->src_clk);
+ goto err_disable_clk;
+ }
+
+ ret = clk_prepare_enable(sdd->src_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Couldn't enable clock '%s'\n", clk_name);
+ goto err_disable_clk;
+ }
+
+ if (sdd->port_conf->clk_ioclk) {
+ sdd->ioclk = devm_clk_get(&pdev->dev, "spi_ioclk");
+ if (IS_ERR(sdd->ioclk)) {
+ dev_err(&pdev->dev, "Unable to acquire 'ioclk'\n");
+ ret = PTR_ERR(sdd->ioclk);
+ goto err_disable_src_clk;
+ }
+
+ ret = clk_prepare_enable(sdd->ioclk);
+ if (ret) {
+ dev_err(&pdev->dev, "Couldn't enable clock 'ioclk'\n");
+ goto err_disable_src_clk;
+ }
+ }
+
+ pm_runtime_set_autosuspend_delay(&pdev->dev, AUTOSUSPEND_TIMEOUT);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
+ /* Setup Default Mode */
+ s3c64xx_spi_hwinit(sdd);
+
+ spin_lock_init(&sdd->lock);
+ init_completion(&sdd->xfer_completion);
+
+ ret = devm_request_irq(&pdev->dev, irq, s3c64xx_spi_irq, 0,
+ "spi-s3c64xx", sdd);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "Failed to request IRQ %d: %d\n",
+ irq, ret);
+ goto err_pm_put;
+ }
+
+ writel(S3C64XX_SPI_INT_RX_OVERRUN_EN | S3C64XX_SPI_INT_RX_UNDERRUN_EN |
+ S3C64XX_SPI_INT_TX_OVERRUN_EN | S3C64XX_SPI_INT_TX_UNDERRUN_EN,
+ sdd->regs + S3C64XX_SPI_INT_EN);
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "cannot register SPI master: %d\n", ret);
+ goto err_pm_put;
+ }
+
+ dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d with %d Slaves attached\n",
+ sdd->port_id, master->num_chipselect);
+ dev_dbg(&pdev->dev, "\tIOmem=[%pR]\tFIFO %dbytes\n",
+ mem_res, (FIFO_LVL_MASK(sdd) >> 1) + 1);
+
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
+
+ return 0;
+
+err_pm_put:
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+
+ clk_disable_unprepare(sdd->ioclk);
+err_disable_src_clk:
+ clk_disable_unprepare(sdd->src_clk);
+err_disable_clk:
+ clk_disable_unprepare(sdd->clk);
+err_deref_master:
+ spi_master_put(master);
+
+ return ret;
+}
+
+static int s3c64xx_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
+
+ pm_runtime_get_sync(&pdev->dev);
+
+ writel(0, sdd->regs + S3C64XX_SPI_INT_EN);
+
+ if (!is_polling(sdd)) {
+ dma_release_channel(sdd->rx_dma.ch);
+ dma_release_channel(sdd->tx_dma.ch);
+ }
+
+ clk_disable_unprepare(sdd->ioclk);
+
+ clk_disable_unprepare(sdd->src_clk);
+
+ clk_disable_unprepare(sdd->clk);
+
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int s3c64xx_spi_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
+
+ int ret = spi_master_suspend(master);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_force_suspend(dev);
+ if (ret < 0)
+ return ret;
+
+ sdd->cur_speed = 0; /* Output Clock is stopped */
+
+ return 0;
+}
+
+static int s3c64xx_spi_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
+ struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
+ int ret;
+
+ if (sci->cfg_gpio)
+ sci->cfg_gpio();
+
+ ret = pm_runtime_force_resume(dev);
+ if (ret < 0)
+ return ret;
+
+ return spi_master_resume(master);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_PM
+static int s3c64xx_spi_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
+
+ clk_disable_unprepare(sdd->clk);
+ clk_disable_unprepare(sdd->src_clk);
+ clk_disable_unprepare(sdd->ioclk);
+
+ return 0;
+}
+
+static int s3c64xx_spi_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
+ int ret;
+
+ if (sdd->port_conf->clk_ioclk) {
+ ret = clk_prepare_enable(sdd->ioclk);
+ if (ret != 0)
+ return ret;
+ }
+
+ ret = clk_prepare_enable(sdd->src_clk);
+ if (ret != 0)
+ goto err_disable_ioclk;
+
+ ret = clk_prepare_enable(sdd->clk);
+ if (ret != 0)
+ goto err_disable_src_clk;
+
+ s3c64xx_spi_hwinit(sdd);
+
+ writel(S3C64XX_SPI_INT_RX_OVERRUN_EN | S3C64XX_SPI_INT_RX_UNDERRUN_EN |
+ S3C64XX_SPI_INT_TX_OVERRUN_EN | S3C64XX_SPI_INT_TX_UNDERRUN_EN,
+ sdd->regs + S3C64XX_SPI_INT_EN);
+
+ return 0;
+
+err_disable_src_clk:
+ clk_disable_unprepare(sdd->src_clk);
+err_disable_ioclk:
+ clk_disable_unprepare(sdd->ioclk);
+
+ return ret;
+}
+#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops s3c64xx_spi_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(s3c64xx_spi_suspend, s3c64xx_spi_resume)
+ SET_RUNTIME_PM_OPS(s3c64xx_spi_runtime_suspend,
+ s3c64xx_spi_runtime_resume, NULL)
+};
+
+static const struct s3c64xx_spi_port_config s3c2443_spi_port_config = {
+ .fifo_lvl_mask = { 0x7f },
+ .rx_lvl_offset = 13,
+ .tx_st_done = 21,
+ .clk_div = 2,
+ .high_speed = true,
+};
+
+static const struct s3c64xx_spi_port_config s3c6410_spi_port_config = {
+ .fifo_lvl_mask = { 0x7f, 0x7F },
+ .rx_lvl_offset = 13,
+ .tx_st_done = 21,
+ .clk_div = 2,
+};
+
+static const struct s3c64xx_spi_port_config s5pv210_spi_port_config = {
+ .fifo_lvl_mask = { 0x1ff, 0x7F },
+ .rx_lvl_offset = 15,
+ .tx_st_done = 25,
+ .clk_div = 2,
+ .high_speed = true,
+};
+
+static const struct s3c64xx_spi_port_config exynos4_spi_port_config = {
+ .fifo_lvl_mask = { 0x1ff, 0x7F, 0x7F },
+ .rx_lvl_offset = 15,
+ .tx_st_done = 25,
+ .clk_div = 2,
+ .high_speed = true,
+ .clk_from_cmu = true,
+ .quirks = S3C64XX_SPI_QUIRK_CS_AUTO,
+};
+
+static const struct s3c64xx_spi_port_config exynos7_spi_port_config = {
+ .fifo_lvl_mask = { 0x1ff, 0x7F, 0x7F, 0x7F, 0x7F, 0x1ff},
+ .rx_lvl_offset = 15,
+ .tx_st_done = 25,
+ .clk_div = 2,
+ .high_speed = true,
+ .clk_from_cmu = true,
+ .quirks = S3C64XX_SPI_QUIRK_CS_AUTO,
+};
+
+static const struct s3c64xx_spi_port_config exynos5433_spi_port_config = {
+ .fifo_lvl_mask = { 0x1ff, 0x7f, 0x7f, 0x7f, 0x7f, 0x1ff},
+ .rx_lvl_offset = 15,
+ .tx_st_done = 25,
+ .clk_div = 2,
+ .high_speed = true,
+ .clk_from_cmu = true,
+ .clk_ioclk = true,
+ .quirks = S3C64XX_SPI_QUIRK_CS_AUTO,
+};
+
+static const struct s3c64xx_spi_port_config exynosautov9_spi_port_config = {
+ .fifo_lvl_mask = { 0x1ff, 0x1ff, 0x7f, 0x7f, 0x7f, 0x7f, 0x1ff, 0x7f,
+ 0x7f, 0x7f, 0x7f, 0x7f},
+ .rx_lvl_offset = 15,
+ .tx_st_done = 25,
+ .clk_div = 4,
+ .high_speed = true,
+ .clk_from_cmu = true,
+ .clk_ioclk = true,
+ .has_loopback = true,
+ .quirks = S3C64XX_SPI_QUIRK_CS_AUTO,
+};
+
+static const struct s3c64xx_spi_port_config fsd_spi_port_config = {
+ .fifo_lvl_mask = { 0x7f, 0x7f, 0x7f, 0x7f, 0x7f},
+ .rx_lvl_offset = 15,
+ .tx_st_done = 25,
+ .clk_div = 2,
+ .high_speed = true,
+ .clk_from_cmu = true,
+ .clk_ioclk = false,
+ .quirks = S3C64XX_SPI_QUIRK_CS_AUTO,
+};
+
+static const struct platform_device_id s3c64xx_spi_driver_ids[] = {
+ {
+ .name = "s3c2443-spi",
+ .driver_data = (kernel_ulong_t)&s3c2443_spi_port_config,
+ }, {
+ .name = "s3c6410-spi",
+ .driver_data = (kernel_ulong_t)&s3c6410_spi_port_config,
+ },
+ { },
+};
+
+static const struct of_device_id s3c64xx_spi_dt_match[] = {
+ { .compatible = "samsung,s3c2443-spi",
+ .data = (void *)&s3c2443_spi_port_config,
+ },
+ { .compatible = "samsung,s3c6410-spi",
+ .data = (void *)&s3c6410_spi_port_config,
+ },
+ { .compatible = "samsung,s5pv210-spi",
+ .data = (void *)&s5pv210_spi_port_config,
+ },
+ { .compatible = "samsung,exynos4210-spi",
+ .data = (void *)&exynos4_spi_port_config,
+ },
+ { .compatible = "samsung,exynos7-spi",
+ .data = (void *)&exynos7_spi_port_config,
+ },
+ { .compatible = "samsung,exynos5433-spi",
+ .data = (void *)&exynos5433_spi_port_config,
+ },
+ { .compatible = "samsung,exynosautov9-spi",
+ .data = (void *)&exynosautov9_spi_port_config,
+ },
+ { .compatible = "tesla,fsd-spi",
+ .data = (void *)&fsd_spi_port_config,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, s3c64xx_spi_dt_match);
+
+static struct platform_driver s3c64xx_spi_driver = {
+ .driver = {
+ .name = "s3c64xx-spi",
+ .pm = &s3c64xx_spi_pm,
+ .of_match_table = of_match_ptr(s3c64xx_spi_dt_match),
+ },
+ .probe = s3c64xx_spi_probe,
+ .remove = s3c64xx_spi_remove,
+ .id_table = s3c64xx_spi_driver_ids,
+};
+MODULE_ALIAS("platform:s3c64xx-spi");
+
+module_platform_driver(s3c64xx_spi_driver);
+
+MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
+MODULE_DESCRIPTION("S3C64XX SPI Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-sc18is602.c b/drivers/spi/spi-sc18is602.c
new file mode 100644
index 000000000..5d27ee482
--- /dev/null
+++ b/drivers/spi/spi-sc18is602.c
@@ -0,0 +1,348 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * NXP SC18IS602/603 SPI driver
+ *
+ * Copyright (C) Guenter Roeck <linux@roeck-us.net>
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_data/sc18is602.h>
+#include <linux/gpio/consumer.h>
+
+enum chips { sc18is602, sc18is602b, sc18is603 };
+
+#define SC18IS602_BUFSIZ 200
+#define SC18IS602_CLOCK 7372000
+
+#define SC18IS602_MODE_CPHA BIT(2)
+#define SC18IS602_MODE_CPOL BIT(3)
+#define SC18IS602_MODE_LSB_FIRST BIT(5)
+#define SC18IS602_MODE_CLOCK_DIV_4 0x0
+#define SC18IS602_MODE_CLOCK_DIV_16 0x1
+#define SC18IS602_MODE_CLOCK_DIV_64 0x2
+#define SC18IS602_MODE_CLOCK_DIV_128 0x3
+
+struct sc18is602 {
+ struct spi_master *master;
+ struct device *dev;
+ u8 ctrl;
+ u32 freq;
+ u32 speed;
+
+ /* I2C data */
+ struct i2c_client *client;
+ enum chips id;
+ u8 buffer[SC18IS602_BUFSIZ + 1];
+ int tlen; /* Data queued for tx in buffer */
+ int rindex; /* Receive data index in buffer */
+
+ struct gpio_desc *reset;
+};
+
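+/*
+ * Poll the chip with dummy I2C reads until it responds, sleeping between
+ * attempts for a period derived from the transfer length and the current
+ * SPI clock speed.
+ */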
+static int sc18is602_wait_ready(struct sc18is602 *hw, int len)
+{
+ int i, err;
+ int usecs = 1000000 * len / hw->speed + 1;
+ u8 dummy[1];
+
+ for (i = 0; i < 10; i++) {
+ err = i2c_master_recv(hw->client, dummy, 1);
+ if (err >= 0)
+ return 0;
+ usleep_range(usecs, usecs * 2);
+ }
+ return -ETIMEDOUT;
+}
+
+static int sc18is602_txrx(struct sc18is602 *hw, struct spi_message *msg,
+ struct spi_transfer *t, bool do_transfer)
+{
+ unsigned int len = t->len;
+ int ret;
+
+ if (hw->tlen == 0) {
+ /* First byte (I2C command) is chip select */
+ hw->buffer[0] = 1 << msg->spi->chip_select;
+ hw->tlen = 1;
+ hw->rindex = 0;
+ }
+ /*
+ * We cannot immediately send data to the chip, since each I2C message
+ * corresponds to a full SPI message (from CS active to CS inactive).
+ * Enqueue transfer data up to the first read or until do_transfer is true.
+ */
+ if (t->tx_buf) {
+ memcpy(&hw->buffer[hw->tlen], t->tx_buf, len);
+ hw->tlen += len;
+ if (t->rx_buf)
+ do_transfer = true;
+ else
+ hw->rindex = hw->tlen - 1;
+ } else if (t->rx_buf) {
+ /*
+ * For receive-only transfers we still need to perform a dummy
+ * write to receive data from the SPI chip.
+ * Read data starts at the end of transmit data (minus 1 to
+ * account for CS).
+ */
+ hw->rindex = hw->tlen - 1;
+ memset(&hw->buffer[hw->tlen], 0, len);
+ hw->tlen += len;
+ do_transfer = true;
+ }
+
+ if (do_transfer && hw->tlen > 1) {
+ ret = sc18is602_wait_ready(hw, SC18IS602_BUFSIZ);
+ if (ret < 0)
+ return ret;
+ ret = i2c_master_send(hw->client, hw->buffer, hw->tlen);
+ if (ret < 0)
+ return ret;
+ if (ret != hw->tlen)
+ return -EIO;
+
+ if (t->rx_buf) {
+ int rlen = hw->rindex + len;
+
+ ret = sc18is602_wait_ready(hw, hw->tlen);
+ if (ret < 0)
+ return ret;
+ ret = i2c_master_recv(hw->client, hw->buffer, rlen);
+ if (ret < 0)
+ return ret;
+ if (ret != rlen)
+ return -EIO;
+ memcpy(t->rx_buf, &hw->buffer[hw->rindex], len);
+ }
+ hw->tlen = 0;
+ }
+ return len;
+}
+
+static int sc18is602_setup_transfer(struct sc18is602 *hw, u32 hz, u8 mode)
+{
+ u8 ctrl = 0;
+ int ret;
+
+ if (mode & SPI_CPHA)
+ ctrl |= SC18IS602_MODE_CPHA;
+ if (mode & SPI_CPOL)
+ ctrl |= SC18IS602_MODE_CPOL;
+ if (mode & SPI_LSB_FIRST)
+ ctrl |= SC18IS602_MODE_LSB_FIRST;
+
+ /* Find the closest clock speed */
+ if (hz >= hw->freq / 4) {
+ ctrl |= SC18IS602_MODE_CLOCK_DIV_4;
+ hw->speed = hw->freq / 4;
+ } else if (hz >= hw->freq / 16) {
+ ctrl |= SC18IS602_MODE_CLOCK_DIV_16;
+ hw->speed = hw->freq / 16;
+ } else if (hz >= hw->freq / 64) {
+ ctrl |= SC18IS602_MODE_CLOCK_DIV_64;
+ hw->speed = hw->freq / 64;
+ } else {
+ ctrl |= SC18IS602_MODE_CLOCK_DIV_128;
+ hw->speed = hw->freq / 128;
+ }
+
+ /*
+ * Don't do anything if the control value did not change. The initial
+ * value of 0xff for hw->ctrl ensures that the correct mode will be set
+ * with the first call to this function.
+ */
+ if (ctrl == hw->ctrl)
+ return 0;
+
+ ret = i2c_smbus_write_byte_data(hw->client, 0xf0, ctrl);
+ if (ret < 0)
+ return ret;
+
+ hw->ctrl = ctrl;
+
+ return 0;
+}
+
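+/* Reject transfers that would overflow the chip's buffer (CS byte + data) */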
+static int sc18is602_check_transfer(struct spi_device *spi,
+ struct spi_transfer *t, int tlen)
+{
+ if (t && t->len + tlen > SC18IS602_BUFSIZ + 1)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int sc18is602_transfer_one(struct spi_master *master,
+ struct spi_message *m)
+{
+ struct sc18is602 *hw = spi_master_get_devdata(master);
+ struct spi_device *spi = m->spi;
+ struct spi_transfer *t;
+ int status = 0;
+
+ hw->tlen = 0;
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ bool do_transfer;
+
+ status = sc18is602_check_transfer(spi, t, hw->tlen);
+ if (status < 0)
+ break;
+
+ status = sc18is602_setup_transfer(hw, t->speed_hz, spi->mode);
+ if (status < 0)
+ break;
+
+ do_transfer = t->cs_change || list_is_last(&t->transfer_list,
+ &m->transfers);
+
+ if (t->len) {
+ status = sc18is602_txrx(hw, m, t, do_transfer);
+ if (status < 0)
+ break;
+ m->actual_length += status;
+ }
+ status = 0;
+
+ spi_transfer_delay_exec(t);
+ }
+ m->status = status;
+ spi_finalize_current_message(master);
+
+ return status;
+}
+
+static size_t sc18is602_max_transfer_size(struct spi_device *spi)
+{
+ return SC18IS602_BUFSIZ;
+}
+
+static int sc18is602_setup(struct spi_device *spi)
+{
+ struct sc18is602 *hw = spi_master_get_devdata(spi->master);
+
+ /* SC18IS602 does not support CS2 */
+ if (hw->id == sc18is602 && spi->chip_select == 2)
+ return -ENXIO;
+
+ return 0;
+}
+
+static int sc18is602_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct device_node *np = dev->of_node;
+ struct sc18is602_platform_data *pdata = dev_get_platdata(dev);
+ struct sc18is602 *hw;
+ struct spi_master *master;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C |
+ I2C_FUNC_SMBUS_WRITE_BYTE_DATA))
+ return -EINVAL;
+
+ master = devm_spi_alloc_master(dev, sizeof(struct sc18is602));
+ if (!master)
+ return -ENOMEM;
+
+ hw = spi_master_get_devdata(master);
+ i2c_set_clientdata(client, hw);
+
+ /* assert reset and then release */
+ hw->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(hw->reset))
+ return PTR_ERR(hw->reset);
+ gpiod_set_value_cansleep(hw->reset, 0);
+
+ hw->master = master;
+ hw->client = client;
+ hw->dev = dev;
+ hw->ctrl = 0xff;
+
+ if (client->dev.of_node)
+ hw->id = (enum chips)of_device_get_match_data(&client->dev);
+ else
+ hw->id = id->driver_data;
+
+ switch (hw->id) {
+ case sc18is602:
+ case sc18is602b:
+ master->num_chipselect = 4;
+ hw->freq = SC18IS602_CLOCK;
+ break;
+ case sc18is603:
+ master->num_chipselect = 2;
+ if (pdata) {
+ hw->freq = pdata->clock_frequency;
+ } else {
+ const __be32 *val;
+ int len;
+
+ val = of_get_property(np, "clock-frequency", &len);
+ if (val && len >= sizeof(__be32))
+ hw->freq = be32_to_cpup(val);
+ }
+ if (!hw->freq)
+ hw->freq = SC18IS602_CLOCK;
+ break;
+ }
+ master->bus_num = np ? -1 : client->adapter->nr;
+ master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_LSB_FIRST;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->setup = sc18is602_setup;
+ master->transfer_one_message = sc18is602_transfer_one;
+ master->max_transfer_size = sc18is602_max_transfer_size;
+ master->max_message_size = sc18is602_max_transfer_size;
+ master->dev.of_node = np;
+ master->min_speed_hz = hw->freq / 128;
+ master->max_speed_hz = hw->freq / 4;
+
+ return devm_spi_register_master(dev, master);
+}
+
+static const struct i2c_device_id sc18is602_id[] = {
+ { "sc18is602", sc18is602 },
+ { "sc18is602b", sc18is602b },
+ { "sc18is603", sc18is603 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, sc18is602_id);
+
+static const struct of_device_id sc18is602_of_match[] = {
+ {
+ .compatible = "nxp,sc18is602",
+ .data = (void *)sc18is602
+ },
+ {
+ .compatible = "nxp,sc18is602b",
+ .data = (void *)sc18is602b
+ },
+ {
+ .compatible = "nxp,sc18is603",
+ .data = (void *)sc18is603
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sc18is602_of_match);
+
+static struct i2c_driver sc18is602_driver = {
+ .driver = {
+ .name = "sc18is602",
+ .of_match_table = of_match_ptr(sc18is602_of_match),
+ },
+ .probe = sc18is602_probe,
+ .id_table = sc18is602_id,
+};
+
+module_i2c_driver(sc18is602_driver);
+
+MODULE_DESCRIPTION("SC18IS602/603 SPI Master Driver");
+MODULE_AUTHOR("Guenter Roeck");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-sh-hspi.c b/drivers/spi/spi-sh-hspi.c
new file mode 100644
index 000000000..a62034e2a
--- /dev/null
+++ b/drivers/spi/spi-sh-hspi.c
@@ -0,0 +1,309 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SuperH HSPI bus driver
+ *
+ * Copyright (C) 2011 Kuninori Morimoto
+ *
+ * Based on spi-sh.c:
+ * Based on pxa2xx_spi.c:
+ * Copyright (C) 2011 Renesas Solutions Corp.
+ * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/io.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/sh_hspi.h>
+
+#define SPCR 0x00
+#define SPSR 0x04
+#define SPSCR 0x08
+#define SPTBR 0x0C
+#define SPRBR 0x10
+#define SPCR2 0x14
+
+/* SPSR */
+#define RXFL (1 << 2)
+
+struct hspi_priv {
+ void __iomem *addr;
+ struct spi_controller *ctlr;
+ struct device *dev;
+ struct clk *clk;
+};
+
+/*
+ * basic function
+ */
+static void hspi_write(struct hspi_priv *hspi, int reg, u32 val)
+{
+ iowrite32(val, hspi->addr + reg);
+}
+
+static u32 hspi_read(struct hspi_priv *hspi, int reg)
+{
+ return ioread32(hspi->addr + reg);
+}
+
+static void hspi_bit_set(struct hspi_priv *hspi, int reg, u32 mask, u32 set)
+{
+ u32 val = hspi_read(hspi, reg);
+
+ val &= ~mask;
+ val |= set & mask;
+
+ hspi_write(hspi, reg, val);
+}
+
+/*
+ * transfer function
+ */
+static int hspi_status_check_timeout(struct hspi_priv *hspi, u32 mask, u32 val)
+{
+ int t = 256;
+
+ while (t--) {
+ if ((mask & hspi_read(hspi, SPSR)) == val)
+ return 0;
+
+ udelay(10);
+ }
+
+ dev_err(hspi->dev, "timeout\n");
+ return -ETIMEDOUT;
+}
+
+/*
+ * spi master function
+ */
+
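+/* CS is asserted by clearing SPSCR bit 6 and deasserted by setting it */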
+#define hspi_hw_cs_enable(hspi) hspi_hw_cs_ctrl(hspi, 0)
+#define hspi_hw_cs_disable(hspi) hspi_hw_cs_ctrl(hspi, 1)
+static void hspi_hw_cs_ctrl(struct hspi_priv *hspi, int hi)
+{
+ hspi_bit_set(hspi, SPSCR, (1 << 6), (hi) << 6);
+}
+
+static void hspi_hw_setup(struct hspi_priv *hspi,
+ struct spi_message *msg,
+ struct spi_transfer *t)
+{
+ struct spi_device *spi = msg->spi;
+ struct device *dev = hspi->dev;
+ u32 spcr, idiv_clk;
+ u32 rate, best_rate, min, tmp;
+
+ /*
+ * find best IDIV/CLKCx settings
+ */
+ min = ~0;
+ best_rate = 0;
+ spcr = 0;
+ for (idiv_clk = 0x00; idiv_clk <= 0x3F; idiv_clk++) {
+ rate = clk_get_rate(hspi->clk);
+
+ /* IDIV calculation */
+ if (idiv_clk & (1 << 5))
+ rate /= 128;
+ else
+ rate /= 16;
+
+ /* CLKCx calculation */
+ rate /= (((idiv_clk & 0x1F) + 1) * 2);
+
+ /* save best settings */
+ tmp = abs(t->speed_hz - rate);
+ if (tmp < min) {
+ min = tmp;
+ spcr = idiv_clk;
+ best_rate = rate;
+ }
+ }
+
+ if (spi->mode & SPI_CPHA)
+ spcr |= 1 << 7;
+ if (spi->mode & SPI_CPOL)
+ spcr |= 1 << 6;
+
+ dev_dbg(dev, "speed %d/%d\n", t->speed_hz, best_rate);
+
+ hspi_write(hspi, SPCR, spcr);
+ hspi_write(hspi, SPSR, 0x0);
+ hspi_write(hspi, SPSCR, 0x21); /* master mode / CS control */
+}
+
+static int hspi_transfer_one_message(struct spi_controller *ctlr,
+ struct spi_message *msg)
+{
+ struct hspi_priv *hspi = spi_controller_get_devdata(ctlr);
+ struct spi_transfer *t;
+ u32 tx;
+ u32 rx;
+ int ret, i;
+ unsigned int cs_change;
+ const int nsecs = 50;
+
+ dev_dbg(hspi->dev, "%s\n", __func__);
+
+ cs_change = 1;
+ ret = 0;
+ list_for_each_entry(t, &msg->transfers, transfer_list) {
+
+ if (cs_change) {
+ hspi_hw_setup(hspi, msg, t);
+ hspi_hw_cs_enable(hspi);
+ ndelay(nsecs);
+ }
+ cs_change = t->cs_change;
+
+ for (i = 0; i < t->len; i++) {
+
+ /* wait remains */
+ ret = hspi_status_check_timeout(hspi, 0x1, 0);
+ if (ret < 0)
+ break;
+
+ tx = 0;
+ if (t->tx_buf)
+ tx = (u32)((u8 *)t->tx_buf)[i];
+
+ hspi_write(hspi, SPTBR, tx);
+
+ /* wait receive */
+ ret = hspi_status_check_timeout(hspi, 0x4, 0x4);
+ if (ret < 0)
+ break;
+
+ rx = hspi_read(hspi, SPRBR);
+ if (t->rx_buf)
+ ((u8 *)t->rx_buf)[i] = (u8)rx;
+
+ }
+
+ msg->actual_length += t->len;
+
+ spi_transfer_delay_exec(t);
+
+ if (cs_change) {
+ ndelay(nsecs);
+ hspi_hw_cs_disable(hspi);
+ ndelay(nsecs);
+ }
+ }
+
+ msg->status = ret;
+ if (!cs_change) {
+ ndelay(nsecs);
+ hspi_hw_cs_disable(hspi);
+ }
+ spi_finalize_current_message(ctlr);
+
+ return ret;
+}
+
+static int hspi_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct spi_controller *ctlr;
+ struct hspi_priv *hspi;
+ struct clk *clk;
+ int ret;
+
+ /* get base addr */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "invalid resource\n");
+ return -EINVAL;
+ }
+
+ ctlr = spi_alloc_master(&pdev->dev, sizeof(*hspi));
+ if (!ctlr)
+ return -ENOMEM;
+
+ clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "couldn't get clock\n");
+ ret = -EINVAL;
+ goto error0;
+ }
+
+ hspi = spi_controller_get_devdata(ctlr);
+ platform_set_drvdata(pdev, hspi);
+
+ /* init hspi */
+ hspi->ctlr = ctlr;
+ hspi->dev = &pdev->dev;
+ hspi->clk = clk;
+ hspi->addr = devm_ioremap(hspi->dev,
+ res->start, resource_size(res));
+ if (!hspi->addr) {
+ ret = -ENOMEM;
+ goto error1;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+
+ ctlr->bus_num = pdev->id;
+ ctlr->mode_bits = SPI_CPOL | SPI_CPHA;
+ ctlr->dev.of_node = pdev->dev.of_node;
+ ctlr->auto_runtime_pm = true;
+ ctlr->transfer_one_message = hspi_transfer_one_message;
+ ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
+
+ ret = devm_spi_register_controller(&pdev->dev, ctlr);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "devm_spi_register_controller error.\n");
+ goto error2;
+ }
+
+ return 0;
+
+ error2:
+ pm_runtime_disable(&pdev->dev);
+ error1:
+ clk_put(clk);
+ error0:
+ spi_controller_put(ctlr);
+
+ return ret;
+}
+
+static int hspi_remove(struct platform_device *pdev)
+{
+ struct hspi_priv *hspi = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(&pdev->dev);
+
+ clk_put(hspi->clk);
+
+ return 0;
+}
+
+static const struct of_device_id hspi_of_match[] = {
+ { .compatible = "renesas,hspi", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, hspi_of_match);
+
+static struct platform_driver hspi_driver = {
+ .probe = hspi_probe,
+ .remove = hspi_remove,
+ .driver = {
+ .name = "sh-hspi",
+ .of_match_table = hspi_of_match,
+ },
+};
+module_platform_driver(hspi_driver);
+
+MODULE_DESCRIPTION("SuperH HSPI bus driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
+MODULE_ALIAS("platform:sh-hspi");
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
new file mode 100644
index 000000000..51ceaa485
--- /dev/null
+++ b/drivers/spi/spi-sh-msiof.c
@@ -0,0 +1,1446 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SuperH MSIOF SPI Controller Interface
+ *
+ * Copyright (c) 2009 Magnus Damm
+ * Copyright (C) 2014 Renesas Electronics Corporation
+ * Copyright (C) 2014-2017 Glider bvba
+ */
+
+#include <linux/bitmap.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/sh_dma.h>
+
+#include <linux/spi/sh_msiof.h>
+#include <linux/spi/spi.h>
+
+#include <asm/unaligned.h>
+
+#define SH_MSIOF_FLAG_FIXED_DTDL_200 BIT(0)
+
+struct sh_msiof_chipdata {
+ u32 bits_per_word_mask;
+ u16 tx_fifo_size;
+ u16 rx_fifo_size;
+ u16 ctlr_flags;
+ u16 min_div_pow;
+ u32 flags;
+};
+
+struct sh_msiof_spi_priv {
+ struct spi_controller *ctlr;
+ void __iomem *mapbase;
+ struct clk *clk;
+ struct platform_device *pdev;
+ struct sh_msiof_spi_info *info;
+ struct completion done;
+ struct completion done_txdma;
+ unsigned int tx_fifo_size;
+ unsigned int rx_fifo_size;
+ unsigned int min_div_pow;
+ void *tx_dma_page;
+ void *rx_dma_page;
+ dma_addr_t tx_dma_addr;
+ dma_addr_t rx_dma_addr;
+ bool native_cs_inited;
+ bool native_cs_high;
+ bool slave_aborted;
+};
+
+#define MAX_SS 3 /* Maximum number of native chip selects */
+
+#define SITMDR1 0x00 /* Transmit Mode Register 1 */
+#define SITMDR2 0x04 /* Transmit Mode Register 2 */
+#define SITMDR3 0x08 /* Transmit Mode Register 3 */
+#define SIRMDR1 0x10 /* Receive Mode Register 1 */
+#define SIRMDR2 0x14 /* Receive Mode Register 2 */
+#define SIRMDR3 0x18 /* Receive Mode Register 3 */
+#define SITSCR 0x20 /* Transmit Clock Select Register */
+#define SIRSCR 0x22 /* Receive Clock Select Register (SH, A1, APE6) */
+#define SICTR 0x28 /* Control Register */
+#define SIFCTR 0x30 /* FIFO Control Register */
+#define SISTR 0x40 /* Status Register */
+#define SIIER 0x44 /* Interrupt Enable Register */
+#define SITDR1 0x48 /* Transmit Control Data Register 1 (SH, A1) */
+#define SITDR2 0x4c /* Transmit Control Data Register 2 (SH, A1) */
+#define SITFDR 0x50 /* Transmit FIFO Data Register */
+#define SIRDR1 0x58 /* Receive Control Data Register 1 (SH, A1) */
+#define SIRDR2 0x5c /* Receive Control Data Register 2 (SH, A1) */
+#define SIRFDR 0x60 /* Receive FIFO Data Register */
+
+/* SITMDR1 and SIRMDR1 */
+#define SIMDR1_TRMD BIT(31) /* Transfer Mode (1 = Master mode) */
+#define SIMDR1_SYNCMD_MASK GENMASK(29, 28) /* SYNC Mode */
+#define SIMDR1_SYNCMD_SPI (2 << 28) /* Level mode/SPI */
+#define SIMDR1_SYNCMD_LR (3 << 28) /* L/R mode */
+#define SIMDR1_SYNCAC_SHIFT 25 /* Sync Polarity (1 = Active-low) */
+#define SIMDR1_BITLSB_SHIFT 24 /* MSB/LSB First (1 = LSB first) */
+#define SIMDR1_DTDL_SHIFT 20 /* Data Pin Bit Delay for MSIOF_SYNC */
+#define SIMDR1_SYNCDL_SHIFT 16 /* Frame Sync Signal Timing Delay */
+#define SIMDR1_FLD_MASK GENMASK(3, 2) /* Frame Sync Signal Interval (0-3) */
+#define SIMDR1_FLD_SHIFT 2
+#define SIMDR1_XXSTP BIT(0) /* Transmission/Reception Stop on FIFO */
+/* SITMDR1 */
+#define SITMDR1_PCON BIT(30) /* Transfer Signal Connection */
+#define SITMDR1_SYNCCH_MASK GENMASK(27, 26) /* Sync Signal Channel Select */
+#define SITMDR1_SYNCCH_SHIFT 26 /* 0=MSIOF_SYNC, 1=MSIOF_SS1, 2=MSIOF_SS2 */
+
+/* SITMDR2 and SIRMDR2 */
+#define SIMDR2_BITLEN1(i) (((i) - 1) << 24) /* Data Size (8-32 bits) */
+#define SIMDR2_WDLEN1(i) (((i) - 1) << 16) /* Word Count (1-64/256 (SH, A1)) */
+#define SIMDR2_GRPMASK1 BIT(0) /* Group Output Mask 1 (SH, A1) */
+
+/* SITSCR and SIRSCR */
+#define SISCR_BRPS_MASK GENMASK(12, 8) /* Prescaler Setting (1-32) */
+#define SISCR_BRPS(i) (((i) - 1) << 8)
+#define SISCR_BRDV_MASK GENMASK(2, 0) /* Baud Rate Generator's Division Ratio */
+#define SISCR_BRDV_DIV_2 0
+#define SISCR_BRDV_DIV_4 1
+#define SISCR_BRDV_DIV_8 2
+#define SISCR_BRDV_DIV_16 3
+#define SISCR_BRDV_DIV_32 4
+#define SISCR_BRDV_DIV_1 7
+
+/* SICTR */
+#define SICTR_TSCKIZ_MASK GENMASK(31, 30) /* Transmit Clock I/O Polarity Select */
+#define SICTR_TSCKIZ_SCK BIT(31) /* Disable SCK when TX disabled */
+#define SICTR_TSCKIZ_POL_SHIFT 30 /* Transmit Clock Polarity */
+#define SICTR_RSCKIZ_MASK GENMASK(29, 28) /* Receive Clock Polarity Select */
+#define SICTR_RSCKIZ_SCK BIT(29) /* Must match CTR_TSCKIZ_SCK */
+#define SICTR_RSCKIZ_POL_SHIFT 28 /* Receive Clock Polarity */
+#define SICTR_TEDG_SHIFT 27 /* Transmit Timing (1 = falling edge) */
+#define SICTR_REDG_SHIFT 26 /* Receive Timing (1 = falling edge) */
+#define SICTR_TXDIZ_MASK GENMASK(23, 22) /* Pin Output When TX is Disabled */
+#define SICTR_TXDIZ_LOW (0 << 22) /* 0 */
+#define SICTR_TXDIZ_HIGH (1 << 22) /* 1 */
+#define SICTR_TXDIZ_HIZ (2 << 22) /* High-impedance */
+#define SICTR_TSCKE BIT(15) /* Transmit Serial Clock Output Enable */
+#define SICTR_TFSE BIT(14) /* Transmit Frame Sync Signal Output Enable */
+#define SICTR_TXE BIT(9) /* Transmit Enable */
+#define SICTR_RXE BIT(8) /* Receive Enable */
+#define SICTR_TXRST BIT(1) /* Transmit Reset */
+#define SICTR_RXRST BIT(0) /* Receive Reset */
+
+/* SIFCTR */
+#define SIFCTR_TFWM_MASK GENMASK(31, 29) /* Transmit FIFO Watermark */
+#define SIFCTR_TFWM_64 (0 << 29) /* Transfer Request when 64 empty stages */
+#define SIFCTR_TFWM_32 (1 << 29) /* Transfer Request when 32 empty stages */
+#define SIFCTR_TFWM_24 (2 << 29) /* Transfer Request when 24 empty stages */
+#define SIFCTR_TFWM_16 (3 << 29) /* Transfer Request when 16 empty stages */
+#define SIFCTR_TFWM_12 (4 << 29) /* Transfer Request when 12 empty stages */
+#define SIFCTR_TFWM_8 (5 << 29) /* Transfer Request when 8 empty stages */
+#define SIFCTR_TFWM_4 (6 << 29) /* Transfer Request when 4 empty stages */
+#define SIFCTR_TFWM_1 (7 << 29) /* Transfer Request when 1 empty stage */
+#define SIFCTR_TFUA_MASK GENMASK(26, 20) /* Transmit FIFO Usable Area */
+#define SIFCTR_TFUA_SHIFT 20
+#define SIFCTR_TFUA(i) ((i) << SIFCTR_TFUA_SHIFT)
+#define SIFCTR_RFWM_MASK GENMASK(15, 13) /* Receive FIFO Watermark */
+#define SIFCTR_RFWM_1 (0 << 13) /* Transfer Request when 1 valid stages */
+#define SIFCTR_RFWM_4 (1 << 13) /* Transfer Request when 4 valid stages */
+#define SIFCTR_RFWM_8 (2 << 13) /* Transfer Request when 8 valid stages */
+#define SIFCTR_RFWM_16 (3 << 13) /* Transfer Request when 16 valid stages */
+#define SIFCTR_RFWM_32 (4 << 13) /* Transfer Request when 32 valid stages */
+#define SIFCTR_RFWM_64 (5 << 13) /* Transfer Request when 64 valid stages */
+#define SIFCTR_RFWM_128 (6 << 13) /* Transfer Request when 128 valid stages */
+#define SIFCTR_RFWM_256 (7 << 13) /* Transfer Request when 256 valid stages */
+#define SIFCTR_RFUA_MASK GENMASK(12, 4) /* Receive FIFO Usable Area (0x40 = full) */
+#define SIFCTR_RFUA_SHIFT 4
+#define SIFCTR_RFUA(i) ((i) << SIFCTR_RFUA_SHIFT)
+
+/* SISTR */
+#define SISTR_TFEMP BIT(29) /* Transmit FIFO Empty */
+#define SISTR_TDREQ BIT(28) /* Transmit Data Transfer Request */
+#define SISTR_TEOF BIT(23) /* Frame Transmission End */
+#define SISTR_TFSERR BIT(21) /* Transmit Frame Synchronization Error */
+#define SISTR_TFOVF BIT(20) /* Transmit FIFO Overflow */
+#define SISTR_TFUDF BIT(19) /* Transmit FIFO Underflow */
+#define SISTR_RFFUL BIT(13) /* Receive FIFO Full */
+#define SISTR_RDREQ BIT(12) /* Receive Data Transfer Request */
+#define SISTR_REOF BIT(7) /* Frame Reception End */
+#define SISTR_RFSERR BIT(5) /* Receive Frame Synchronization Error */
+#define SISTR_RFUDF BIT(4) /* Receive FIFO Underflow */
+#define SISTR_RFOVF BIT(3) /* Receive FIFO Overflow */
+
+/* SIIER */
+#define SIIER_TDMAE BIT(31) /* Transmit Data DMA Transfer Req. Enable */
+#define SIIER_TFEMPE BIT(29) /* Transmit FIFO Empty Enable */
+#define SIIER_TDREQE BIT(28) /* Transmit Data Transfer Request Enable */
+#define SIIER_TEOFE BIT(23) /* Frame Transmission End Enable */
+#define SIIER_TFSERRE BIT(21) /* Transmit Frame Sync Error Enable */
+#define SIIER_TFOVFE BIT(20) /* Transmit FIFO Overflow Enable */
+#define SIIER_TFUDFE BIT(19) /* Transmit FIFO Underflow Enable */
+#define SIIER_RDMAE BIT(15) /* Receive Data DMA Transfer Req. Enable */
+#define SIIER_RFFULE BIT(13) /* Receive FIFO Full Enable */
+#define SIIER_RDREQE BIT(12) /* Receive Data Transfer Request Enable */
+#define SIIER_REOFE BIT(7) /* Frame Reception End Enable */
+#define SIIER_RFSERRE BIT(5) /* Receive Frame Sync Error Enable */
+#define SIIER_RFUDFE BIT(4) /* Receive FIFO Underflow Enable */
+#define SIIER_RFOVFE BIT(3) /* Receive FIFO Overflow Enable */
+
+
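+/* SITSCR and SIRSCR are 16-bit registers; all other registers are 32-bit */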
+static u32 sh_msiof_read(struct sh_msiof_spi_priv *p, int reg_offs)
+{
+ switch (reg_offs) {
+ case SITSCR:
+ case SIRSCR:
+ return ioread16(p->mapbase + reg_offs);
+ default:
+ return ioread32(p->mapbase + reg_offs);
+ }
+}
+
+static void sh_msiof_write(struct sh_msiof_spi_priv *p, int reg_offs,
+ u32 value)
+{
+ switch (reg_offs) {
+ case SITSCR:
+ case SIRSCR:
+ iowrite16(value, p->mapbase + reg_offs);
+ break;
+ default:
+ iowrite32(value, p->mapbase + reg_offs);
+ break;
+ }
+}
+
+static int sh_msiof_modify_ctr_wait(struct sh_msiof_spi_priv *p,
+ u32 clr, u32 set)
+{
+ u32 mask = clr | set;
+ u32 data;
+
+ data = sh_msiof_read(p, SICTR);
+ data &= ~clr;
+ data |= set;
+ sh_msiof_write(p, SICTR, data);
+
+ return readl_poll_timeout_atomic(p->mapbase + SICTR, data,
+ (data & mask) == set, 1, 100);
+}
+
+static irqreturn_t sh_msiof_spi_irq(int irq, void *data)
+{
+ struct sh_msiof_spi_priv *p = data;
+
+ /* just disable the interrupt and wake up */
+ sh_msiof_write(p, SIIER, 0);
+ complete(&p->done);
+
+ return IRQ_HANDLED;
+}
+
+static void sh_msiof_spi_reset_regs(struct sh_msiof_spi_priv *p)
+{
+ u32 mask = SICTR_TXRST | SICTR_RXRST;
+ u32 data;
+
+ data = sh_msiof_read(p, SICTR);
+ data |= mask;
+ sh_msiof_write(p, SICTR, data);
+
+ readl_poll_timeout_atomic(p->mapbase + SICTR, data, !(data & mask), 1,
+ 100);
+}
+
+static const u32 sh_msiof_spi_div_array[] = {
+ SISCR_BRDV_DIV_1, SISCR_BRDV_DIV_2, SISCR_BRDV_DIV_4,
+ SISCR_BRDV_DIV_8, SISCR_BRDV_DIV_16, SISCR_BRDV_DIV_32,
+};
+
+static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
+ struct spi_transfer *t)
+{
+ unsigned long parent_rate = clk_get_rate(p->clk);
+ unsigned int div_pow = p->min_div_pow;
+ u32 spi_hz = t->speed_hz;
+ unsigned long div;
+ u32 brps, scr;
+
+ if (!spi_hz || !parent_rate) {
+ WARN(1, "Invalid clock rate parameters %lu and %u\n",
+ parent_rate, spi_hz);
+ return;
+ }
+
+ div = DIV_ROUND_UP(parent_rate, spi_hz);
+ if (div <= 1024) {
+ /* SISCR_BRDV_DIV_1 is valid only if BRPS is x 1/1 or x 1/2 */
+ if (!div_pow && div <= 32 && div > 2)
+ div_pow = 1;
+
+ if (div_pow)
+ brps = (div + 1) >> div_pow;
+ else
+ brps = div;
+
+ for (; brps > 32; div_pow++)
+ brps = (brps + 1) >> 1;
+ } else {
+ /* Set transfer rate composite divisor to 2^5 * 32 = 1024 */
+ dev_err(&p->pdev->dev,
+ "Requested SPI transfer rate %d is too low\n", spi_hz);
+ div_pow = 5;
+ brps = 32;
+ }
+
+ t->effective_speed_hz = parent_rate / (brps << div_pow);
+
+ scr = sh_msiof_spi_div_array[div_pow] | SISCR_BRPS(brps);
+ sh_msiof_write(p, SITSCR, scr);
+ if (!(p->ctlr->flags & SPI_CONTROLLER_MUST_TX))
+ sh_msiof_write(p, SIRSCR, scr);
+}
+
+static u32 sh_msiof_get_delay_bit(u32 dtdl_or_syncdl)
+{
+ /*
+ * DTDL/SYNCDL bit : p->info->dtdl or p->info->syncdl
+ * b'000 : 0
+ * b'001 : 100
+ * b'010 : 200
+ * b'011 (SYNCDL only) : 300
+ * b'101 : 50
+ * b'110 : 150
+ */
+ if (dtdl_or_syncdl % 100)
+ return dtdl_or_syncdl / 100 + 5;
+ else
+ return dtdl_or_syncdl / 100;
+}
+
+static u32 sh_msiof_spi_get_dtdl_and_syncdl(struct sh_msiof_spi_priv *p)
+{
+ u32 val;
+
+ if (!p->info)
+ return 0;
+
+ /* check that DTDL and SYNCDL are allowed values */
+ if (p->info->dtdl > 200 || p->info->syncdl > 300) {
+ dev_warn(&p->pdev->dev, "DTDL or SYNCDL is too large\n");
+ return 0;
+ }
+
+ /* the sum of DTDL and SYNCDL must be a multiple of 100 */
+ if ((p->info->dtdl + p->info->syncdl) % 100) {
+ dev_warn(&p->pdev->dev, "the sum of DTDL/SYNCDL is not good\n");
+ return 0;
+ }
+
+ val = sh_msiof_get_delay_bit(p->info->dtdl) << SIMDR1_DTDL_SHIFT;
+ val |= sh_msiof_get_delay_bit(p->info->syncdl) << SIMDR1_SYNCDL_SHIFT;
+
+ return val;
+}
+
+static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, u32 ss,
+ u32 cpol, u32 cpha,
+ u32 tx_hi_z, u32 lsb_first, u32 cs_high)
+{
+ u32 tmp;
+ int edge;
+
+ /*
+ * CPOL CPHA TSCKIZ RSCKIZ TEDG REDG
+ * 0 0 10 10 1 1
+ * 0 1 10 10 0 0
+ * 1 0 11 11 0 0
+ * 1 1 11 11 1 1
+ */
+ tmp = SIMDR1_SYNCMD_SPI | 1 << SIMDR1_FLD_SHIFT | SIMDR1_XXSTP;
+ tmp |= !cs_high << SIMDR1_SYNCAC_SHIFT;
+ tmp |= lsb_first << SIMDR1_BITLSB_SHIFT;
+ tmp |= sh_msiof_spi_get_dtdl_and_syncdl(p);
+ if (spi_controller_is_slave(p->ctlr)) {
+ sh_msiof_write(p, SITMDR1, tmp | SITMDR1_PCON);
+ } else {
+ sh_msiof_write(p, SITMDR1,
+ tmp | SIMDR1_TRMD | SITMDR1_PCON |
+ (ss < MAX_SS ? ss : 0) << SITMDR1_SYNCCH_SHIFT);
+ }
+ if (p->ctlr->flags & SPI_CONTROLLER_MUST_TX) {
+ /* These bits are reserved if RX needs TX */
+ tmp &= ~0x0000ffff;
+ }
+ sh_msiof_write(p, SIRMDR1, tmp);
+
+ tmp = 0;
+ tmp |= SICTR_TSCKIZ_SCK | cpol << SICTR_TSCKIZ_POL_SHIFT;
+ tmp |= SICTR_RSCKIZ_SCK | cpol << SICTR_RSCKIZ_POL_SHIFT;
+
+ edge = cpol ^ !cpha;
+
+ tmp |= edge << SICTR_TEDG_SHIFT;
+ tmp |= edge << SICTR_REDG_SHIFT;
+ tmp |= tx_hi_z ? SICTR_TXDIZ_HIZ : SICTR_TXDIZ_LOW;
+ sh_msiof_write(p, SICTR, tmp);
+}
+
+static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p,
+ const void *tx_buf, void *rx_buf,
+ u32 bits, u32 words)
+{
+ u32 dr2 = SIMDR2_BITLEN1(bits) | SIMDR2_WDLEN1(words);
+
+ if (tx_buf || (p->ctlr->flags & SPI_CONTROLLER_MUST_TX))
+ sh_msiof_write(p, SITMDR2, dr2);
+ else
+ sh_msiof_write(p, SITMDR2, dr2 | SIMDR2_GRPMASK1);
+
+ if (rx_buf)
+ sh_msiof_write(p, SIRMDR2, dr2);
+}
+
+static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p)
+{
+ sh_msiof_write(p, SISTR,
+ sh_msiof_read(p, SISTR) & ~(SISTR_TDREQ | SISTR_RDREQ));
+}
+
+static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p,
+ const void *tx_buf, int words, int fs)
+{
+ const u8 *buf_8 = tx_buf;
+ int k;
+
+ for (k = 0; k < words; k++)
+ sh_msiof_write(p, SITFDR, buf_8[k] << fs);
+}
+
+static void sh_msiof_spi_write_fifo_16(struct sh_msiof_spi_priv *p,
+ const void *tx_buf, int words, int fs)
+{
+ const u16 *buf_16 = tx_buf;
+ int k;
+
+ for (k = 0; k < words; k++)
+ sh_msiof_write(p, SITFDR, buf_16[k] << fs);
+}
+
+static void sh_msiof_spi_write_fifo_16u(struct sh_msiof_spi_priv *p,
+ const void *tx_buf, int words, int fs)
+{
+ const u16 *buf_16 = tx_buf;
+ int k;
+
+ for (k = 0; k < words; k++)
+ sh_msiof_write(p, SITFDR, get_unaligned(&buf_16[k]) << fs);
+}
+
+static void sh_msiof_spi_write_fifo_32(struct sh_msiof_spi_priv *p,
+ const void *tx_buf, int words, int fs)
+{
+ const u32 *buf_32 = tx_buf;
+ int k;
+
+ for (k = 0; k < words; k++)
+ sh_msiof_write(p, SITFDR, buf_32[k] << fs);
+}
+
+static void sh_msiof_spi_write_fifo_32u(struct sh_msiof_spi_priv *p,
+ const void *tx_buf, int words, int fs)
+{
+ const u32 *buf_32 = tx_buf;
+ int k;
+
+ for (k = 0; k < words; k++)
+ sh_msiof_write(p, SITFDR, get_unaligned(&buf_32[k]) << fs);
+}
+
+static void sh_msiof_spi_write_fifo_s32(struct sh_msiof_spi_priv *p,
+ const void *tx_buf, int words, int fs)
+{
+ const u32 *buf_32 = tx_buf;
+ int k;
+
+ for (k = 0; k < words; k++)
+ sh_msiof_write(p, SITFDR, swab32(buf_32[k] << fs));
+}
+
+static void sh_msiof_spi_write_fifo_s32u(struct sh_msiof_spi_priv *p,
+ const void *tx_buf, int words, int fs)
+{
+ const u32 *buf_32 = tx_buf;
+ int k;
+
+ for (k = 0; k < words; k++)
+ sh_msiof_write(p, SITFDR, swab32(get_unaligned(&buf_32[k]) << fs));
+}
+
+static void sh_msiof_spi_read_fifo_8(struct sh_msiof_spi_priv *p,
+ void *rx_buf, int words, int fs)
+{
+ u8 *buf_8 = rx_buf;
+ int k;
+
+ for (k = 0; k < words; k++)
+ buf_8[k] = sh_msiof_read(p, SIRFDR) >> fs;
+}
+
+static void sh_msiof_spi_read_fifo_16(struct sh_msiof_spi_priv *p,
+ void *rx_buf, int words, int fs)
+{
+ u16 *buf_16 = rx_buf;
+ int k;
+
+ for (k = 0; k < words; k++)
+ buf_16[k] = sh_msiof_read(p, SIRFDR) >> fs;
+}
+
+static void sh_msiof_spi_read_fifo_16u(struct sh_msiof_spi_priv *p,
+ void *rx_buf, int words, int fs)
+{
+ u16 *buf_16 = rx_buf;
+ int k;
+
+ for (k = 0; k < words; k++)
+ put_unaligned(sh_msiof_read(p, SIRFDR) >> fs, &buf_16[k]);
+}
+
+static void sh_msiof_spi_read_fifo_32(struct sh_msiof_spi_priv *p,
+ void *rx_buf, int words, int fs)
+{
+ u32 *buf_32 = rx_buf;
+ int k;
+
+ for (k = 0; k < words; k++)
+ buf_32[k] = sh_msiof_read(p, SIRFDR) >> fs;
+}
+
+static void sh_msiof_spi_read_fifo_32u(struct sh_msiof_spi_priv *p,
+ void *rx_buf, int words, int fs)
+{
+ u32 *buf_32 = rx_buf;
+ int k;
+
+ for (k = 0; k < words; k++)
+ put_unaligned(sh_msiof_read(p, SIRFDR) >> fs, &buf_32[k]);
+}
+
+static void sh_msiof_spi_read_fifo_s32(struct sh_msiof_spi_priv *p,
+ void *rx_buf, int words, int fs)
+{
+ u32 *buf_32 = rx_buf;
+ int k;
+
+ for (k = 0; k < words; k++)
+ buf_32[k] = swab32(sh_msiof_read(p, SIRFDR) >> fs);
+}
+
+static void sh_msiof_spi_read_fifo_s32u(struct sh_msiof_spi_priv *p,
+ void *rx_buf, int words, int fs)
+{
+ u32 *buf_32 = rx_buf;
+ int k;
+
+ for (k = 0; k < words; k++)
+ put_unaligned(swab32(sh_msiof_read(p, SIRFDR) >> fs), &buf_32[k]);
+}
+
+static int sh_msiof_spi_setup(struct spi_device *spi)
+{
+ struct sh_msiof_spi_priv *p =
+ spi_controller_get_devdata(spi->controller);
+ u32 clr, set, tmp;
+
+ if (spi->cs_gpiod || spi_controller_is_slave(p->ctlr))
+ return 0;
+
+ if (p->native_cs_inited &&
+ (p->native_cs_high == !!(spi->mode & SPI_CS_HIGH)))
+ return 0;
+
+ /* Configure native chip select mode/polarity early */
+ clr = SIMDR1_SYNCMD_MASK;
+ set = SIMDR1_SYNCMD_SPI;
+ if (spi->mode & SPI_CS_HIGH)
+ clr |= BIT(SIMDR1_SYNCAC_SHIFT);
+ else
+ set |= BIT(SIMDR1_SYNCAC_SHIFT);
+ pm_runtime_get_sync(&p->pdev->dev);
+ tmp = sh_msiof_read(p, SITMDR1) & ~clr;
+ sh_msiof_write(p, SITMDR1, tmp | set | SIMDR1_TRMD | SITMDR1_PCON);
+ tmp = sh_msiof_read(p, SIRMDR1) & ~clr;
+ sh_msiof_write(p, SIRMDR1, tmp | set);
+ pm_runtime_put(&p->pdev->dev);
+ p->native_cs_high = spi->mode & SPI_CS_HIGH;
+ p->native_cs_inited = true;
+ return 0;
+}
+
+static int sh_msiof_prepare_message(struct spi_controller *ctlr,
+ struct spi_message *msg)
+{
+ struct sh_msiof_spi_priv *p = spi_controller_get_devdata(ctlr);
+ const struct spi_device *spi = msg->spi;
+ u32 ss, cs_high;
+
+ /* Configure pins before asserting CS */
+ if (spi->cs_gpiod) {
+ ss = ctlr->unused_native_cs;
+ cs_high = p->native_cs_high;
+ } else {
+ ss = spi->chip_select;
+ cs_high = !!(spi->mode & SPI_CS_HIGH);
+ }
+ sh_msiof_spi_set_pin_regs(p, ss, !!(spi->mode & SPI_CPOL),
+ !!(spi->mode & SPI_CPHA),
+ !!(spi->mode & SPI_3WIRE),
+ !!(spi->mode & SPI_LSB_FIRST), cs_high);
+ return 0;
+}
+
+static int sh_msiof_spi_start(struct sh_msiof_spi_priv *p, void *rx_buf)
+{
+ bool slave = spi_controller_is_slave(p->ctlr);
+ int ret = 0;
+
+ /* setup clock and rx/tx signals */
+ if (!slave)
+ ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_TSCKE);
+ if (rx_buf && !ret)
+ ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_RXE);
+ if (!ret)
+ ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_TXE);
+
+ /* start by setting frame bit */
+ if (!ret && !slave)
+ ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_TFSE);
+
+ return ret;
+}
+
+static int sh_msiof_spi_stop(struct sh_msiof_spi_priv *p, void *rx_buf)
+{
+ bool slave = spi_controller_is_slave(p->ctlr);
+ int ret = 0;
+
+ /* shut down frame, rx/tx and clock signals */
+ if (!slave)
+ ret = sh_msiof_modify_ctr_wait(p, SICTR_TFSE, 0);
+ if (!ret)
+ ret = sh_msiof_modify_ctr_wait(p, SICTR_TXE, 0);
+ if (rx_buf && !ret)
+ ret = sh_msiof_modify_ctr_wait(p, SICTR_RXE, 0);
+ if (!ret && !slave)
+ ret = sh_msiof_modify_ctr_wait(p, SICTR_TSCKE, 0);
+
+ return ret;
+}
+
+static int sh_msiof_slave_abort(struct spi_controller *ctlr)
+{
+ struct sh_msiof_spi_priv *p = spi_controller_get_devdata(ctlr);
+
+ p->slave_aborted = true;
+ complete(&p->done);
+ complete(&p->done_txdma);
+ return 0;
+}
+
+static int sh_msiof_wait_for_completion(struct sh_msiof_spi_priv *p,
+ struct completion *x)
+{
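+	/*
+	 * In slave mode the transfer timing is dictated by the external
+	 * master, so wait without a timeout and rely on slave_abort() to
+	 * break out; in master mode a one-second timeout is used.
+	 */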
+ if (spi_controller_is_slave(p->ctlr)) {
+ if (wait_for_completion_interruptible(x) ||
+ p->slave_aborted) {
+ dev_dbg(&p->pdev->dev, "interrupted\n");
+ return -EINTR;
+ }
+ } else {
+ if (!wait_for_completion_timeout(x, HZ)) {
+ dev_err(&p->pdev->dev, "timeout\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
+
+static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p,
+ void (*tx_fifo)(struct sh_msiof_spi_priv *,
+ const void *, int, int),
+ void (*rx_fifo)(struct sh_msiof_spi_priv *,
+ void *, int, int),
+ const void *tx_buf, void *rx_buf,
+ int words, int bits)
+{
+ int fifo_shift;
+ int ret;
+
+ /* limit maximum word transfer to rx/tx fifo size */
+ if (tx_buf)
+ words = min_t(int, words, p->tx_fifo_size);
+ if (rx_buf)
+ words = min_t(int, words, p->rx_fifo_size);
+
+ /* the fifo contents need shifting */
+ fifo_shift = 32 - bits;
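+	/* e.g. 8-bit words occupy bits 31:24 of each 32-bit FIFO entry */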
+
+ /* default FIFO watermarks for PIO */
+ sh_msiof_write(p, SIFCTR, 0);
+
+ /* setup msiof transfer mode registers */
+ sh_msiof_spi_set_mode_regs(p, tx_buf, rx_buf, bits, words);
+ sh_msiof_write(p, SIIER, SIIER_TEOFE | SIIER_REOFE);
+
+ /* write tx fifo */
+ if (tx_buf)
+ tx_fifo(p, tx_buf, words, fifo_shift);
+
+ reinit_completion(&p->done);
+ p->slave_aborted = false;
+
+ ret = sh_msiof_spi_start(p, rx_buf);
+ if (ret) {
+ dev_err(&p->pdev->dev, "failed to start hardware\n");
+ goto stop_ier;
+ }
+
+ /* wait for tx fifo to be emptied / rx fifo to be filled */
+ ret = sh_msiof_wait_for_completion(p, &p->done);
+ if (ret)
+ goto stop_reset;
+
+ /* read rx fifo */
+ if (rx_buf)
+ rx_fifo(p, rx_buf, words, fifo_shift);
+
+ /* clear status bits */
+ sh_msiof_reset_str(p);
+
+ ret = sh_msiof_spi_stop(p, rx_buf);
+ if (ret) {
+ dev_err(&p->pdev->dev, "failed to shut down hardware\n");
+ return ret;
+ }
+
+ return words;
+
+stop_reset:
+ sh_msiof_reset_str(p);
+ sh_msiof_spi_stop(p, rx_buf);
+stop_ier:
+ sh_msiof_write(p, SIIER, 0);
+ return ret;
+}
+
+static void sh_msiof_dma_complete(void *arg)
+{
+ complete(arg);
+}
+
+static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
+ void *rx, unsigned int len)
+{
+ u32 ier_bits = 0;
+ struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL;
+ dma_cookie_t cookie;
+ int ret;
+
+ /* First prepare and submit the DMA request(s), as this may fail */
+ if (rx) {
+ ier_bits |= SIIER_RDREQE | SIIER_RDMAE;
+ desc_rx = dmaengine_prep_slave_single(p->ctlr->dma_rx,
+ p->rx_dma_addr, len, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc_rx)
+ return -EAGAIN;
+
+ desc_rx->callback = sh_msiof_dma_complete;
+ desc_rx->callback_param = &p->done;
+ cookie = dmaengine_submit(desc_rx);
+ if (dma_submit_error(cookie))
+ return cookie;
+ }
+
+ if (tx) {
+ ier_bits |= SIIER_TDREQE | SIIER_TDMAE;
+ dma_sync_single_for_device(p->ctlr->dma_tx->device->dev,
+ p->tx_dma_addr, len, DMA_TO_DEVICE);
+ desc_tx = dmaengine_prep_slave_single(p->ctlr->dma_tx,
+ p->tx_dma_addr, len, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc_tx) {
+ ret = -EAGAIN;
+ goto no_dma_tx;
+ }
+
+ desc_tx->callback = sh_msiof_dma_complete;
+ desc_tx->callback_param = &p->done_txdma;
+ cookie = dmaengine_submit(desc_tx);
+ if (dma_submit_error(cookie)) {
+ ret = cookie;
+ goto no_dma_tx;
+ }
+ }
+
+ /* 1 stage FIFO watermarks for DMA */
+ sh_msiof_write(p, SIFCTR, SIFCTR_TFWM_1 | SIFCTR_RFWM_1);
+
+ /* setup msiof transfer mode registers (32-bit words) */
+ sh_msiof_spi_set_mode_regs(p, tx, rx, 32, len / 4);
+
+ sh_msiof_write(p, SIIER, ier_bits);
+
+ reinit_completion(&p->done);
+ if (tx)
+ reinit_completion(&p->done_txdma);
+ p->slave_aborted = false;
+
+ /* Now start DMA */
+ if (rx)
+ dma_async_issue_pending(p->ctlr->dma_rx);
+ if (tx)
+ dma_async_issue_pending(p->ctlr->dma_tx);
+
+ ret = sh_msiof_spi_start(p, rx);
+ if (ret) {
+ dev_err(&p->pdev->dev, "failed to start hardware\n");
+ goto stop_dma;
+ }
+
+ if (tx) {
+ /* wait for tx DMA completion */
+ ret = sh_msiof_wait_for_completion(p, &p->done_txdma);
+ if (ret)
+ goto stop_reset;
+ }
+
+ if (rx) {
+ /* wait for rx DMA completion */
+ ret = sh_msiof_wait_for_completion(p, &p->done);
+ if (ret)
+ goto stop_reset;
+
+ sh_msiof_write(p, SIIER, 0);
+ } else {
+ /* wait for tx fifo to be emptied */
+ sh_msiof_write(p, SIIER, SIIER_TEOFE);
+ ret = sh_msiof_wait_for_completion(p, &p->done);
+ if (ret)
+ goto stop_reset;
+ }
+
+ /* clear status bits */
+ sh_msiof_reset_str(p);
+
+ ret = sh_msiof_spi_stop(p, rx);
+ if (ret) {
+ dev_err(&p->pdev->dev, "failed to shut down hardware\n");
+ return ret;
+ }
+
+ if (rx)
+ dma_sync_single_for_cpu(p->ctlr->dma_rx->device->dev,
+ p->rx_dma_addr, len, DMA_FROM_DEVICE);
+
+ return 0;
+
+stop_reset:
+ sh_msiof_reset_str(p);
+ sh_msiof_spi_stop(p, rx);
+stop_dma:
+ if (tx)
+ dmaengine_terminate_sync(p->ctlr->dma_tx);
+no_dma_tx:
+ if (rx)
+ dmaengine_terminate_sync(p->ctlr->dma_rx);
+ sh_msiof_write(p, SIIER, 0);
+ return ret;
+}
+
+static void copy_bswap32(u32 *dst, const u32 *src, unsigned int words)
+{
+ /* src or dst can be unaligned, but not both */
+ if ((unsigned long)src & 3) {
+ while (words--) {
+ *dst++ = swab32(get_unaligned(src));
+ src++;
+ }
+ } else if ((unsigned long)dst & 3) {
+ while (words--) {
+ put_unaligned(swab32(*src++), dst);
+ dst++;
+ }
+ } else {
+ while (words--)
+ *dst++ = swab32(*src++);
+ }
+}
+
+static void copy_wswap32(u32 *dst, const u32 *src, unsigned int words)
+{
+ /* src or dst can be unaligned, but not both */
+ if ((unsigned long)src & 3) {
+ while (words--) {
+ *dst++ = swahw32(get_unaligned(src));
+ src++;
+ }
+ } else if ((unsigned long)dst & 3) {
+ while (words--) {
+ put_unaligned(swahw32(*src++), dst);
+ dst++;
+ }
+ } else {
+ while (words--)
+ *dst++ = swahw32(*src++);
+ }
+}
+
+static void copy_plain32(u32 *dst, const u32 *src, unsigned int words)
+{
+ memcpy(dst, src, words * 4);
+}
+
+static int sh_msiof_transfer_one(struct spi_controller *ctlr,
+ struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct sh_msiof_spi_priv *p = spi_controller_get_devdata(ctlr);
+ void (*copy32)(u32 *, const u32 *, unsigned int);
+ void (*tx_fifo)(struct sh_msiof_spi_priv *, const void *, int, int);
+ void (*rx_fifo)(struct sh_msiof_spi_priv *, void *, int, int);
+ const void *tx_buf = t->tx_buf;
+ void *rx_buf = t->rx_buf;
+ unsigned int len = t->len;
+ unsigned int bits = t->bits_per_word;
+ unsigned int bytes_per_word;
+ unsigned int words;
+ int n;
+ bool swab;
+ int ret;
+
+ /* reset registers */
+ sh_msiof_spi_reset_regs(p);
+
+ /* setup clocks (clock already enabled in chipselect()) */
+ if (!spi_controller_is_slave(p->ctlr))
+ sh_msiof_spi_set_clk_regs(p, t);
+
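+	/* DMA is used only for transfers of at least 16 bytes; shorter ones fall through to PIO */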
+ while (ctlr->dma_tx && len > 15) {
+ /*
+ * DMA supports 32-bit words only, hence pack 8-bit and 16-bit
+ * words, with byte resp. word swapping.
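+		 * E.g. on a little-endian CPU, four 8-bit words 01 02 03 04 are
+		 * loaded as 0x04030201 and byte-swapped to 0x01020304, so
+		 * MSB-first transmission preserves the buffer's byte order.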
+ */
+ unsigned int l = 0;
+
+ if (tx_buf)
+ l = min(round_down(len, 4), p->tx_fifo_size * 4);
+ if (rx_buf)
+ l = min(round_down(len, 4), p->rx_fifo_size * 4);
+
+ if (bits <= 8) {
+ copy32 = copy_bswap32;
+ } else if (bits <= 16) {
+ copy32 = copy_wswap32;
+ } else {
+ copy32 = copy_plain32;
+ }
+
+ if (tx_buf)
+ copy32(p->tx_dma_page, tx_buf, l / 4);
+
+ ret = sh_msiof_dma_once(p, tx_buf, rx_buf, l);
+ if (ret == -EAGAIN) {
+ dev_warn_once(&p->pdev->dev,
+ "DMA not available, falling back to PIO\n");
+ break;
+ }
+ if (ret)
+ return ret;
+
+ if (rx_buf) {
+ copy32(rx_buf, p->rx_dma_page, l / 4);
+ rx_buf += l;
+ }
+ if (tx_buf)
+ tx_buf += l;
+
+ len -= l;
+ if (!len)
+ return 0;
+ }
+
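+	/*
+	 * For longer byte streams, pack four 8-bit words into each 32-bit
+	 * FIFO access (with byte swapping) to cut the number of FIFO writes.
+	 */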
+ if (bits <= 8 && len > 15) {
+ bits = 32;
+ swab = true;
+ } else {
+ swab = false;
+ }
+
+ /* setup bytes per word and fifo read/write functions */
+ if (bits <= 8) {
+ bytes_per_word = 1;
+ tx_fifo = sh_msiof_spi_write_fifo_8;
+ rx_fifo = sh_msiof_spi_read_fifo_8;
+ } else if (bits <= 16) {
+ bytes_per_word = 2;
+ if ((unsigned long)tx_buf & 0x01)
+ tx_fifo = sh_msiof_spi_write_fifo_16u;
+ else
+ tx_fifo = sh_msiof_spi_write_fifo_16;
+
+ if ((unsigned long)rx_buf & 0x01)
+ rx_fifo = sh_msiof_spi_read_fifo_16u;
+ else
+ rx_fifo = sh_msiof_spi_read_fifo_16;
+ } else if (swab) {
+ bytes_per_word = 4;
+ if ((unsigned long)tx_buf & 0x03)
+ tx_fifo = sh_msiof_spi_write_fifo_s32u;
+ else
+ tx_fifo = sh_msiof_spi_write_fifo_s32;
+
+ if ((unsigned long)rx_buf & 0x03)
+ rx_fifo = sh_msiof_spi_read_fifo_s32u;
+ else
+ rx_fifo = sh_msiof_spi_read_fifo_s32;
+ } else {
+ bytes_per_word = 4;
+ if ((unsigned long)tx_buf & 0x03)
+ tx_fifo = sh_msiof_spi_write_fifo_32u;
+ else
+ tx_fifo = sh_msiof_spi_write_fifo_32;
+
+ if ((unsigned long)rx_buf & 0x03)
+ rx_fifo = sh_msiof_spi_read_fifo_32u;
+ else
+ rx_fifo = sh_msiof_spi_read_fifo_32;
+ }
+
+ /* transfer in fifo sized chunks */
+ words = len / bytes_per_word;
+
+ while (words > 0) {
+ n = sh_msiof_spi_txrx_once(p, tx_fifo, rx_fifo, tx_buf, rx_buf,
+ words, bits);
+ if (n < 0)
+ return n;
+
+ if (tx_buf)
+ tx_buf += n * bytes_per_word;
+ if (rx_buf)
+ rx_buf += n * bytes_per_word;
+ words -= n;
+
+ if (words == 0 && (len % bytes_per_word)) {
+ words = len % bytes_per_word;
+ bits = t->bits_per_word;
+ bytes_per_word = 1;
+ tx_fifo = sh_msiof_spi_write_fifo_8;
+ rx_fifo = sh_msiof_spi_read_fifo_8;
+ }
+ }
+
+ return 0;
+}
+
+static const struct sh_msiof_chipdata sh_data = {
+ .bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32),
+ .tx_fifo_size = 64,
+ .rx_fifo_size = 64,
+ .ctlr_flags = 0,
+ .min_div_pow = 0,
+};
+
+static const struct sh_msiof_chipdata rcar_gen2_data = {
+ .bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) |
+ SPI_BPW_MASK(24) | SPI_BPW_MASK(32),
+ .tx_fifo_size = 64,
+ .rx_fifo_size = 64,
+ .ctlr_flags = SPI_CONTROLLER_MUST_TX,
+ .min_div_pow = 0,
+};
+
+static const struct sh_msiof_chipdata rcar_gen3_data = {
+ .bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) |
+ SPI_BPW_MASK(24) | SPI_BPW_MASK(32),
+ .tx_fifo_size = 64,
+ .rx_fifo_size = 64,
+ .ctlr_flags = SPI_CONTROLLER_MUST_TX,
+ .min_div_pow = 1,
+};
+
+static const struct sh_msiof_chipdata rcar_r8a7795_data = {
+ .bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) |
+ SPI_BPW_MASK(24) | SPI_BPW_MASK(32),
+ .tx_fifo_size = 64,
+ .rx_fifo_size = 64,
+ .ctlr_flags = SPI_CONTROLLER_MUST_TX,
+ .min_div_pow = 1,
+ .flags = SH_MSIOF_FLAG_FIXED_DTDL_200,
+};
+
+static const struct of_device_id sh_msiof_match[] = {
+ { .compatible = "renesas,sh-mobile-msiof", .data = &sh_data },
+ { .compatible = "renesas,msiof-r8a7743", .data = &rcar_gen2_data },
+ { .compatible = "renesas,msiof-r8a7745", .data = &rcar_gen2_data },
+ { .compatible = "renesas,msiof-r8a7790", .data = &rcar_gen2_data },
+ { .compatible = "renesas,msiof-r8a7791", .data = &rcar_gen2_data },
+ { .compatible = "renesas,msiof-r8a7792", .data = &rcar_gen2_data },
+ { .compatible = "renesas,msiof-r8a7793", .data = &rcar_gen2_data },
+ { .compatible = "renesas,msiof-r8a7794", .data = &rcar_gen2_data },
+ { .compatible = "renesas,rcar-gen2-msiof", .data = &rcar_gen2_data },
+ { .compatible = "renesas,msiof-r8a7795", .data = &rcar_r8a7795_data },
+ { .compatible = "renesas,msiof-r8a7796", .data = &rcar_gen3_data },
+ { .compatible = "renesas,rcar-gen3-msiof", .data = &rcar_gen3_data },
+ { .compatible = "renesas,rcar-gen4-msiof", .data = &rcar_gen3_data },
+ { .compatible = "renesas,sh-msiof", .data = &sh_data }, /* Deprecated */
+ {},
+};
+MODULE_DEVICE_TABLE(of, sh_msiof_match);
+
+#ifdef CONFIG_OF
+static struct sh_msiof_spi_info *sh_msiof_spi_parse_dt(struct device *dev)
+{
+ struct sh_msiof_spi_info *info;
+ struct device_node *np = dev->of_node;
+ u32 num_cs = 1;
+
+ info = devm_kzalloc(dev, sizeof(struct sh_msiof_spi_info), GFP_KERNEL);
+ if (!info)
+ return NULL;
+
+ info->mode = of_property_read_bool(np, "spi-slave") ? MSIOF_SPI_SLAVE
+ : MSIOF_SPI_MASTER;
+
+ /* Parse the MSIOF properties */
+ if (info->mode == MSIOF_SPI_MASTER)
+ of_property_read_u32(np, "num-cs", &num_cs);
+ of_property_read_u32(np, "renesas,tx-fifo-size",
+ &info->tx_fifo_override);
+ of_property_read_u32(np, "renesas,rx-fifo-size",
+ &info->rx_fifo_override);
+ of_property_read_u32(np, "renesas,dtdl", &info->dtdl);
+ of_property_read_u32(np, "renesas,syncdl", &info->syncdl);
+
+ info->num_chipselect = num_cs;
+
+ return info;
+}
+#else
+static struct sh_msiof_spi_info *sh_msiof_spi_parse_dt(struct device *dev)
+{
+ return NULL;
+}
+#endif
+
+static struct dma_chan *sh_msiof_request_dma_chan(struct device *dev,
+ enum dma_transfer_direction dir, unsigned int id, dma_addr_t port_addr)
+{
+ dma_cap_mask_t mask;
+ struct dma_chan *chan;
+ struct dma_slave_config cfg;
+ int ret;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ chan = dma_request_slave_channel_compat(mask, shdma_chan_filter,
+ (void *)(unsigned long)id, dev,
+ dir == DMA_MEM_TO_DEV ? "tx" : "rx");
+ if (!chan) {
+ dev_warn(dev, "dma_request_slave_channel_compat failed\n");
+ return NULL;
+ }
+
+ memset(&cfg, 0, sizeof(cfg));
+ cfg.direction = dir;
+ if (dir == DMA_MEM_TO_DEV) {
+ cfg.dst_addr = port_addr;
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ } else {
+ cfg.src_addr = port_addr;
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ }
+
+ ret = dmaengine_slave_config(chan, &cfg);
+ if (ret) {
+ dev_warn(dev, "dmaengine_slave_config failed %d\n", ret);
+ dma_release_channel(chan);
+ return NULL;
+ }
+
+ return chan;
+}
+
+static int sh_msiof_request_dma(struct sh_msiof_spi_priv *p)
+{
+ struct platform_device *pdev = p->pdev;
+ struct device *dev = &pdev->dev;
+ const struct sh_msiof_spi_info *info = p->info;
+ unsigned int dma_tx_id, dma_rx_id;
+ const struct resource *res;
+ struct spi_controller *ctlr;
+ struct device *tx_dev, *rx_dev;
+
+ if (dev->of_node) {
+ /* In the OF case we will get the slave IDs from the DT */
+ dma_tx_id = 0;
+ dma_rx_id = 0;
+ } else if (info && info->dma_tx_id && info->dma_rx_id) {
+ dma_tx_id = info->dma_tx_id;
+ dma_rx_id = info->dma_rx_id;
+ } else {
+		/* No DMA channel information was provided; stay with PIO (not an error) */
+ return 0;
+ }
+
+ /* The DMA engine uses the second register set, if present */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res)
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ ctlr = p->ctlr;
+ ctlr->dma_tx = sh_msiof_request_dma_chan(dev, DMA_MEM_TO_DEV,
+ dma_tx_id, res->start + SITFDR);
+ if (!ctlr->dma_tx)
+ return -ENODEV;
+
+ ctlr->dma_rx = sh_msiof_request_dma_chan(dev, DMA_DEV_TO_MEM,
+ dma_rx_id, res->start + SIRFDR);
+ if (!ctlr->dma_rx)
+ goto free_tx_chan;
+
+ p->tx_dma_page = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
+ if (!p->tx_dma_page)
+ goto free_rx_chan;
+
+ p->rx_dma_page = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
+ if (!p->rx_dma_page)
+ goto free_tx_page;
+
+ tx_dev = ctlr->dma_tx->device->dev;
+ p->tx_dma_addr = dma_map_single(tx_dev, p->tx_dma_page, PAGE_SIZE,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_dev, p->tx_dma_addr))
+ goto free_rx_page;
+
+ rx_dev = ctlr->dma_rx->device->dev;
+ p->rx_dma_addr = dma_map_single(rx_dev, p->rx_dma_page, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(rx_dev, p->rx_dma_addr))
+ goto unmap_tx_page;
+
+	dev_info(dev, "DMA available\n");
+ return 0;
+
+unmap_tx_page:
+ dma_unmap_single(tx_dev, p->tx_dma_addr, PAGE_SIZE, DMA_TO_DEVICE);
+free_rx_page:
+ free_page((unsigned long)p->rx_dma_page);
+free_tx_page:
+ free_page((unsigned long)p->tx_dma_page);
+free_rx_chan:
+ dma_release_channel(ctlr->dma_rx);
+free_tx_chan:
+ dma_release_channel(ctlr->dma_tx);
+ ctlr->dma_tx = NULL;
+ return -ENODEV;
+}
+
+static void sh_msiof_release_dma(struct sh_msiof_spi_priv *p)
+{
+ struct spi_controller *ctlr = p->ctlr;
+
+ if (!ctlr->dma_tx)
+ return;
+
+ dma_unmap_single(ctlr->dma_rx->device->dev, p->rx_dma_addr, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ dma_unmap_single(ctlr->dma_tx->device->dev, p->tx_dma_addr, PAGE_SIZE,
+ DMA_TO_DEVICE);
+ free_page((unsigned long)p->rx_dma_page);
+ free_page((unsigned long)p->tx_dma_page);
+ dma_release_channel(ctlr->dma_rx);
+ dma_release_channel(ctlr->dma_tx);
+}
+
+static int sh_msiof_spi_probe(struct platform_device *pdev)
+{
+ struct spi_controller *ctlr;
+ const struct sh_msiof_chipdata *chipdata;
+ struct sh_msiof_spi_info *info;
+ struct sh_msiof_spi_priv *p;
+ unsigned long clksrc;
+ int i;
+ int ret;
+
+ chipdata = of_device_get_match_data(&pdev->dev);
+ if (chipdata) {
+ info = sh_msiof_spi_parse_dt(&pdev->dev);
+ } else {
+ chipdata = (const void *)pdev->id_entry->driver_data;
+ info = dev_get_platdata(&pdev->dev);
+ }
+
+ if (!info) {
+ dev_err(&pdev->dev, "failed to obtain device info\n");
+ return -ENXIO;
+ }
+
+ if (chipdata->flags & SH_MSIOF_FLAG_FIXED_DTDL_200)
+ info->dtdl = 200;
+
+ if (info->mode == MSIOF_SPI_SLAVE)
+ ctlr = spi_alloc_slave(&pdev->dev,
+ sizeof(struct sh_msiof_spi_priv));
+ else
+ ctlr = spi_alloc_master(&pdev->dev,
+ sizeof(struct sh_msiof_spi_priv));
+ if (ctlr == NULL)
+ return -ENOMEM;
+
+ p = spi_controller_get_devdata(ctlr);
+
+ platform_set_drvdata(pdev, p);
+ p->ctlr = ctlr;
+ p->info = info;
+ p->min_div_pow = chipdata->min_div_pow;
+
+ init_completion(&p->done);
+ init_completion(&p->done_txdma);
+
+ p->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(p->clk)) {
+ dev_err(&pdev->dev, "cannot get clock\n");
+ ret = PTR_ERR(p->clk);
+ goto err1;
+ }
+
+ i = platform_get_irq(pdev, 0);
+ if (i < 0) {
+ ret = i;
+ goto err1;
+ }
+
+ p->mapbase = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(p->mapbase)) {
+ ret = PTR_ERR(p->mapbase);
+ goto err1;
+ }
+
+ ret = devm_request_irq(&pdev->dev, i, sh_msiof_spi_irq, 0,
+ dev_name(&pdev->dev), p);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to request irq\n");
+ goto err1;
+ }
+
+ p->pdev = pdev;
+ pm_runtime_enable(&pdev->dev);
+
+ /* Platform data may override FIFO sizes */
+ p->tx_fifo_size = chipdata->tx_fifo_size;
+ p->rx_fifo_size = chipdata->rx_fifo_size;
+ if (p->info->tx_fifo_override)
+ p->tx_fifo_size = p->info->tx_fifo_override;
+ if (p->info->rx_fifo_override)
+ p->rx_fifo_size = p->info->rx_fifo_override;
+
+ /* init controller code */
+ ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ ctlr->mode_bits |= SPI_LSB_FIRST | SPI_3WIRE;
+ clksrc = clk_get_rate(p->clk);
+ ctlr->min_speed_hz = DIV_ROUND_UP(clksrc, 1024);
+ ctlr->max_speed_hz = DIV_ROUND_UP(clksrc, 1 << p->min_div_pow);
+ ctlr->flags = chipdata->ctlr_flags;
+ ctlr->bus_num = pdev->id;
+ ctlr->num_chipselect = p->info->num_chipselect;
+ ctlr->dev.of_node = pdev->dev.of_node;
+ ctlr->setup = sh_msiof_spi_setup;
+ ctlr->prepare_message = sh_msiof_prepare_message;
+ ctlr->slave_abort = sh_msiof_slave_abort;
+ ctlr->bits_per_word_mask = chipdata->bits_per_word_mask;
+ ctlr->auto_runtime_pm = true;
+ ctlr->transfer_one = sh_msiof_transfer_one;
+ ctlr->use_gpio_descriptors = true;
+ ctlr->max_native_cs = MAX_SS;
+
+ ret = sh_msiof_request_dma(p);
+ if (ret < 0)
+ dev_warn(&pdev->dev, "DMA not available, using PIO\n");
+
+ ret = devm_spi_register_controller(&pdev->dev, ctlr);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "devm_spi_register_controller error.\n");
+ goto err2;
+ }
+
+ return 0;
+
+ err2:
+ sh_msiof_release_dma(p);
+ pm_runtime_disable(&pdev->dev);
+ err1:
+ spi_controller_put(ctlr);
+ return ret;
+}
+
+static int sh_msiof_spi_remove(struct platform_device *pdev)
+{
+ struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev);
+
+ sh_msiof_release_dma(p);
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
+static const struct platform_device_id spi_driver_ids[] = {
+ { "spi_sh_msiof", (kernel_ulong_t)&sh_data },
+ {},
+};
+MODULE_DEVICE_TABLE(platform, spi_driver_ids);
+
+#ifdef CONFIG_PM_SLEEP
+static int sh_msiof_spi_suspend(struct device *dev)
+{
+ struct sh_msiof_spi_priv *p = dev_get_drvdata(dev);
+
+ return spi_controller_suspend(p->ctlr);
+}
+
+static int sh_msiof_spi_resume(struct device *dev)
+{
+ struct sh_msiof_spi_priv *p = dev_get_drvdata(dev);
+
+ return spi_controller_resume(p->ctlr);
+}
+
+static SIMPLE_DEV_PM_OPS(sh_msiof_spi_pm_ops, sh_msiof_spi_suspend,
+ sh_msiof_spi_resume);
+#define DEV_PM_OPS (&sh_msiof_spi_pm_ops)
+#else
+#define DEV_PM_OPS NULL
+#endif /* CONFIG_PM_SLEEP */
+
+static struct platform_driver sh_msiof_spi_drv = {
+ .probe = sh_msiof_spi_probe,
+ .remove = sh_msiof_spi_remove,
+ .id_table = spi_driver_ids,
+ .driver = {
+ .name = "spi_sh_msiof",
+ .pm = DEV_PM_OPS,
+ .of_match_table = of_match_ptr(sh_msiof_match),
+ },
+};
+module_platform_driver(sh_msiof_spi_drv);
+
+MODULE_DESCRIPTION("SuperH MSIOF SPI Controller Interface Driver");
+MODULE_AUTHOR("Magnus Damm");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-sh-sci.c b/drivers/spi/spi-sh-sci.c
new file mode 100644
index 000000000..8f30531e1
--- /dev/null
+++ b/drivers/spi/spi-sh-sci.c
@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * SH SCI SPI interface
+ *
+ * Copyright (c) 2008 Magnus Damm
+ *
+ * Based on S3C24XX GPIO based SPI driver, which is:
+ * Copyright (c) 2006 Ben Dooks
+ * Copyright (c) 2006 Simtec Electronics
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/platform_device.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+#include <linux/module.h>
+
+#include <asm/spi.h>
+#include <asm/io.h>
+
+struct sh_sci_spi {
+ struct spi_bitbang bitbang;
+
+ void __iomem *membase;
+ unsigned char val;
+ struct sh_spi_info *info;
+ struct platform_device *dev;
+};
+
+#define SCSPTR(sp) (sp->membase + 0x1c)
+#define PIN_SCK (1 << 2)
+#define PIN_TXD (1 << 0)
+#define PIN_RXD PIN_TXD
+#define PIN_INIT ((1 << 1) | (1 << 3) | PIN_SCK | PIN_TXD)
+
+static inline void setbits(struct sh_sci_spi *sp, int bits, int on)
+{
+ /*
+ * We are the only user of SCSPTR so no locking is required.
+	 * Reading bits 2 and 0 of SCSPTR gives the pin state as input.
+	 * Writing the same bits sets the output value.
+	 * This makes a regular read-modify-write difficult, so we
+	 * use sp->val to keep track of the latest register value.
+ */
+
+ if (on)
+ sp->val |= bits;
+ else
+ sp->val &= ~bits;
+
+ iowrite8(sp->val, SCSPTR(sp));
+}
+
+static inline void setsck(struct spi_device *dev, int on)
+{
+ setbits(spi_master_get_devdata(dev->master), PIN_SCK, on);
+}
+
+static inline void setmosi(struct spi_device *dev, int on)
+{
+ setbits(spi_master_get_devdata(dev->master), PIN_TXD, on);
+}
+
+static inline u32 getmiso(struct spi_device *dev)
+{
+ struct sh_sci_spi *sp = spi_master_get_devdata(dev->master);
+
+ return (ioread8(SCSPTR(sp)) & PIN_RXD) ? 1 : 0;
+}
+
+#define spidelay(x) ndelay(x)
+
+#include "spi-bitbang-txrx.h"
+
+static u32 sh_sci_spi_txrx_mode0(struct spi_device *spi,
+ unsigned nsecs, u32 word, u8 bits,
+ unsigned flags)
+{
+ return bitbang_txrx_be_cpha0(spi, nsecs, 0, flags, word, bits);
+}
+
+static u32 sh_sci_spi_txrx_mode1(struct spi_device *spi,
+ unsigned nsecs, u32 word, u8 bits,
+ unsigned flags)
+{
+ return bitbang_txrx_be_cpha1(spi, nsecs, 0, flags, word, bits);
+}
+
+static u32 sh_sci_spi_txrx_mode2(struct spi_device *spi,
+ unsigned nsecs, u32 word, u8 bits,
+ unsigned flags)
+{
+ return bitbang_txrx_be_cpha0(spi, nsecs, 1, flags, word, bits);
+}
+
+static u32 sh_sci_spi_txrx_mode3(struct spi_device *spi,
+ unsigned nsecs, u32 word, u8 bits,
+ unsigned flags)
+{
+ return bitbang_txrx_be_cpha1(spi, nsecs, 1, flags, word, bits);
+}
+
+static void sh_sci_spi_chipselect(struct spi_device *dev, int value)
+{
+ struct sh_sci_spi *sp = spi_master_get_devdata(dev->master);
+
+ if (sp->info->chip_select)
+ (sp->info->chip_select)(sp->info, dev->chip_select, value);
+}
+
+static int sh_sci_spi_probe(struct platform_device *dev)
+{
+ struct resource *r;
+ struct spi_master *master;
+ struct sh_sci_spi *sp;
+ int ret;
+
+ master = spi_alloc_master(&dev->dev, sizeof(struct sh_sci_spi));
+ if (master == NULL) {
+ dev_err(&dev->dev, "failed to allocate spi master\n");
+ ret = -ENOMEM;
+ goto err0;
+ }
+
+ sp = spi_master_get_devdata(master);
+
+ platform_set_drvdata(dev, sp);
+ sp->info = dev_get_platdata(&dev->dev);
+ if (!sp->info) {
+ dev_err(&dev->dev, "platform data is missing\n");
+ ret = -ENOENT;
+ goto err1;
+ }
+
+ /* setup spi bitbang adaptor */
+ sp->bitbang.master = master;
+ sp->bitbang.master->bus_num = sp->info->bus_num;
+ sp->bitbang.master->num_chipselect = sp->info->num_chipselect;
+ sp->bitbang.chipselect = sh_sci_spi_chipselect;
+
+ sp->bitbang.txrx_word[SPI_MODE_0] = sh_sci_spi_txrx_mode0;
+ sp->bitbang.txrx_word[SPI_MODE_1] = sh_sci_spi_txrx_mode1;
+ sp->bitbang.txrx_word[SPI_MODE_2] = sh_sci_spi_txrx_mode2;
+ sp->bitbang.txrx_word[SPI_MODE_3] = sh_sci_spi_txrx_mode3;
+
+ r = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (r == NULL) {
+ ret = -ENOENT;
+ goto err1;
+ }
+ sp->membase = ioremap(r->start, resource_size(r));
+ if (!sp->membase) {
+ ret = -ENXIO;
+ goto err1;
+ }
+ sp->val = ioread8(SCSPTR(sp));
+ setbits(sp, PIN_INIT, 1);
+
+ ret = spi_bitbang_start(&sp->bitbang);
+ if (!ret)
+ return 0;
+
+ setbits(sp, PIN_INIT, 0);
+ iounmap(sp->membase);
+ err1:
+ spi_master_put(sp->bitbang.master);
+ err0:
+ return ret;
+}
+
+static int sh_sci_spi_remove(struct platform_device *dev)
+{
+ struct sh_sci_spi *sp = platform_get_drvdata(dev);
+
+ spi_bitbang_stop(&sp->bitbang);
+ setbits(sp, PIN_INIT, 0);
+ iounmap(sp->membase);
+ spi_master_put(sp->bitbang.master);
+ return 0;
+}
+
+static struct platform_driver sh_sci_spi_drv = {
+ .probe = sh_sci_spi_probe,
+ .remove = sh_sci_spi_remove,
+ .driver = {
+ .name = "spi_sh_sci",
+ },
+};
+module_platform_driver(sh_sci_spi_drv);
+
+MODULE_DESCRIPTION("SH SCI SPI Driver");
+MODULE_AUTHOR("Magnus Damm <damm@opensource.se>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:spi_sh_sci");
diff --git a/drivers/spi/spi-sh.c b/drivers/spi/spi-sh.c
new file mode 100644
index 000000000..3e72fad99
--- /dev/null
+++ b/drivers/spi/spi-sh.c
@@ -0,0 +1,474 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SH SPI bus driver
+ *
+ * Copyright (C) 2011 Renesas Solutions Corp.
+ *
+ * Based on pxa2xx_spi.c:
+ * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/spi/spi.h>
+
+#define SPI_SH_TBR 0x00
+#define SPI_SH_RBR 0x00
+#define SPI_SH_CR1 0x08
+#define SPI_SH_CR2 0x10
+#define SPI_SH_CR3 0x18
+#define SPI_SH_CR4 0x20
+#define SPI_SH_CR5 0x28
+
+/* CR1 */
+#define SPI_SH_TBE 0x80
+#define SPI_SH_TBF 0x40
+#define SPI_SH_RBE 0x20
+#define SPI_SH_RBF 0x10
+#define SPI_SH_PFONRD 0x08
+#define SPI_SH_SSDB 0x04
+#define SPI_SH_SSD 0x02
+#define SPI_SH_SSA 0x01
+
+/* CR2 */
+#define SPI_SH_RSTF 0x80
+#define SPI_SH_LOOPBK 0x40
+#define SPI_SH_CPOL 0x20
+#define SPI_SH_CPHA 0x10
+#define SPI_SH_L1M0 0x08
+
+/* CR3 */
+#define SPI_SH_MAX_BYTE 0xFF
+
+/* CR4 */
+#define SPI_SH_TBEI 0x80
+#define SPI_SH_TBFI 0x40
+#define SPI_SH_RBEI 0x20
+#define SPI_SH_RBFI 0x10
+#define SPI_SH_WPABRT 0x04
+#define SPI_SH_SSS 0x01
+
+/* CR8 */
+#define SPI_SH_P1L0 0x80
+#define SPI_SH_PP1L0 0x40
+#define SPI_SH_MUXI 0x20
+#define SPI_SH_MUXIRQ 0x10
+
+#define SPI_SH_FIFO_SIZE 32
+#define SPI_SH_SEND_TIMEOUT (3 * HZ)
+#define SPI_SH_RECEIVE_TIMEOUT (HZ >> 3)
+
+#undef DEBUG
+
+struct spi_sh_data {
+ void __iomem *addr;
+ int irq;
+ struct spi_master *master;
+ unsigned long cr1;
+ wait_queue_head_t wait;
+ int width;
+};
+
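+/*
+ * The controller can sit on either an 8-bit or a 32-bit bus (see the probe
+ * resource flags); on the 8-bit bus the registers are packed more tightly,
+ * hence the offset scaling below.
+ */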
+static void spi_sh_write(struct spi_sh_data *ss, unsigned long data,
+ unsigned long offset)
+{
+ if (ss->width == 8)
+ iowrite8(data, ss->addr + (offset >> 2));
+ else if (ss->width == 32)
+ iowrite32(data, ss->addr + offset);
+}
+
+static unsigned long spi_sh_read(struct spi_sh_data *ss, unsigned long offset)
+{
+ if (ss->width == 8)
+ return ioread8(ss->addr + (offset >> 2));
+ else if (ss->width == 32)
+ return ioread32(ss->addr + offset);
+ else
+ return 0;
+}
+
+static void spi_sh_set_bit(struct spi_sh_data *ss, unsigned long val,
+ unsigned long offset)
+{
+ unsigned long tmp;
+
+ tmp = spi_sh_read(ss, offset);
+ tmp |= val;
+ spi_sh_write(ss, tmp, offset);
+}
+
+static void spi_sh_clear_bit(struct spi_sh_data *ss, unsigned long val,
+ unsigned long offset)
+{
+ unsigned long tmp;
+
+ tmp = spi_sh_read(ss, offset);
+ tmp &= ~val;
+ spi_sh_write(ss, tmp, offset);
+}
+
+static void clear_fifo(struct spi_sh_data *ss)
+{
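+	/* pulse RSTF to clear the FIFOs */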
+ spi_sh_set_bit(ss, SPI_SH_RSTF, SPI_SH_CR2);
+ spi_sh_clear_bit(ss, SPI_SH_RSTF, SPI_SH_CR2);
+}
+
+static int spi_sh_wait_receive_buffer(struct spi_sh_data *ss)
+{
+ int timeout = 100000;
+
+ while (spi_sh_read(ss, SPI_SH_CR1) & SPI_SH_RBE) {
+ udelay(10);
+ if (timeout-- < 0)
+ return -ETIMEDOUT;
+ }
+ return 0;
+}
+
+static int spi_sh_wait_write_buffer_empty(struct spi_sh_data *ss)
+{
+ int timeout = 100000;
+
+ while (!(spi_sh_read(ss, SPI_SH_CR1) & SPI_SH_TBE)) {
+ udelay(10);
+ if (timeout-- < 0)
+ return -ETIMEDOUT;
+ }
+ return 0;
+}
+
+static int spi_sh_send(struct spi_sh_data *ss, struct spi_message *mesg,
+ struct spi_transfer *t)
+{
+ int i, retval = 0;
+ int remain = t->len;
+ int cur_len;
+ unsigned char *data;
+ long ret;
+
+ if (t->len)
+ spi_sh_set_bit(ss, SPI_SH_SSA, SPI_SH_CR1);
+
+ data = (unsigned char *)t->tx_buf;
+ while (remain > 0) {
+ cur_len = min(SPI_SH_FIFO_SIZE, remain);
+ for (i = 0; i < cur_len &&
+ !(spi_sh_read(ss, SPI_SH_CR4) &
+ SPI_SH_WPABRT) &&
+ !(spi_sh_read(ss, SPI_SH_CR1) & SPI_SH_TBF);
+ i++)
+ spi_sh_write(ss, (unsigned long)data[i], SPI_SH_TBR);
+
+ if (spi_sh_read(ss, SPI_SH_CR4) & SPI_SH_WPABRT) {
+ /* Abort SPI operation */
+ spi_sh_set_bit(ss, SPI_SH_WPABRT, SPI_SH_CR4);
+ retval = -EIO;
+ break;
+ }
+
+ cur_len = i;
+
+ remain -= cur_len;
+ data += cur_len;
+
+ if (remain > 0) {
+ ss->cr1 &= ~SPI_SH_TBE;
+ spi_sh_set_bit(ss, SPI_SH_TBE, SPI_SH_CR4);
+ ret = wait_event_interruptible_timeout(ss->wait,
+ ss->cr1 & SPI_SH_TBE,
+ SPI_SH_SEND_TIMEOUT);
+ if (ret == 0 && !(ss->cr1 & SPI_SH_TBE)) {
+ printk(KERN_ERR "%s: timeout\n", __func__);
+ return -ETIMEDOUT;
+ }
+ }
+ }
+
+ if (list_is_last(&t->transfer_list, &mesg->transfers)) {
+ spi_sh_clear_bit(ss, SPI_SH_SSD | SPI_SH_SSDB, SPI_SH_CR1);
+ spi_sh_set_bit(ss, SPI_SH_SSA, SPI_SH_CR1);
+
+ ss->cr1 &= ~SPI_SH_TBE;
+ spi_sh_set_bit(ss, SPI_SH_TBE, SPI_SH_CR4);
+ ret = wait_event_interruptible_timeout(ss->wait,
+ ss->cr1 & SPI_SH_TBE,
+ SPI_SH_SEND_TIMEOUT);
+		if (ret == 0 && !(ss->cr1 & SPI_SH_TBE)) {
+ printk(KERN_ERR "%s: timeout\n", __func__);
+ return -ETIMEDOUT;
+ }
+ }
+
+ return retval;
+}
+
+static int spi_sh_receive(struct spi_sh_data *ss, struct spi_message *mesg,
+ struct spi_transfer *t)
+{
+ int i;
+ int remain = t->len;
+ int cur_len;
+ unsigned char *data;
+ long ret;
+
+ if (t->len > SPI_SH_MAX_BYTE)
+ spi_sh_write(ss, SPI_SH_MAX_BYTE, SPI_SH_CR3);
+ else
+ spi_sh_write(ss, t->len, SPI_SH_CR3);
+
+ spi_sh_clear_bit(ss, SPI_SH_SSD | SPI_SH_SSDB, SPI_SH_CR1);
+ spi_sh_set_bit(ss, SPI_SH_SSA, SPI_SH_CR1);
+
+ spi_sh_wait_write_buffer_empty(ss);
+
+ data = (unsigned char *)t->rx_buf;
+ while (remain > 0) {
+ if (remain >= SPI_SH_FIFO_SIZE) {
+ ss->cr1 &= ~SPI_SH_RBF;
+ spi_sh_set_bit(ss, SPI_SH_RBF, SPI_SH_CR4);
+ ret = wait_event_interruptible_timeout(ss->wait,
+ ss->cr1 & SPI_SH_RBF,
+ SPI_SH_RECEIVE_TIMEOUT);
+ if (ret == 0 &&
+ spi_sh_read(ss, SPI_SH_CR1) & SPI_SH_RBE) {
+ printk(KERN_ERR "%s: timeout\n", __func__);
+ return -ETIMEDOUT;
+ }
+ }
+
+ cur_len = min(SPI_SH_FIFO_SIZE, remain);
+ for (i = 0; i < cur_len; i++) {
+ if (spi_sh_wait_receive_buffer(ss))
+ break;
+ data[i] = (unsigned char)spi_sh_read(ss, SPI_SH_RBR);
+ }
+
+ remain -= cur_len;
+ data += cur_len;
+ }
+
+ /* deassert CS when SPI is receiving. */
+ if (t->len > SPI_SH_MAX_BYTE) {
+ clear_fifo(ss);
+ spi_sh_write(ss, 1, SPI_SH_CR3);
+ } else {
+ spi_sh_write(ss, 0, SPI_SH_CR3);
+ }
+
+ return 0;
+}
+
+static int spi_sh_transfer_one_message(struct spi_controller *ctlr,
+ struct spi_message *mesg)
+{
+ struct spi_sh_data *ss = spi_controller_get_devdata(ctlr);
+ struct spi_transfer *t;
+ int ret;
+
+ pr_debug("%s: enter\n", __func__);
+
+ spi_sh_clear_bit(ss, SPI_SH_SSA, SPI_SH_CR1);
+
+ list_for_each_entry(t, &mesg->transfers, transfer_list) {
+ pr_debug("tx_buf = %p, rx_buf = %p\n",
+ t->tx_buf, t->rx_buf);
+ pr_debug("len = %d, delay.value = %d\n",
+ t->len, t->delay.value);
+
+ if (t->tx_buf) {
+ ret = spi_sh_send(ss, mesg, t);
+ if (ret < 0)
+ goto error;
+ }
+ if (t->rx_buf) {
+ ret = spi_sh_receive(ss, mesg, t);
+ if (ret < 0)
+ goto error;
+ }
+ mesg->actual_length += t->len;
+ }
+
+ mesg->status = 0;
+ spi_finalize_current_message(ctlr);
+
+ clear_fifo(ss);
+ spi_sh_set_bit(ss, SPI_SH_SSD, SPI_SH_CR1);
+ udelay(100);
+
+ spi_sh_clear_bit(ss, SPI_SH_SSA | SPI_SH_SSDB | SPI_SH_SSD,
+ SPI_SH_CR1);
+
+ clear_fifo(ss);
+
+ return 0;
+
+ error:
+ mesg->status = ret;
+ spi_finalize_current_message(ctlr);
+ if (mesg->complete)
+ mesg->complete(mesg->context);
+
+ spi_sh_clear_bit(ss, SPI_SH_SSA | SPI_SH_SSDB | SPI_SH_SSD,
+ SPI_SH_CR1);
+ clear_fifo(ss);
+
+ return ret;
+}
+
+static int spi_sh_setup(struct spi_device *spi)
+{
+ struct spi_sh_data *ss = spi_master_get_devdata(spi->master);
+
+ pr_debug("%s: enter\n", __func__);
+
+	spi_sh_write(ss, 0xfe, SPI_SH_CR1);	/* SPI cycle stop */
+ spi_sh_write(ss, 0x00, SPI_SH_CR1); /* CR1 init */
+ spi_sh_write(ss, 0x00, SPI_SH_CR3); /* CR3 init */
+
+ clear_fifo(ss);
+
+ /* 1/8 clock */
+ spi_sh_write(ss, spi_sh_read(ss, SPI_SH_CR2) | 0x07, SPI_SH_CR2);
+ udelay(10);
+
+ return 0;
+}
+
+static void spi_sh_cleanup(struct spi_device *spi)
+{
+ struct spi_sh_data *ss = spi_master_get_devdata(spi->master);
+
+ pr_debug("%s: enter\n", __func__);
+
+ spi_sh_clear_bit(ss, SPI_SH_SSA | SPI_SH_SSDB | SPI_SH_SSD,
+ SPI_SH_CR1);
+}
+
+static irqreturn_t spi_sh_irq(int irq, void *_ss)
+{
+ struct spi_sh_data *ss = (struct spi_sh_data *)_ss;
+ unsigned long cr1;
+
+ cr1 = spi_sh_read(ss, SPI_SH_CR1);
+ if (cr1 & SPI_SH_TBE)
+ ss->cr1 |= SPI_SH_TBE;
+ if (cr1 & SPI_SH_TBF)
+ ss->cr1 |= SPI_SH_TBF;
+ if (cr1 & SPI_SH_RBE)
+ ss->cr1 |= SPI_SH_RBE;
+ if (cr1 & SPI_SH_RBF)
+ ss->cr1 |= SPI_SH_RBF;
+
+ if (ss->cr1) {
+ spi_sh_clear_bit(ss, ss->cr1, SPI_SH_CR4);
+ wake_up(&ss->wait);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int spi_sh_remove(struct platform_device *pdev)
+{
+ struct spi_sh_data *ss = platform_get_drvdata(pdev);
+
+ spi_unregister_master(ss->master);
+ free_irq(ss->irq, ss);
+
+ return 0;
+}
+
+static int spi_sh_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct spi_master *master;
+ struct spi_sh_data *ss;
+ int ret, irq;
+
+ /* get base addr */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (unlikely(res == NULL)) {
+ dev_err(&pdev->dev, "invalid resource\n");
+ return -EINVAL;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ master = devm_spi_alloc_master(&pdev->dev, sizeof(struct spi_sh_data));
+ if (master == NULL) {
+ dev_err(&pdev->dev, "spi_alloc_master error.\n");
+ return -ENOMEM;
+ }
+
+ ss = spi_master_get_devdata(master);
+ platform_set_drvdata(pdev, ss);
+
+ switch (res->flags & IORESOURCE_MEM_TYPE_MASK) {
+ case IORESOURCE_MEM_8BIT:
+ ss->width = 8;
+ break;
+ case IORESOURCE_MEM_32BIT:
+ ss->width = 32;
+ break;
+ default:
+ dev_err(&pdev->dev, "No support width\n");
+ return -ENODEV;
+ }
+ ss->irq = irq;
+ ss->master = master;
+ ss->addr = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (ss->addr == NULL) {
+ dev_err(&pdev->dev, "ioremap error.\n");
+ return -ENOMEM;
+ }
+ init_waitqueue_head(&ss->wait);
+
+ ret = request_irq(irq, spi_sh_irq, 0, "spi_sh", ss);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "request_irq error\n");
+ return ret;
+ }
+
+ master->num_chipselect = 2;
+ master->bus_num = pdev->id;
+ master->setup = spi_sh_setup;
+ master->transfer_one_message = spi_sh_transfer_one_message;
+ master->cleanup = spi_sh_cleanup;
+
+ ret = spi_register_master(master);
+ if (ret < 0) {
+ printk(KERN_ERR "spi_register_master error.\n");
+ goto error3;
+ }
+
+ return 0;
+
+ error3:
+ free_irq(irq, ss);
+ return ret;
+}
+
+static struct platform_driver spi_sh_driver = {
+ .probe = spi_sh_probe,
+ .remove = spi_sh_remove,
+ .driver = {
+ .name = "sh_spi",
+ },
+};
+module_platform_driver(spi_sh_driver);
+
+MODULE_DESCRIPTION("SH SPI bus driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Yoshihiro Shimoda");
+MODULE_ALIAS("platform:sh_spi");
diff --git a/drivers/spi/spi-sifive.c b/drivers/spi/spi-sifive.c
new file mode 100644
index 000000000..e29e85cee
--- /dev/null
+++ b/drivers/spi/spi-sifive.c
@@ -0,0 +1,487 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright 2018 SiFive, Inc.
+//
+// SiFive SPI controller driver (master mode only)
+//
+// Author: SiFive, Inc.
+// sifive@sifive.com
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/io.h>
+#include <linux/log2.h>
+
+#define SIFIVE_SPI_DRIVER_NAME "sifive_spi"
+
+#define SIFIVE_SPI_MAX_CS 32
+#define SIFIVE_SPI_DEFAULT_DEPTH 8
+#define SIFIVE_SPI_DEFAULT_MAX_BITS 8
+
+/* register offsets */
+#define SIFIVE_SPI_REG_SCKDIV 0x00 /* Serial clock divisor */
+#define SIFIVE_SPI_REG_SCKMODE 0x04 /* Serial clock mode */
+#define SIFIVE_SPI_REG_CSID 0x10 /* Chip select ID */
+#define SIFIVE_SPI_REG_CSDEF 0x14 /* Chip select default */
+#define SIFIVE_SPI_REG_CSMODE 0x18 /* Chip select mode */
+#define SIFIVE_SPI_REG_DELAY0 0x28 /* Delay control 0 */
+#define SIFIVE_SPI_REG_DELAY1 0x2c /* Delay control 1 */
+#define SIFIVE_SPI_REG_FMT 0x40 /* Frame format */
+#define SIFIVE_SPI_REG_TXDATA 0x48 /* Tx FIFO data */
+#define SIFIVE_SPI_REG_RXDATA 0x4c /* Rx FIFO data */
+#define SIFIVE_SPI_REG_TXMARK 0x50 /* Tx FIFO watermark */
+#define SIFIVE_SPI_REG_RXMARK 0x54 /* Rx FIFO watermark */
+#define SIFIVE_SPI_REG_FCTRL 0x60 /* SPI flash interface control */
+#define SIFIVE_SPI_REG_FFMT 0x64 /* SPI flash instruction format */
+#define SIFIVE_SPI_REG_IE 0x70 /* Interrupt Enable Register */
+#define SIFIVE_SPI_REG_IP		0x74 /* Interrupt Pending Register */
+
+/* sckdiv bits */
+#define SIFIVE_SPI_SCKDIV_DIV_MASK 0xfffU
+
+/* sckmode bits */
+#define SIFIVE_SPI_SCKMODE_PHA BIT(0)
+#define SIFIVE_SPI_SCKMODE_POL BIT(1)
+#define SIFIVE_SPI_SCKMODE_MODE_MASK (SIFIVE_SPI_SCKMODE_PHA | \
+ SIFIVE_SPI_SCKMODE_POL)
+
+/* csmode bits */
+#define SIFIVE_SPI_CSMODE_MODE_AUTO 0U
+#define SIFIVE_SPI_CSMODE_MODE_HOLD 2U
+#define SIFIVE_SPI_CSMODE_MODE_OFF 3U
+
+/* delay0 bits */
+#define SIFIVE_SPI_DELAY0_CSSCK(x) ((u32)(x))
+#define SIFIVE_SPI_DELAY0_CSSCK_MASK 0xffU
+#define SIFIVE_SPI_DELAY0_SCKCS(x) ((u32)(x) << 16)
+#define SIFIVE_SPI_DELAY0_SCKCS_MASK (0xffU << 16)
+
+/* delay1 bits */
+#define SIFIVE_SPI_DELAY1_INTERCS(x) ((u32)(x))
+#define SIFIVE_SPI_DELAY1_INTERCS_MASK 0xffU
+#define SIFIVE_SPI_DELAY1_INTERXFR(x) ((u32)(x) << 16)
+#define SIFIVE_SPI_DELAY1_INTERXFR_MASK (0xffU << 16)
+
+/* fmt bits */
+#define SIFIVE_SPI_FMT_PROTO_SINGLE 0U
+#define SIFIVE_SPI_FMT_PROTO_DUAL 1U
+#define SIFIVE_SPI_FMT_PROTO_QUAD 2U
+#define SIFIVE_SPI_FMT_PROTO_MASK 3U
+#define SIFIVE_SPI_FMT_ENDIAN BIT(2)
+#define SIFIVE_SPI_FMT_DIR BIT(3)
+#define SIFIVE_SPI_FMT_LEN(x) ((u32)(x) << 16)
+#define SIFIVE_SPI_FMT_LEN_MASK (0xfU << 16)
+
+/* txdata bits */
+#define SIFIVE_SPI_TXDATA_DATA_MASK 0xffU
+#define SIFIVE_SPI_TXDATA_FULL BIT(31)
+
+/* rxdata bits */
+#define SIFIVE_SPI_RXDATA_DATA_MASK 0xffU
+#define SIFIVE_SPI_RXDATA_EMPTY BIT(31)
+
+/* ie and ip bits */
+#define SIFIVE_SPI_IP_TXWM BIT(0)
+#define SIFIVE_SPI_IP_RXWM BIT(1)
+
+struct sifive_spi {
+ void __iomem *regs; /* virt. address of control registers */
+ struct clk *clk; /* bus clock */
+ unsigned int fifo_depth; /* fifo depth in words */
+ u32 cs_inactive; /* level of the CS pins when inactive */
+ struct completion done; /* wake-up from interrupt */
+};
+
+static void sifive_spi_write(struct sifive_spi *spi, int offset, u32 value)
+{
+ iowrite32(value, spi->regs + offset);
+}
+
+static u32 sifive_spi_read(struct sifive_spi *spi, int offset)
+{
+ return ioread32(spi->regs + offset);
+}
+
+static void sifive_spi_init(struct sifive_spi *spi)
+{
+ /* Watermark interrupts are disabled by default */
+ sifive_spi_write(spi, SIFIVE_SPI_REG_IE, 0);
+
+ /* Default watermark FIFO threshold values */
+ sifive_spi_write(spi, SIFIVE_SPI_REG_TXMARK, 1);
+ sifive_spi_write(spi, SIFIVE_SPI_REG_RXMARK, 0);
+
+ /* Set CS/SCK Delays and Inactive Time to defaults */
+ sifive_spi_write(spi, SIFIVE_SPI_REG_DELAY0,
+ SIFIVE_SPI_DELAY0_CSSCK(1) |
+ SIFIVE_SPI_DELAY0_SCKCS(1));
+ sifive_spi_write(spi, SIFIVE_SPI_REG_DELAY1,
+ SIFIVE_SPI_DELAY1_INTERCS(1) |
+ SIFIVE_SPI_DELAY1_INTERXFR(0));
+
+ /* Exit specialized memory-mapped SPI flash mode */
+ sifive_spi_write(spi, SIFIVE_SPI_REG_FCTRL, 0);
+}
+
+static int
+sifive_spi_prepare_message(struct spi_master *master, struct spi_message *msg)
+{
+ struct sifive_spi *spi = spi_master_get_devdata(master);
+ struct spi_device *device = msg->spi;
+
+ /* Update the chip select polarity */
+ if (device->mode & SPI_CS_HIGH)
+ spi->cs_inactive &= ~BIT(device->chip_select);
+ else
+ spi->cs_inactive |= BIT(device->chip_select);
+ sifive_spi_write(spi, SIFIVE_SPI_REG_CSDEF, spi->cs_inactive);
+
+ /* Select the correct device */
+ sifive_spi_write(spi, SIFIVE_SPI_REG_CSID, device->chip_select);
+
+ /* Set clock mode */
+ sifive_spi_write(spi, SIFIVE_SPI_REG_SCKMODE,
+ device->mode & SIFIVE_SPI_SCKMODE_MODE_MASK);
+
+ return 0;
+}
+
+static void sifive_spi_set_cs(struct spi_device *device, bool is_high)
+{
+ struct sifive_spi *spi = spi_master_get_devdata(device->master);
+
+ /* Reverse polarity is handled by SCMR/CPOL. Not inverted CS. */
+ if (device->mode & SPI_CS_HIGH)
+ is_high = !is_high;
+
+ sifive_spi_write(spi, SIFIVE_SPI_REG_CSMODE, is_high ?
+ SIFIVE_SPI_CSMODE_MODE_AUTO :
+ SIFIVE_SPI_CSMODE_MODE_HOLD);
+}
+
+static int
+sifive_spi_prep_transfer(struct sifive_spi *spi, struct spi_device *device,
+ struct spi_transfer *t)
+{
+ u32 cr;
+ unsigned int mode;
+
+ /* Calculate and program the clock rate */
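+	/* f_sck = f_in / (2 * (div + 1)), so div = ceil(f_in / (2 * f_sck)) - 1 */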
+ cr = DIV_ROUND_UP(clk_get_rate(spi->clk) >> 1, t->speed_hz) - 1;
+ cr &= SIFIVE_SPI_SCKDIV_DIV_MASK;
+ sifive_spi_write(spi, SIFIVE_SPI_REG_SCKDIV, cr);
+
+ mode = max_t(unsigned int, t->rx_nbits, t->tx_nbits);
+
+ /* Set frame format */
+ cr = SIFIVE_SPI_FMT_LEN(t->bits_per_word);
+ switch (mode) {
+ case SPI_NBITS_QUAD:
+ cr |= SIFIVE_SPI_FMT_PROTO_QUAD;
+ break;
+ case SPI_NBITS_DUAL:
+ cr |= SIFIVE_SPI_FMT_PROTO_DUAL;
+ break;
+ default:
+ cr |= SIFIVE_SPI_FMT_PROTO_SINGLE;
+ break;
+ }
+ if (device->mode & SPI_LSB_FIRST)
+ cr |= SIFIVE_SPI_FMT_ENDIAN;
+ if (!t->rx_buf)
+ cr |= SIFIVE_SPI_FMT_DIR;
+ sifive_spi_write(spi, SIFIVE_SPI_REG_FMT, cr);
+
+ /* We will want to poll if the time we need to wait is
+ * less than the context switching time.
+ * Let's call that threshold 5us. The operation will take:
+ * (8/mode) * fifo_depth / hz <= 5 * 10^-6
+ * 1600000 * fifo_depth <= hz * mode
+ */
+ return 1600000 * spi->fifo_depth <= t->speed_hz * mode;
+}
+
+static irqreturn_t sifive_spi_irq(int irq, void *dev_id)
+{
+ struct sifive_spi *spi = dev_id;
+ u32 ip = sifive_spi_read(spi, SIFIVE_SPI_REG_IP);
+
+ if (ip & (SIFIVE_SPI_IP_TXWM | SIFIVE_SPI_IP_RXWM)) {
+ /* Disable interrupts until next transfer */
+ sifive_spi_write(spi, SIFIVE_SPI_REG_IE, 0);
+ complete(&spi->done);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static void sifive_spi_wait(struct sifive_spi *spi, u32 bit, int poll)
+{
+ if (poll) {
+ u32 cr;
+
+ do {
+ cr = sifive_spi_read(spi, SIFIVE_SPI_REG_IP);
+ } while (!(cr & bit));
+ } else {
+ reinit_completion(&spi->done);
+ sifive_spi_write(spi, SIFIVE_SPI_REG_IE, bit);
+ wait_for_completion(&spi->done);
+ }
+}
+
+static void sifive_spi_tx(struct sifive_spi *spi, const u8 *tx_ptr)
+{
+ WARN_ON_ONCE((sifive_spi_read(spi, SIFIVE_SPI_REG_TXDATA)
+ & SIFIVE_SPI_TXDATA_FULL) != 0);
+ sifive_spi_write(spi, SIFIVE_SPI_REG_TXDATA,
+ *tx_ptr & SIFIVE_SPI_TXDATA_DATA_MASK);
+}
+
+static void sifive_spi_rx(struct sifive_spi *spi, u8 *rx_ptr)
+{
+ u32 data = sifive_spi_read(spi, SIFIVE_SPI_REG_RXDATA);
+
+ WARN_ON_ONCE((data & SIFIVE_SPI_RXDATA_EMPTY) != 0);
+ *rx_ptr = data & SIFIVE_SPI_RXDATA_DATA_MASK;
+}
+
+static int
+sifive_spi_transfer_one(struct spi_master *master, struct spi_device *device,
+ struct spi_transfer *t)
+{
+ struct sifive_spi *spi = spi_master_get_devdata(master);
+ int poll = sifive_spi_prep_transfer(spi, device, t);
+ const u8 *tx_ptr = t->tx_buf;
+ u8 *rx_ptr = t->rx_buf;
+ unsigned int remaining_words = t->len;
+
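+	/*
+	 * Feed the FIFO in chunks of at most fifo_depth words, then wait for
+	 * the RX (or TX) watermark before moving on to the next chunk.
+	 */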
+ while (remaining_words) {
+ unsigned int n_words = min(remaining_words, spi->fifo_depth);
+ unsigned int i;
+
+ /* Enqueue n_words for transmission */
+ for (i = 0; i < n_words; i++)
+ sifive_spi_tx(spi, tx_ptr++);
+
+ if (rx_ptr) {
+ /* Wait for transmission + reception to complete */
+ sifive_spi_write(spi, SIFIVE_SPI_REG_RXMARK,
+ n_words - 1);
+ sifive_spi_wait(spi, SIFIVE_SPI_IP_RXWM, poll);
+
+ /* Read out all the data from the RX FIFO */
+ for (i = 0; i < n_words; i++)
+ sifive_spi_rx(spi, rx_ptr++);
+ } else {
+ /* Wait for transmission to complete */
+ sifive_spi_wait(spi, SIFIVE_SPI_IP_TXWM, poll);
+ }
+
+ remaining_words -= n_words;
+ }
+
+ return 0;
+}
+
+static int sifive_spi_probe(struct platform_device *pdev)
+{
+ struct sifive_spi *spi;
+ int ret, irq, num_cs;
+ u32 cs_bits, max_bits_per_word;
+ struct spi_master *master;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct sifive_spi));
+ if (!master) {
+ dev_err(&pdev->dev, "out of memory\n");
+ return -ENOMEM;
+ }
+
+ spi = spi_master_get_devdata(master);
+ init_completion(&spi->done);
+ platform_set_drvdata(pdev, master);
+
+ spi->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(spi->regs)) {
+ ret = PTR_ERR(spi->regs);
+ goto put_master;
+ }
+
+ spi->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(spi->clk)) {
+ dev_err(&pdev->dev, "Unable to find bus clock\n");
+ ret = PTR_ERR(spi->clk);
+ goto put_master;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto put_master;
+ }
+
+ /* Optional parameters */
+	ret = of_property_read_u32(pdev->dev.of_node, "sifive,fifo-depth",
+				   &spi->fifo_depth);
+ if (ret < 0)
+ spi->fifo_depth = SIFIVE_SPI_DEFAULT_DEPTH;
+
+	ret = of_property_read_u32(pdev->dev.of_node,
+				   "sifive,max-bits-per-word",
+				   &max_bits_per_word);
+
+ if (!ret && max_bits_per_word < 8) {
+ dev_err(&pdev->dev, "Only 8bit SPI words supported by the driver\n");
+ ret = -EINVAL;
+ goto put_master;
+ }
+
+ /* Spin up the bus clock before hitting registers */
+ ret = clk_prepare_enable(spi->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable bus clock\n");
+ goto put_master;
+ }
+
+ /* probe the number of CS lines */
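+	/* writable CSDEF bits read back as 1; the highest one gives the count */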
+ spi->cs_inactive = sifive_spi_read(spi, SIFIVE_SPI_REG_CSDEF);
+ sifive_spi_write(spi, SIFIVE_SPI_REG_CSDEF, 0xffffffffU);
+ cs_bits = sifive_spi_read(spi, SIFIVE_SPI_REG_CSDEF);
+ sifive_spi_write(spi, SIFIVE_SPI_REG_CSDEF, spi->cs_inactive);
+ if (!cs_bits) {
+ dev_err(&pdev->dev, "Could not auto probe CS lines\n");
+ ret = -EINVAL;
+ goto disable_clk;
+ }
+
+ num_cs = ilog2(cs_bits) + 1;
+ if (num_cs > SIFIVE_SPI_MAX_CS) {
+ dev_err(&pdev->dev, "Invalid number of spi slaves\n");
+ ret = -EINVAL;
+ goto disable_clk;
+ }
+
+ /* Define our master */
+ master->dev.of_node = pdev->dev.of_node;
+ master->bus_num = pdev->id;
+ master->num_chipselect = num_cs;
+ master->mode_bits = SPI_CPHA | SPI_CPOL
+ | SPI_CS_HIGH | SPI_LSB_FIRST
+ | SPI_TX_DUAL | SPI_TX_QUAD
+ | SPI_RX_DUAL | SPI_RX_QUAD;
+ /* TODO: add driver support for bits_per_word < 8
+ * we need to "left-align" the bits (unless SPI_LSB_FIRST)
+ */
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->flags = SPI_CONTROLLER_MUST_TX | SPI_MASTER_GPIO_SS;
+ master->prepare_message = sifive_spi_prepare_message;
+ master->set_cs = sifive_spi_set_cs;
+ master->transfer_one = sifive_spi_transfer_one;
+
+ pdev->dev.dma_mask = NULL;
+ /* Configure the SPI master hardware */
+ sifive_spi_init(spi);
+
+ /* Register for SPI Interrupt */
+ ret = devm_request_irq(&pdev->dev, irq, sifive_spi_irq, 0,
+ dev_name(&pdev->dev), spi);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to bind to interrupt\n");
+ goto disable_clk;
+ }
+
+ dev_info(&pdev->dev, "mapped; irq=%d, cs=%d\n",
+ irq, master->num_chipselect);
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "spi_register_master failed\n");
+ goto disable_clk;
+ }
+
+ return 0;
+
+disable_clk:
+ clk_disable_unprepare(spi->clk);
+put_master:
+ spi_master_put(master);
+
+ return ret;
+}
+
+static int sifive_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct sifive_spi *spi = spi_master_get_devdata(master);
+
+ /* Disable all the interrupts just in case */
+ sifive_spi_write(spi, SIFIVE_SPI_REG_IE, 0);
+ clk_disable_unprepare(spi->clk);
+
+ return 0;
+}
+
+static int sifive_spi_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct sifive_spi *spi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = spi_master_suspend(master);
+ if (ret)
+ return ret;
+
+ /* Disable all the interrupts just in case */
+ sifive_spi_write(spi, SIFIVE_SPI_REG_IE, 0);
+
+ clk_disable_unprepare(spi->clk);
+
+ return ret;
+}
+
+static int sifive_spi_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct sifive_spi *spi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_prepare_enable(spi->clk);
+ if (ret)
+ return ret;
+ ret = spi_master_resume(master);
+ if (ret)
+ clk_disable_unprepare(spi->clk);
+
+ return ret;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(sifive_spi_pm_ops,
+ sifive_spi_suspend, sifive_spi_resume);
+
+static const struct of_device_id sifive_spi_of_match[] = {
+ { .compatible = "sifive,spi0", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sifive_spi_of_match);
+
+static struct platform_driver sifive_spi_driver = {
+ .probe = sifive_spi_probe,
+ .remove = sifive_spi_remove,
+ .driver = {
+ .name = SIFIVE_SPI_DRIVER_NAME,
+ .pm = &sifive_spi_pm_ops,
+ .of_match_table = sifive_spi_of_match,
+ },
+};
+module_platform_driver(sifive_spi_driver);
+
+MODULE_AUTHOR("SiFive, Inc. <sifive@sifive.com>");
+MODULE_DESCRIPTION("SiFive SPI driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-slave-mt27xx.c b/drivers/spi/spi-slave-mt27xx.c
new file mode 100644
index 000000000..f199a6c47
--- /dev/null
+++ b/drivers/spi/spi-slave-mt27xx.c
@@ -0,0 +1,571 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2018 MediaTek Inc.
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/spi/spi.h>
+#include <linux/of.h>
+
+#define SPIS_IRQ_EN_REG 0x0
+#define SPIS_IRQ_CLR_REG 0x4
+#define SPIS_IRQ_ST_REG 0x8
+#define SPIS_IRQ_MASK_REG 0xc
+#define SPIS_CFG_REG 0x10
+#define SPIS_RX_DATA_REG 0x14
+#define SPIS_TX_DATA_REG 0x18
+#define SPIS_RX_DST_REG 0x1c
+#define SPIS_TX_SRC_REG 0x20
+#define SPIS_DMA_CFG_REG 0x30
+#define SPIS_SOFT_RST_REG 0x40
+
+/* SPIS_IRQ_EN_REG */
+#define DMA_DONE_EN BIT(7)
+#define DATA_DONE_EN BIT(2)
+#define RSTA_DONE_EN BIT(1)
+#define CMD_INVALID_EN BIT(0)
+
+/* SPIS_IRQ_ST_REG */
+#define DMA_DONE_ST BIT(7)
+#define DATA_DONE_ST BIT(2)
+#define RSTA_DONE_ST BIT(1)
+#define CMD_INVALID_ST BIT(0)
+
+/* SPIS_IRQ_MASK_REG */
+#define DMA_DONE_MASK BIT(7)
+#define DATA_DONE_MASK BIT(2)
+#define RSTA_DONE_MASK BIT(1)
+#define CMD_INVALID_MASK BIT(0)
+
+/* SPIS_CFG_REG */
+#define SPIS_TX_ENDIAN BIT(7)
+#define SPIS_RX_ENDIAN BIT(6)
+#define SPIS_TXMSBF BIT(5)
+#define SPIS_RXMSBF BIT(4)
+#define SPIS_CPHA BIT(3)
+#define SPIS_CPOL BIT(2)
+#define SPIS_TX_EN BIT(1)
+#define SPIS_RX_EN BIT(0)
+
+/* SPIS_DMA_CFG_REG */
+#define TX_DMA_TRIG_EN BIT(31)
+#define TX_DMA_EN BIT(30)
+#define RX_DMA_EN BIT(29)
+#define TX_DMA_LEN 0xfffff
+
+/* SPIS_SOFT_RST_REG */
+#define SPIS_DMA_ADDR_EN BIT(1)
+#define SPIS_SOFT_RST BIT(0)
+
+struct mtk_spi_slave {
+ struct device *dev;
+ void __iomem *base;
+ struct clk *spi_clk;
+ struct completion xfer_done;
+ struct spi_transfer *cur_transfer;
+ bool slave_aborted;
+ const struct mtk_spi_compatible *dev_comp;
+};
+
+struct mtk_spi_compatible {
+ const u32 max_fifo_size;
+ bool must_rx;
+};
+
+static const struct mtk_spi_compatible mt2712_compat = {
+ .max_fifo_size = 512,
+};
+static const struct mtk_spi_compatible mt8195_compat = {
+ .max_fifo_size = 128,
+ .must_rx = true,
+};
+
+static const struct of_device_id mtk_spi_slave_of_match[] = {
+ { .compatible = "mediatek,mt2712-spi-slave",
+ .data = (void *)&mt2712_compat,},
+ { .compatible = "mediatek,mt8195-spi-slave",
+ .data = (void *)&mt8195_compat,},
+ {}
+};
+MODULE_DEVICE_TABLE(of, mtk_spi_slave_of_match);
+
+static void mtk_spi_slave_disable_dma(struct mtk_spi_slave *mdata)
+{
+ u32 reg_val;
+
+ reg_val = readl(mdata->base + SPIS_DMA_CFG_REG);
+ reg_val &= ~RX_DMA_EN;
+ reg_val &= ~TX_DMA_EN;
+ writel(reg_val, mdata->base + SPIS_DMA_CFG_REG);
+}
+
+static void mtk_spi_slave_disable_xfer(struct mtk_spi_slave *mdata)
+{
+ u32 reg_val;
+
+ reg_val = readl(mdata->base + SPIS_CFG_REG);
+ reg_val &= ~SPIS_TX_EN;
+ reg_val &= ~SPIS_RX_EN;
+ writel(reg_val, mdata->base + SPIS_CFG_REG);
+}
+
+static int mtk_spi_slave_wait_for_completion(struct mtk_spi_slave *mdata)
+{
+ if (wait_for_completion_interruptible(&mdata->xfer_done) ||
+ mdata->slave_aborted) {
+ dev_err(mdata->dev, "interrupted\n");
+ return -EINTR;
+ }
+
+ return 0;
+}
+
+static int mtk_spi_slave_prepare_message(struct spi_controller *ctlr,
+ struct spi_message *msg)
+{
+ struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
+ struct spi_device *spi = msg->spi;
+ bool cpha, cpol;
+ u32 reg_val;
+
+ cpha = spi->mode & SPI_CPHA ? 1 : 0;
+ cpol = spi->mode & SPI_CPOL ? 1 : 0;
+
+ reg_val = readl(mdata->base + SPIS_CFG_REG);
+ if (cpha)
+ reg_val |= SPIS_CPHA;
+ else
+ reg_val &= ~SPIS_CPHA;
+ if (cpol)
+ reg_val |= SPIS_CPOL;
+ else
+ reg_val &= ~SPIS_CPOL;
+
+ if (spi->mode & SPI_LSB_FIRST)
+ reg_val &= ~(SPIS_TXMSBF | SPIS_RXMSBF);
+ else
+ reg_val |= SPIS_TXMSBF | SPIS_RXMSBF;
+
+ reg_val &= ~SPIS_TX_ENDIAN;
+ reg_val &= ~SPIS_RX_ENDIAN;
+ writel(reg_val, mdata->base + SPIS_CFG_REG);
+
+ return 0;
+}
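+
+/*
+ * For example, a device in SPI mode 3 (SPI_CPOL | SPI_CPHA) gets both
+ * SPIS_CPOL and SPIS_CPHA set in mtk_spi_slave_prepare_message() above,
+ * and, without SPI_LSB_FIRST, both SPIS_TXMSBF and SPIS_RXMSBF are set so
+ * data is shifted MSB first.
+ */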
+
+static int mtk_spi_slave_fifo_transfer(struct spi_controller *ctlr,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
+ int reg_val, cnt, remainder, ret;
+
+ writel(SPIS_SOFT_RST, mdata->base + SPIS_SOFT_RST_REG);
+
+ reg_val = readl(mdata->base + SPIS_CFG_REG);
+ if (xfer->rx_buf)
+ reg_val |= SPIS_RX_EN;
+ if (xfer->tx_buf)
+ reg_val |= SPIS_TX_EN;
+ writel(reg_val, mdata->base + SPIS_CFG_REG);
+
+ cnt = xfer->len / 4;
+ if (xfer->tx_buf)
+ iowrite32_rep(mdata->base + SPIS_TX_DATA_REG,
+ xfer->tx_buf, cnt);
+
+ remainder = xfer->len % 4;
+ if (xfer->tx_buf && remainder > 0) {
+ reg_val = 0;
+ memcpy(&reg_val, xfer->tx_buf + cnt * 4, remainder);
+ writel(reg_val, mdata->base + SPIS_TX_DATA_REG);
+ }
+
+ ret = mtk_spi_slave_wait_for_completion(mdata);
+ if (ret) {
+ mtk_spi_slave_disable_xfer(mdata);
+ writel(SPIS_SOFT_RST, mdata->base + SPIS_SOFT_RST_REG);
+ }
+
+ return ret;
+}
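+
+/*
+ * Worked example for the FIFO fill above: a 10-byte tx_buf is written as
+ * cnt = 10 / 4 = 2 full words via iowrite32_rep(), and the remaining
+ * 10 % 4 = 2 bytes are packed into one more write of SPIS_TX_DATA_REG.
+ * The matching RX drain happens in mtk_spi_slave_interrupt().
+ */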
+
+static int mtk_spi_slave_dma_transfer(struct spi_controller *ctlr,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
+ struct device *dev = mdata->dev;
+ int reg_val, ret;
+
+ writel(SPIS_SOFT_RST, mdata->base + SPIS_SOFT_RST_REG);
+
+ if (xfer->tx_buf) {
+ /* tx_buf is a const void *, but we need a void * for
+ * the DMA mapping
+ */
+ void *nonconst_tx = (void *)xfer->tx_buf;
+
+ xfer->tx_dma = dma_map_single(dev, nonconst_tx,
+ xfer->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, xfer->tx_dma)) {
+ ret = -ENOMEM;
+ goto disable_transfer;
+ }
+ }
+
+ if (xfer->rx_buf) {
+ xfer->rx_dma = dma_map_single(dev, xfer->rx_buf,
+ xfer->len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, xfer->rx_dma)) {
+ ret = -ENOMEM;
+ goto unmap_txdma;
+ }
+ }
+
+ writel(xfer->tx_dma, mdata->base + SPIS_TX_SRC_REG);
+ writel(xfer->rx_dma, mdata->base + SPIS_RX_DST_REG);
+
+ writel(SPIS_DMA_ADDR_EN, mdata->base + SPIS_SOFT_RST_REG);
+
+ /* Enable TX/RX in the config register as needed */
+ reg_val = readl(mdata->base + SPIS_CFG_REG);
+ if (xfer->tx_buf)
+ reg_val |= SPIS_TX_EN;
+ if (xfer->rx_buf)
+ reg_val |= SPIS_RX_EN;
+ writel(reg_val, mdata->base + SPIS_CFG_REG);
+
+ /* Configure the DMA: set the transfer length, then enable TX/RX DMA */
+ reg_val = 0;
+ reg_val |= (xfer->len - 1) & TX_DMA_LEN;
+ writel(reg_val, mdata->base + SPIS_DMA_CFG_REG);
+
+ reg_val = readl(mdata->base + SPIS_DMA_CFG_REG);
+ if (xfer->tx_buf)
+ reg_val |= TX_DMA_EN;
+ if (xfer->rx_buf)
+ reg_val |= RX_DMA_EN;
+ reg_val |= TX_DMA_TRIG_EN;
+ writel(reg_val, mdata->base + SPIS_DMA_CFG_REG);
+
+ ret = mtk_spi_slave_wait_for_completion(mdata);
+ if (ret)
+ goto unmap_rxdma;
+
+ return 0;
+
+unmap_rxdma:
+ if (xfer->rx_buf)
+ dma_unmap_single(dev, xfer->rx_dma,
+ xfer->len, DMA_FROM_DEVICE);
+
+unmap_txdma:
+ if (xfer->tx_buf)
+ dma_unmap_single(dev, xfer->tx_dma,
+ xfer->len, DMA_TO_DEVICE);
+
+disable_transfer:
+ mtk_spi_slave_disable_dma(mdata);
+ mtk_spi_slave_disable_xfer(mdata);
+ writel(SPIS_SOFT_RST, mdata->base + SPIS_SOFT_RST_REG);
+
+ return ret;
+}
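+
+/*
+ * For example, a 600-byte DMA transfer programs (600 - 1) & TX_DMA_LEN = 599
+ * into the 20-bit length field of SPIS_DMA_CFG_REG, then sets TX_DMA_EN
+ * and/or RX_DMA_EN plus TX_DMA_TRIG_EN to kick the transfer off.
+ */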
+
+static int mtk_spi_slave_transfer_one(struct spi_controller *ctlr,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
+
+ reinit_completion(&mdata->xfer_done);
+ mdata->slave_aborted = false;
+ mdata->cur_transfer = xfer;
+
+ if (xfer->len > mdata->dev_comp->max_fifo_size)
+ return mtk_spi_slave_dma_transfer(ctlr, spi, xfer);
+ else
+ return mtk_spi_slave_fifo_transfer(ctlr, spi, xfer);
+}
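+
+/*
+ * For example: with the mt2712 compatible (max_fifo_size = 512), a 600-byte
+ * transfer takes the DMA path above, while a transfer of 512 bytes or less
+ * stays on the FIFO path.
+ */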
+
+static int mtk_spi_slave_setup(struct spi_device *spi)
+{
+ struct mtk_spi_slave *mdata = spi_controller_get_devdata(spi->master);
+ u32 reg_val;
+
+ reg_val = DMA_DONE_EN | DATA_DONE_EN |
+ RSTA_DONE_EN | CMD_INVALID_EN;
+ writel(reg_val, mdata->base + SPIS_IRQ_EN_REG);
+
+ reg_val = DMA_DONE_MASK | DATA_DONE_MASK |
+ RSTA_DONE_MASK | CMD_INVALID_MASK;
+ writel(reg_val, mdata->base + SPIS_IRQ_MASK_REG);
+
+ mtk_spi_slave_disable_dma(mdata);
+ mtk_spi_slave_disable_xfer(mdata);
+
+ return 0;
+}
+
+static int mtk_slave_abort(struct spi_controller *ctlr)
+{
+ struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
+
+ mdata->slave_aborted = true;
+ complete(&mdata->xfer_done);
+
+ return 0;
+}
+
+static irqreturn_t mtk_spi_slave_interrupt(int irq, void *dev_id)
+{
+ struct spi_controller *ctlr = dev_id;
+ struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
+ struct spi_transfer *trans = mdata->cur_transfer;
+ u32 int_status, reg_val, cnt, remainder;
+
+ int_status = readl(mdata->base + SPIS_IRQ_ST_REG);
+ writel(int_status, mdata->base + SPIS_IRQ_CLR_REG);
+
+ if (!trans)
+ return IRQ_NONE;
+
+ if ((int_status & DMA_DONE_ST) &&
+ ((int_status & DATA_DONE_ST) ||
+ (int_status & RSTA_DONE_ST))) {
+ writel(SPIS_SOFT_RST, mdata->base + SPIS_SOFT_RST_REG);
+
+ if (trans->tx_buf)
+ dma_unmap_single(mdata->dev, trans->tx_dma,
+ trans->len, DMA_TO_DEVICE);
+ if (trans->rx_buf)
+ dma_unmap_single(mdata->dev, trans->rx_dma,
+ trans->len, DMA_FROM_DEVICE);
+
+ mtk_spi_slave_disable_dma(mdata);
+ mtk_spi_slave_disable_xfer(mdata);
+ }
+
+ if ((!(int_status & DMA_DONE_ST)) &&
+ ((int_status & DATA_DONE_ST) ||
+ (int_status & RSTA_DONE_ST))) {
+ cnt = trans->len / 4;
+ if (trans->rx_buf)
+ ioread32_rep(mdata->base + SPIS_RX_DATA_REG,
+ trans->rx_buf, cnt);
+ remainder = trans->len % 4;
+ if (trans->rx_buf && remainder > 0) {
+ reg_val = readl(mdata->base + SPIS_RX_DATA_REG);
+ memcpy(trans->rx_buf + (cnt * 4),
+ &reg_val, remainder);
+ }
+
+ mtk_spi_slave_disable_xfer(mdata);
+ }
+
+ if (int_status & CMD_INVALID_ST) {
+ dev_warn(&ctlr->dev, "cmd invalid\n");
+ return IRQ_NONE;
+ }
+
+ mdata->cur_transfer = NULL;
+ complete(&mdata->xfer_done);
+
+ return IRQ_HANDLED;
+}
+
+static int mtk_spi_slave_probe(struct platform_device *pdev)
+{
+ struct spi_controller *ctlr;
+ struct mtk_spi_slave *mdata;
+ int irq, ret;
+ const struct of_device_id *of_id;
+
+ ctlr = spi_alloc_slave(&pdev->dev, sizeof(*mdata));
+ if (!ctlr) {
+ dev_err(&pdev->dev, "failed to alloc spi slave\n");
+ return -ENOMEM;
+ }
+
+ ctlr->auto_runtime_pm = true;
+ ctlr->dev.of_node = pdev->dev.of_node;
+ ctlr->mode_bits = SPI_CPOL | SPI_CPHA;
+ ctlr->mode_bits |= SPI_LSB_FIRST;
+
+ ctlr->prepare_message = mtk_spi_slave_prepare_message;
+ ctlr->transfer_one = mtk_spi_slave_transfer_one;
+ ctlr->setup = mtk_spi_slave_setup;
+ ctlr->slave_abort = mtk_slave_abort;
+
+ of_id = of_match_node(mtk_spi_slave_of_match, pdev->dev.of_node);
+ if (!of_id) {
+ dev_err(&pdev->dev, "failed to probe of_node\n");
+ ret = -EINVAL;
+ goto err_put_ctlr;
+ }
+ mdata = spi_controller_get_devdata(ctlr);
+ mdata->dev_comp = of_id->data;
+
+ if (mdata->dev_comp->must_rx)
+ ctlr->flags = SPI_MASTER_MUST_RX;
+
+ platform_set_drvdata(pdev, ctlr);
+
+ init_completion(&mdata->xfer_done);
+ mdata->dev = &pdev->dev;
+ mdata->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mdata->base)) {
+ ret = PTR_ERR(mdata->base);
+ goto err_put_ctlr;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto err_put_ctlr;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, mtk_spi_slave_interrupt,
+ IRQF_TRIGGER_NONE, dev_name(&pdev->dev), ctlr);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register irq (%d)\n", ret);
+ goto err_put_ctlr;
+ }
+
+ mdata->spi_clk = devm_clk_get(&pdev->dev, "spi");
+ if (IS_ERR(mdata->spi_clk)) {
+ ret = PTR_ERR(mdata->spi_clk);
+ dev_err(&pdev->dev, "failed to get spi-clk: %d\n", ret);
+ goto err_put_ctlr;
+ }
+
+ ret = clk_prepare_enable(mdata->spi_clk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to enable spi_clk (%d)\n", ret);
+ goto err_put_ctlr;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+
+ ret = devm_spi_register_controller(&pdev->dev, ctlr);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to register slave controller(%d)\n", ret);
+ clk_disable_unprepare(mdata->spi_clk);
+ goto err_disable_runtime_pm;
+ }
+
+ clk_disable_unprepare(mdata->spi_clk);
+
+ return 0;
+
+err_disable_runtime_pm:
+ pm_runtime_disable(&pdev->dev);
+err_put_ctlr:
+ spi_controller_put(ctlr);
+
+ return ret;
+}
+
+static int mtk_spi_slave_remove(struct platform_device *pdev)
+{
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mtk_spi_slave_suspend(struct device *dev)
+{
+ struct spi_controller *ctlr = dev_get_drvdata(dev);
+ struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
+ int ret;
+
+ ret = spi_controller_suspend(ctlr);
+ if (ret)
+ return ret;
+
+ if (!pm_runtime_suspended(dev))
+ clk_disable_unprepare(mdata->spi_clk);
+
+ return ret;
+}
+
+static int mtk_spi_slave_resume(struct device *dev)
+{
+ struct spi_controller *ctlr = dev_get_drvdata(dev);
+ struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
+ int ret;
+
+ if (!pm_runtime_suspended(dev)) {
+ ret = clk_prepare_enable(mdata->spi_clk);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
+ return ret;
+ }
+ }
+
+ ret = spi_controller_resume(ctlr);
+ if (ret < 0)
+ clk_disable_unprepare(mdata->spi_clk);
+
+ return ret;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_PM
+static int mtk_spi_slave_runtime_suspend(struct device *dev)
+{
+ struct spi_controller *ctlr = dev_get_drvdata(dev);
+ struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
+
+ clk_disable_unprepare(mdata->spi_clk);
+
+ return 0;
+}
+
+static int mtk_spi_slave_runtime_resume(struct device *dev)
+{
+ struct spi_controller *ctlr = dev_get_drvdata(dev);
+ struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
+ int ret;
+
+ ret = clk_prepare_enable(mdata->spi_clk);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops mtk_spi_slave_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(mtk_spi_slave_suspend, mtk_spi_slave_resume)
+ SET_RUNTIME_PM_OPS(mtk_spi_slave_runtime_suspend,
+ mtk_spi_slave_runtime_resume, NULL)
+};
+
+static struct platform_driver mtk_spi_slave_driver = {
+ .driver = {
+ .name = "mtk-spi-slave",
+ .pm = &mtk_spi_slave_pm,
+ .of_match_table = mtk_spi_slave_of_match,
+ },
+ .probe = mtk_spi_slave_probe,
+ .remove = mtk_spi_slave_remove,
+};
+
+module_platform_driver(mtk_spi_slave_driver);
+
+MODULE_DESCRIPTION("MTK SPI Slave Controller driver");
+MODULE_AUTHOR("Leilk Liu <leilk.liu@mediatek.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:mtk-spi-slave");
diff --git a/drivers/spi/spi-slave-system-control.c b/drivers/spi/spi-slave-system-control.c
new file mode 100644
index 000000000..d37cfe995
--- /dev/null
+++ b/drivers/spi/spi-slave-system-control.c
@@ -0,0 +1,154 @@
+/*
+ * SPI slave handler controlling system state
+ *
+ * This SPI slave handler allows remote control of system reboot, power off,
+ * halt, and suspend.
+ *
+ * Copyright (C) 2016-2017 Glider bvba
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Usage (assuming /dev/spidev2.0 corresponds to the SPI master on the remote
+ * system):
+ *
+ * # reboot='\x7c\x50'
+ * # poweroff='\x71\x3f'
+ * # halt='\x38\x76'
+ * # suspend='\x1b\x1b'
+ * # spidev_test -D /dev/spidev2.0 -p $suspend # or $reboot, $poweroff, $halt
+ */
+
+#include <linux/completion.h>
+#include <linux/module.h>
+#include <linux/reboot.h>
+#include <linux/suspend.h>
+#include <linux/spi/spi.h>
+
+/*
+ * The numbers are chosen to display something human-readable on two 7-segment
+ * displays connected to two 74HC595 shift registers
+ */
+#define CMD_REBOOT 0x7c50 /* rb */
+#define CMD_POWEROFF 0x713f /* OF */
+#define CMD_HALT 0x3876 /* HL */
+#define CMD_SUSPEND 0x1b1b /* ZZ */
+
+struct spi_slave_system_control_priv {
+ struct spi_device *spi;
+ struct completion finished;
+ struct spi_transfer xfer;
+ struct spi_message msg;
+ __be16 cmd;
+};
+
+static
+int spi_slave_system_control_submit(struct spi_slave_system_control_priv *priv);
+
+static void spi_slave_system_control_complete(void *arg)
+{
+ struct spi_slave_system_control_priv *priv = arg;
+ u16 cmd;
+ int ret;
+
+ if (priv->msg.status)
+ goto terminate;
+
+ cmd = be16_to_cpu(priv->cmd);
+ switch (cmd) {
+ case CMD_REBOOT:
+ dev_info(&priv->spi->dev, "Rebooting system...\n");
+ kernel_restart(NULL);
+ break;
+
+ case CMD_POWEROFF:
+ dev_info(&priv->spi->dev, "Powering off system...\n");
+ kernel_power_off();
+ break;
+
+ case CMD_HALT:
+ dev_info(&priv->spi->dev, "Halting system...\n");
+ kernel_halt();
+ break;
+
+ case CMD_SUSPEND:
+ dev_info(&priv->spi->dev, "Suspending system...\n");
+ pm_suspend(PM_SUSPEND_MEM);
+ break;
+
+ default:
+ dev_warn(&priv->spi->dev, "Unknown command 0x%x\n", cmd);
+ break;
+ }
+
+ ret = spi_slave_system_control_submit(priv);
+ if (ret)
+ goto terminate;
+
+ return;
+
+terminate:
+ dev_info(&priv->spi->dev, "Terminating\n");
+ complete(&priv->finished);
+}
+
+static
+int spi_slave_system_control_submit(struct spi_slave_system_control_priv *priv)
+{
+ int ret;
+
+ spi_message_init_with_transfers(&priv->msg, &priv->xfer, 1);
+
+ priv->msg.complete = spi_slave_system_control_complete;
+ priv->msg.context = priv;
+
+ ret = spi_async(priv->spi, &priv->msg);
+ if (ret)
+ dev_err(&priv->spi->dev, "spi_async() failed %d\n", ret);
+
+ return ret;
+}
+
+static int spi_slave_system_control_probe(struct spi_device *spi)
+{
+ struct spi_slave_system_control_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(&spi->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->spi = spi;
+ init_completion(&priv->finished);
+ priv->xfer.rx_buf = &priv->cmd;
+ priv->xfer.len = sizeof(priv->cmd);
+
+ ret = spi_slave_system_control_submit(priv);
+ if (ret)
+ return ret;
+
+ spi_set_drvdata(spi, priv);
+ return 0;
+}
+
+static void spi_slave_system_control_remove(struct spi_device *spi)
+{
+ struct spi_slave_system_control_priv *priv = spi_get_drvdata(spi);
+
+ spi_slave_abort(spi);
+ wait_for_completion(&priv->finished);
+}
+
+static struct spi_driver spi_slave_system_control_driver = {
+ .driver = {
+ .name = "spi-slave-system-control",
+ },
+ .probe = spi_slave_system_control_probe,
+ .remove = spi_slave_system_control_remove,
+};
+module_spi_driver(spi_slave_system_control_driver);
+
+MODULE_AUTHOR("Geert Uytterhoeven <geert+renesas@glider.be>");
+MODULE_DESCRIPTION("SPI slave handler controlling system state");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-slave-time.c b/drivers/spi/spi-slave-time.c
new file mode 100644
index 000000000..f56c1afb8
--- /dev/null
+++ b/drivers/spi/spi-slave-time.c
@@ -0,0 +1,128 @@
+/*
+ * SPI slave handler reporting uptime at reception of previous SPI message
+ *
+ * This SPI slave handler sends the time of reception of the last SPI message
+ * as two 32-bit unsigned integers in binary format and in network byte order,
+ * representing the number of seconds and fractional seconds (in microseconds)
+ * since boot up.
+ *
+ * Copyright (C) 2016-2017 Glider bvba
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Usage (assuming /dev/spidev2.0 corresponds to the SPI master on the remote
+ * system):
+ *
+ * # spidev_test -D /dev/spidev2.0 -p dummy-8B
+ * spi mode: 0x0
+ * bits per word: 8
+ * max speed: 500000 Hz (500 KHz)
+ * RX | 00 00 04 6D 00 09 5B BB ...
+ * ^^^^^ ^^^^^^^^
+ * seconds microseconds
+ */
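+
+/*
+ * Decoding the sample output above: the first word 0x0000046D is 1133
+ * seconds and the second word 0x00095BBB is 613307 microseconds, i.e. the
+ * previous message was received roughly 1133.613307 s after boot.
+ */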
+
+#include <linux/completion.h>
+#include <linux/module.h>
+#include <linux/sched/clock.h>
+#include <linux/spi/spi.h>
+
+
+struct spi_slave_time_priv {
+ struct spi_device *spi;
+ struct completion finished;
+ struct spi_transfer xfer;
+ struct spi_message msg;
+ __be32 buf[2];
+};
+
+static int spi_slave_time_submit(struct spi_slave_time_priv *priv);
+
+static void spi_slave_time_complete(void *arg)
+{
+ struct spi_slave_time_priv *priv = arg;
+ int ret;
+
+ ret = priv->msg.status;
+ if (ret)
+ goto terminate;
+
+ ret = spi_slave_time_submit(priv);
+ if (ret)
+ goto terminate;
+
+ return;
+
+terminate:
+ dev_info(&priv->spi->dev, "Terminating\n");
+ complete(&priv->finished);
+}
+
+static int spi_slave_time_submit(struct spi_slave_time_priv *priv)
+{
+ u32 rem_us;
+ int ret;
+ u64 ts;
+
+ ts = local_clock();
+ rem_us = do_div(ts, 1000000000) / 1000;
+
+ priv->buf[0] = cpu_to_be32(ts);
+ priv->buf[1] = cpu_to_be32(rem_us);
+
+ spi_message_init_with_transfers(&priv->msg, &priv->xfer, 1);
+
+ priv->msg.complete = spi_slave_time_complete;
+ priv->msg.context = priv;
+
+ ret = spi_async(priv->spi, &priv->msg);
+ if (ret)
+ dev_err(&priv->spi->dev, "spi_async() failed %d\n", ret);
+
+ return ret;
+}
+
+static int spi_slave_time_probe(struct spi_device *spi)
+{
+ struct spi_slave_time_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(&spi->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->spi = spi;
+ init_completion(&priv->finished);
+ priv->xfer.tx_buf = priv->buf;
+ priv->xfer.len = sizeof(priv->buf);
+
+ ret = spi_slave_time_submit(priv);
+ if (ret)
+ return ret;
+
+ spi_set_drvdata(spi, priv);
+ return 0;
+}
+
+static void spi_slave_time_remove(struct spi_device *spi)
+{
+ struct spi_slave_time_priv *priv = spi_get_drvdata(spi);
+
+ spi_slave_abort(spi);
+ wait_for_completion(&priv->finished);
+}
+
+static struct spi_driver spi_slave_time_driver = {
+ .driver = {
+ .name = "spi-slave-time",
+ },
+ .probe = spi_slave_time_probe,
+ .remove = spi_slave_time_remove,
+};
+module_spi_driver(spi_slave_time_driver);
+
+MODULE_AUTHOR("Geert Uytterhoeven <geert+renesas@glider.be>");
+MODULE_DESCRIPTION("SPI slave reporting uptime at previous SPI message");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-sprd-adi.c b/drivers/spi/spi-sprd-adi.c
new file mode 100644
index 000000000..1edbf44c0
--- /dev/null
+++ b/drivers/spi/spi-sprd-adi.c
@@ -0,0 +1,669 @@
+/*
+ * Copyright (C) 2017 Spreadtrum Communications Inc.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#include <linux/delay.h>
+#include <linux/hwspinlock.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/spi/spi.h>
+#include <linux/sizes.h>
+
+/* Registers definitions for ADI controller */
+#define REG_ADI_CTRL0 0x4
+#define REG_ADI_CHN_PRIL 0x8
+#define REG_ADI_CHN_PRIH 0xc
+#define REG_ADI_INT_EN 0x10
+#define REG_ADI_INT_RAW 0x14
+#define REG_ADI_INT_MASK 0x18
+#define REG_ADI_INT_CLR 0x1c
+#define REG_ADI_GSSI_CFG0 0x20
+#define REG_ADI_GSSI_CFG1 0x24
+#define REG_ADI_RD_CMD 0x28
+#define REG_ADI_RD_DATA 0x2c
+#define REG_ADI_ARM_FIFO_STS 0x30
+#define REG_ADI_STS 0x34
+#define REG_ADI_EVT_FIFO_STS 0x38
+#define REG_ADI_ARM_CMD_STS 0x3c
+#define REG_ADI_CHN_EN 0x40
+#define REG_ADI_CHN_ADDR(id) (0x44 + (id - 2) * 4)
+#define REG_ADI_CHN_EN1 0x20c
+
+/* Bits definitions for register REG_ADI_GSSI_CFG0 */
+#define BIT_CLK_ALL_ON BIT(30)
+
+/* Bits definitions for register REG_ADI_RD_DATA */
+#define BIT_RD_CMD_BUSY BIT(31)
+#define RD_ADDR_SHIFT 16
+#define RD_VALUE_MASK GENMASK(15, 0)
+#define RD_ADDR_MASK GENMASK(30, 16)
+
+/* Bits definitions for register REG_ADI_ARM_FIFO_STS */
+#define BIT_FIFO_FULL BIT(11)
+#define BIT_FIFO_EMPTY BIT(10)
+
+/*
+ * ADI slave devices include the RTC, ADC, regulator, charger, thermal sensor
+ * and so on. ADI supports 12/14-bit addresses for r2p0, and an additional
+ * 17-bit mode for r3p0 or later versions. Since bits [1:0] are always zero,
+ * the spec describes these as the 10/12/15-bit address modes.
+ * The 10-bit mode supports a single slave; the 12/15-bit modes support 3
+ * slaves, with the high two bits selecting the slave_id.
+ * The slave device address offset is 0x8000 for the 10/12-bit address modes,
+ * and 0x20000 for the 15-bit mode.
+ */
+#define ADI_10BIT_SLAVE_ADDR_SIZE SZ_4K
+#define ADI_10BIT_SLAVE_OFFSET 0x8000
+#define ADI_12BIT_SLAVE_ADDR_SIZE SZ_16K
+#define ADI_12BIT_SLAVE_OFFSET 0x8000
+#define ADI_15BIT_SLAVE_ADDR_SIZE SZ_128K
+#define ADI_15BIT_SLAVE_OFFSET 0x20000
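+
+/*
+ * For example, on a controller using the 15-bit mode (slave_offset 0x20000),
+ * a write to PMIC register offset 0xc08 lands at the virtual address
+ * sadi->base + 0x20000 + 0xc08; see sprd_adi_write() below.
+ */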
+
+/* Timeout (ms) for the trylock of hardware spinlocks */
+#define ADI_HWSPINLOCK_TIMEOUT 5000
+/*
+ * ADI controller has 50 channels including 2 software channels
+ * and 48 hardware channels.
+ */
+#define ADI_HW_CHNS 50
+
+#define ADI_FIFO_DRAIN_TIMEOUT 1000
+#define ADI_READ_TIMEOUT 2000
+
+/*
+ * Read back address from REG_ADI_RD_DATA bit[30:16] which maps to:
+ * REG_ADI_RD_CMD bit[14:0] for r2p0
+ * REG_ADI_RD_CMD bit[16:2] for r3p0
+ */
+#define RDBACK_ADDR_MASK_R2 GENMASK(14, 0)
+#define RDBACK_ADDR_MASK_R3 GENMASK(16, 2)
+#define RDBACK_ADDR_SHIFT_R3 2
+
+/* Registers definitions for PMIC watchdog controller */
+#define REG_WDG_LOAD_LOW 0x0
+#define REG_WDG_LOAD_HIGH 0x4
+#define REG_WDG_CTRL 0x8
+#define REG_WDG_LOCK 0x20
+
+/* Bits definitions for register REG_WDG_CTRL */
+#define BIT_WDG_RUN BIT(1)
+#define BIT_WDG_NEW BIT(2)
+#define BIT_WDG_RST BIT(3)
+
+/* Bits definitions for register REG_MODULE_EN */
+#define BIT_WDG_EN BIT(2)
+
+/* Registers definitions for PMIC */
+#define PMIC_RST_STATUS 0xee8
+#define PMIC_MODULE_EN 0xc08
+#define PMIC_CLK_EN 0xc18
+#define PMIC_WDG_BASE 0x80
+
+/* Definition of PMIC reset status register */
+#define HWRST_STATUS_SECURITY 0x02
+#define HWRST_STATUS_RECOVERY 0x20
+#define HWRST_STATUS_NORMAL 0x40
+#define HWRST_STATUS_ALARM 0x50
+#define HWRST_STATUS_SLEEP 0x60
+#define HWRST_STATUS_FASTBOOT 0x30
+#define HWRST_STATUS_SPECIAL 0x70
+#define HWRST_STATUS_PANIC 0x80
+#define HWRST_STATUS_CFTREBOOT 0x90
+#define HWRST_STATUS_AUTODLOADER 0xa0
+#define HWRST_STATUS_IQMODE 0xb0
+#define HWRST_STATUS_SPRDISK 0xc0
+#define HWRST_STATUS_FACTORYTEST 0xe0
+#define HWRST_STATUS_WATCHDOG 0xf0
+
+/* Default timeout of 50 ms, converted to watchdog counter values */
+#define WDG_LOAD_VAL ((50 * 32768) / 1000)
+#define WDG_LOAD_MASK GENMASK(15, 0)
+#define WDG_UNLOCK_KEY 0xe551
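+
+/*
+ * Worked example: with the 32.768 kHz watchdog counter implied by the
+ * conversion above, WDG_LOAD_VAL = (50 * 32768) / 1000 = 1638 counts,
+ * i.e. roughly 50 ms before the watchdog fires and resets the system.
+ */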
+
+struct sprd_adi_wdg {
+ u32 base;
+ u32 rst_sts;
+ u32 wdg_en;
+ u32 wdg_clk;
+};
+
+struct sprd_adi_data {
+ u32 slave_offset;
+ u32 slave_addr_size;
+ int (*read_check)(u32 val, u32 reg);
+ int (*restart)(struct notifier_block *this,
+ unsigned long mode, void *cmd);
+ void (*wdg_rst)(void *p);
+};
+
+struct sprd_adi {
+ struct spi_controller *ctlr;
+ struct device *dev;
+ void __iomem *base;
+ struct hwspinlock *hwlock;
+ unsigned long slave_vbase;
+ unsigned long slave_pbase;
+ struct notifier_block restart_handler;
+ const struct sprd_adi_data *data;
+};
+
+static int sprd_adi_check_addr(struct sprd_adi *sadi, u32 reg)
+{
+ if (reg >= sadi->data->slave_addr_size) {
+ dev_err(sadi->dev,
+ "slave address offset is incorrect, reg = 0x%x\n",
+ reg);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int sprd_adi_drain_fifo(struct sprd_adi *sadi)
+{
+ u32 timeout = ADI_FIFO_DRAIN_TIMEOUT;
+ u32 sts;
+
+ do {
+ sts = readl_relaxed(sadi->base + REG_ADI_ARM_FIFO_STS);
+ if (sts & BIT_FIFO_EMPTY)
+ break;
+
+ cpu_relax();
+ } while (--timeout);
+
+ if (timeout == 0) {
+ dev_err(sadi->dev, "drain write fifo timeout\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int sprd_adi_fifo_is_full(struct sprd_adi *sadi)
+{
+ return readl_relaxed(sadi->base + REG_ADI_ARM_FIFO_STS) & BIT_FIFO_FULL;
+}
+
+static int sprd_adi_read_check(u32 val, u32 addr)
+{
+ u32 rd_addr;
+
+ rd_addr = (val & RD_ADDR_MASK) >> RD_ADDR_SHIFT;
+
+ if (rd_addr != addr) {
+ pr_err("ADI read error, addr = 0x%x, val = 0x%x\n", addr, val);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int sprd_adi_read_check_r2(u32 val, u32 reg)
+{
+ return sprd_adi_read_check(val, reg & RDBACK_ADDR_MASK_R2);
+}
+
+static int sprd_adi_read_check_r3(u32 val, u32 reg)
+{
+ return sprd_adi_read_check(val, (reg & RDBACK_ADDR_MASK_R3) >> RDBACK_ADDR_SHIFT_R3);
+}
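+
+/*
+ * Worked example of the read-back check: when reading PMIC_MODULE_EN (0xc08),
+ * bits [30:16] of REG_ADI_RD_DATA are expected to equal 0xc08 on r2p0, and
+ * 0xc08 >> 2 = 0x302 on r3p0.
+ */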
+
+static int sprd_adi_read(struct sprd_adi *sadi, u32 reg, u32 *read_val)
+{
+ int read_timeout = ADI_READ_TIMEOUT;
+ unsigned long flags;
+ u32 val;
+ int ret = 0;
+
+ if (sadi->hwlock) {
+ ret = hwspin_lock_timeout_irqsave(sadi->hwlock,
+ ADI_HWSPINLOCK_TIMEOUT,
+ &flags);
+ if (ret) {
+ dev_err(sadi->dev, "get the hw lock failed\n");
+ return ret;
+ }
+ }
+
+ ret = sprd_adi_check_addr(sadi, reg);
+ if (ret)
+ goto out;
+
+ /*
+ * Write the slave address offset to be read into the RD_CMD register,
+ * then the ADI controller will start the transfer automatically.
+ */
+ writel_relaxed(reg, sadi->base + REG_ADI_RD_CMD);
+
+ /*
+ * Wait for the read operation to complete: BIT_RD_CMD_BUSY is set
+ * when the read command is written to the register, and is cleared
+ * again once the read operation has completed.
+ */
+ do {
+ val = readl_relaxed(sadi->base + REG_ADI_RD_DATA);
+ if (!(val & BIT_RD_CMD_BUSY))
+ break;
+
+ cpu_relax();
+ } while (--read_timeout);
+
+ if (read_timeout == 0) {
+ dev_err(sadi->dev, "ADI read timeout\n");
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /*
+ * Before ADI r5p0, the returned value contains both data and the read
+ * register address: bits 0 to 15 hold the data and bits 16 to 30 hold
+ * the read register address. We can therefore check the returned
+ * register address to validate the data.
+ */
+ if (sadi->data->read_check) {
+ ret = sadi->data->read_check(val, reg);
+ if (ret < 0)
+ goto out;
+ }
+
+ *read_val = val & RD_VALUE_MASK;
+
+out:
+ if (sadi->hwlock)
+ hwspin_unlock_irqrestore(sadi->hwlock, &flags);
+ return ret;
+}
+
+static int sprd_adi_write(struct sprd_adi *sadi, u32 reg, u32 val)
+{
+ u32 timeout = ADI_FIFO_DRAIN_TIMEOUT;
+ unsigned long flags;
+ int ret;
+
+ if (sadi->hwlock) {
+ ret = hwspin_lock_timeout_irqsave(sadi->hwlock,
+ ADI_HWSPINLOCK_TIMEOUT,
+ &flags);
+ if (ret) {
+ dev_err(sadi->dev, "get the hw lock failed\n");
+ return ret;
+ }
+ }
+
+ ret = sprd_adi_check_addr(sadi, reg);
+ if (ret)
+ goto out;
+
+ ret = sprd_adi_drain_fifo(sadi);
+ if (ret < 0)
+ goto out;
+
+ /*
+ * We should wait until the write FIFO is empty before writing data to
+ * the PMIC registers.
+ */
+ do {
+ if (!sprd_adi_fifo_is_full(sadi)) {
+ /* We need the virtual register address to perform the write. */
+ writel_relaxed(val, (void __iomem *)(sadi->slave_vbase + reg));
+ break;
+ }
+
+ cpu_relax();
+ } while (--timeout);
+
+ if (timeout == 0) {
+ dev_err(sadi->dev, "write fifo is full\n");
+ ret = -EBUSY;
+ }
+
+out:
+ if (sadi->hwlock)
+ hwspin_unlock_irqrestore(sadi->hwlock, &flags);
+ return ret;
+}
+
+static int sprd_adi_transfer_one(struct spi_controller *ctlr,
+ struct spi_device *spi_dev,
+ struct spi_transfer *t)
+{
+ struct sprd_adi *sadi = spi_controller_get_devdata(ctlr);
+ u32 reg, val;
+ int ret;
+
+ if (t->rx_buf) {
+ reg = *(u32 *)t->rx_buf;
+ ret = sprd_adi_read(sadi, reg, &val);
+ *(u32 *)t->rx_buf = val;
+ } else if (t->tx_buf) {
+ u32 *p = (u32 *)t->tx_buf;
+ reg = *p++;
+ val = *p;
+ ret = sprd_adi_write(sadi, reg, val);
+ } else {
+ dev_err(sadi->dev, "no buffer for transfer\n");
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
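+
+/*
+ * Illustrative buffer layout for the transfer hook above (a sketch derived
+ * from the code, not a documented ABI): a read transfer passes the register
+ * offset in the first u32 of rx_buf and receives the value back in the same
+ * word, while a write transfer passes two u32s in tx_buf, e.g.:
+ *
+ *	u32 rd = PMIC_RST_STATUS;              // read: offset in, value out
+ *	u32 wr[2] = { PMIC_RST_STATUS, val };  // write: { offset, value }
+ */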
+
+static void sprd_adi_set_wdt_rst_mode(void *p)
+{
+#if IS_ENABLED(CONFIG_SPRD_WATCHDOG)
+ u32 val;
+ struct sprd_adi *sadi = (struct sprd_adi *)p;
+
+ /* Init watchdog reset mode */
+ sprd_adi_read(sadi, PMIC_RST_STATUS, &val);
+ val |= HWRST_STATUS_WATCHDOG;
+ sprd_adi_write(sadi, PMIC_RST_STATUS, val);
+#endif
+}
+
+static int sprd_adi_restart(struct notifier_block *this, unsigned long mode,
+ void *cmd, struct sprd_adi_wdg *wdg)
+{
+ struct sprd_adi *sadi = container_of(this, struct sprd_adi,
+ restart_handler);
+ u32 val, reboot_mode = 0;
+
+ if (!cmd)
+ reboot_mode = HWRST_STATUS_NORMAL;
+ else if (!strncmp(cmd, "recovery", 8))
+ reboot_mode = HWRST_STATUS_RECOVERY;
+ else if (!strncmp(cmd, "alarm", 5))
+ reboot_mode = HWRST_STATUS_ALARM;
+ else if (!strncmp(cmd, "fastsleep", 9))
+ reboot_mode = HWRST_STATUS_SLEEP;
+ else if (!strncmp(cmd, "bootloader", 10))
+ reboot_mode = HWRST_STATUS_FASTBOOT;
+ else if (!strncmp(cmd, "panic", 5))
+ reboot_mode = HWRST_STATUS_PANIC;
+ else if (!strncmp(cmd, "special", 7))
+ reboot_mode = HWRST_STATUS_SPECIAL;
+ else if (!strncmp(cmd, "cftreboot", 9))
+ reboot_mode = HWRST_STATUS_CFTREBOOT;
+ else if (!strncmp(cmd, "autodloader", 11))
+ reboot_mode = HWRST_STATUS_AUTODLOADER;
+ else if (!strncmp(cmd, "iqmode", 6))
+ reboot_mode = HWRST_STATUS_IQMODE;
+ else if (!strncmp(cmd, "sprdisk", 7))
+ reboot_mode = HWRST_STATUS_SPRDISK;
+ else if (!strncmp(cmd, "tospanic", 8))
+ reboot_mode = HWRST_STATUS_SECURITY;
+ else if (!strncmp(cmd, "factorytest", 11))
+ reboot_mode = HWRST_STATUS_FACTORYTEST;
+ else
+ reboot_mode = HWRST_STATUS_NORMAL;
+
+ /* Record the reboot mode */
+ sprd_adi_read(sadi, wdg->rst_sts, &val);
+ val &= ~HWRST_STATUS_WATCHDOG;
+ val |= reboot_mode;
+ sprd_adi_write(sadi, wdg->rst_sts, val);
+
+ /* Enable the interface clock of the watchdog */
+ sprd_adi_read(sadi, wdg->wdg_en, &val);
+ val |= BIT_WDG_EN;
+ sprd_adi_write(sadi, wdg->wdg_en, val);
+
+ /* Enable the work clock of the watchdog */
+ sprd_adi_read(sadi, wdg->wdg_clk, &val);
+ val |= BIT_WDG_EN;
+ sprd_adi_write(sadi, wdg->wdg_clk, val);
+
+ /* Unlock the watchdog */
+ sprd_adi_write(sadi, wdg->base + REG_WDG_LOCK, WDG_UNLOCK_KEY);
+
+ sprd_adi_read(sadi, wdg->base + REG_WDG_CTRL, &val);
+ val |= BIT_WDG_NEW;
+ sprd_adi_write(sadi, wdg->base + REG_WDG_CTRL, val);
+
+ /* Load the watchdog timeout value; 50 ms is always enough. */
+ sprd_adi_write(sadi, wdg->base + REG_WDG_LOAD_HIGH, 0);
+ sprd_adi_write(sadi, wdg->base + REG_WDG_LOAD_LOW,
+ WDG_LOAD_VAL & WDG_LOAD_MASK);
+
+ /* Start the watchdog to reset system */
+ sprd_adi_read(sadi, wdg->base + REG_WDG_CTRL, &val);
+ val |= BIT_WDG_RUN | BIT_WDG_RST;
+ sprd_adi_write(sadi, wdg->base + REG_WDG_CTRL, val);
+
+ /* Lock the watchdog */
+ sprd_adi_write(sadi, wdg->base + REG_WDG_LOCK, ~WDG_UNLOCK_KEY);
+
+ mdelay(1000);
+
+ dev_emerg(sadi->dev, "Unable to restart system\n");
+ return NOTIFY_DONE;
+}
+
+static int sprd_adi_restart_sc9860(struct notifier_block *this,
+ unsigned long mode, void *cmd)
+{
+ struct sprd_adi_wdg wdg = {
+ .base = PMIC_WDG_BASE,
+ .rst_sts = PMIC_RST_STATUS,
+ .wdg_en = PMIC_MODULE_EN,
+ .wdg_clk = PMIC_CLK_EN,
+ };
+
+ return sprd_adi_restart(this, mode, cmd, &wdg);
+}
+
+static void sprd_adi_hw_init(struct sprd_adi *sadi)
+{
+ struct device_node *np = sadi->dev->of_node;
+ int i, size, chn_cnt;
+ const __be32 *list;
+ u32 tmp;
+
+ /* Set all channels as default priority */
+ writel_relaxed(0, sadi->base + REG_ADI_CHN_PRIL);
+ writel_relaxed(0, sadi->base + REG_ADI_CHN_PRIH);
+
+ /* Set clock auto gate mode */
+ tmp = readl_relaxed(sadi->base + REG_ADI_GSSI_CFG0);
+ tmp &= ~BIT_CLK_ALL_ON;
+ writel_relaxed(tmp, sadi->base + REG_ADI_GSSI_CFG0);
+
+ /* Set hardware channels setting */
+ list = of_get_property(np, "sprd,hw-channels", &size);
+ if (!list || !size) {
+ dev_info(sadi->dev, "no hw channels setting in node\n");
+ return;
+ }
+
+ chn_cnt = size / 8;
+ for (i = 0; i < chn_cnt; i++) {
+ u32 value;
+ u32 chn_id = be32_to_cpu(*list++);
+ u32 chn_config = be32_to_cpu(*list++);
+
+ /* Channels 0 and 1 are software channels */
+ if (chn_id < 2)
+ continue;
+
+ writel_relaxed(chn_config, sadi->base +
+ REG_ADI_CHN_ADDR(chn_id));
+
+ if (chn_id < 32) {
+ value = readl_relaxed(sadi->base + REG_ADI_CHN_EN);
+ value |= BIT(chn_id);
+ writel_relaxed(value, sadi->base + REG_ADI_CHN_EN);
+ } else if (chn_id < ADI_HW_CHNS) {
+ value = readl_relaxed(sadi->base + REG_ADI_CHN_EN1);
+ value |= BIT(chn_id - 32);
+ writel_relaxed(value, sadi->base + REG_ADI_CHN_EN1);
+ }
+ }
+}
+
+static int sprd_adi_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ const struct sprd_adi_data *data;
+ struct spi_controller *ctlr;
+ struct sprd_adi *sadi;
+ struct resource *res;
+ u16 num_chipselect;
+ int ret;
+
+ if (!np) {
+ dev_err(&pdev->dev, "can not find the adi bus node\n");
+ return -ENODEV;
+ }
+
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data) {
+ dev_err(&pdev->dev, "no matching driver data found\n");
+ return -EINVAL;
+ }
+
+ pdev->id = of_alias_get_id(np, "spi");
+ num_chipselect = of_get_child_count(np);
+
+ ctlr = spi_alloc_master(&pdev->dev, sizeof(struct sprd_adi));
+ if (!ctlr)
+ return -ENOMEM;
+
+ dev_set_drvdata(&pdev->dev, ctlr);
+ sadi = spi_controller_get_devdata(ctlr);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ sadi->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(sadi->base)) {
+ ret = PTR_ERR(sadi->base);
+ goto put_ctlr;
+ }
+
+ sadi->slave_vbase = (unsigned long)sadi->base +
+ data->slave_offset;
+ sadi->slave_pbase = res->start + data->slave_offset;
+ sadi->ctlr = ctlr;
+ sadi->dev = &pdev->dev;
+ sadi->data = data;
+ ret = of_hwspin_lock_get_id(np, 0);
+ if (ret > 0 || (IS_ENABLED(CONFIG_HWSPINLOCK) && ret == 0)) {
+ sadi->hwlock =
+ devm_hwspin_lock_request_specific(&pdev->dev, ret);
+ if (!sadi->hwlock) {
+ ret = -ENXIO;
+ goto put_ctlr;
+ }
+ } else {
+ switch (ret) {
+ case -ENOENT:
+ dev_info(&pdev->dev, "no hardware spinlock supplied\n");
+ break;
+ default:
+ dev_err_probe(&pdev->dev, ret, "failed to find hwlock id\n");
+ goto put_ctlr;
+ }
+ }
+
+ sprd_adi_hw_init(sadi);
+
+ if (sadi->data->wdg_rst)
+ sadi->data->wdg_rst(sadi);
+
+ ctlr->dev.of_node = pdev->dev.of_node;
+ ctlr->bus_num = pdev->id;
+ ctlr->num_chipselect = num_chipselect;
+ ctlr->flags = SPI_MASTER_HALF_DUPLEX;
+ ctlr->bits_per_word_mask = 0;
+ ctlr->transfer_one = sprd_adi_transfer_one;
+
+ ret = devm_spi_register_controller(&pdev->dev, ctlr);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register SPI controller\n");
+ goto put_ctlr;
+ }
+
+ if (sadi->data->restart) {
+ sadi->restart_handler.notifier_call = sadi->data->restart;
+ sadi->restart_handler.priority = 128;
+ ret = register_restart_handler(&sadi->restart_handler);
+ if (ret) {
+ dev_err(&pdev->dev, "can not register restart handler\n");
+ goto put_ctlr;
+ }
+ }
+
+ return 0;
+
+put_ctlr:
+ spi_controller_put(ctlr);
+ return ret;
+}
+
+static int sprd_adi_remove(struct platform_device *pdev)
+{
+ struct spi_controller *ctlr = dev_get_drvdata(&pdev->dev);
+ struct sprd_adi *sadi = spi_controller_get_devdata(ctlr);
+
+ unregister_restart_handler(&sadi->restart_handler);
+ return 0;
+}
+
+static struct sprd_adi_data sc9860_data = {
+ .slave_offset = ADI_10BIT_SLAVE_OFFSET,
+ .slave_addr_size = ADI_10BIT_SLAVE_ADDR_SIZE,
+ .read_check = sprd_adi_read_check_r2,
+ .restart = sprd_adi_restart_sc9860,
+ .wdg_rst = sprd_adi_set_wdt_rst_mode,
+};
+
+static struct sprd_adi_data sc9863_data = {
+ .slave_offset = ADI_12BIT_SLAVE_OFFSET,
+ .slave_addr_size = ADI_12BIT_SLAVE_ADDR_SIZE,
+ .read_check = sprd_adi_read_check_r3,
+};
+
+static struct sprd_adi_data ums512_data = {
+ .slave_offset = ADI_15BIT_SLAVE_OFFSET,
+ .slave_addr_size = ADI_15BIT_SLAVE_ADDR_SIZE,
+ .read_check = sprd_adi_read_check_r3,
+};
+
+static const struct of_device_id sprd_adi_of_match[] = {
+ {
+ .compatible = "sprd,sc9860-adi",
+ .data = &sc9860_data,
+ },
+ {
+ .compatible = "sprd,sc9863-adi",
+ .data = &sc9863_data,
+ },
+ {
+ .compatible = "sprd,ums512-adi",
+ .data = &ums512_data,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sprd_adi_of_match);
+
+static struct platform_driver sprd_adi_driver = {
+ .driver = {
+ .name = "sprd-adi",
+ .of_match_table = sprd_adi_of_match,
+ },
+ .probe = sprd_adi_probe,
+ .remove = sprd_adi_remove,
+};
+module_platform_driver(sprd_adi_driver);
+
+MODULE_DESCRIPTION("Spreadtrum ADI Controller Driver");
+MODULE_AUTHOR("Baolin Wang <Baolin.Wang@spreadtrum.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-sprd.c b/drivers/spi/spi-sprd.c
new file mode 100644
index 000000000..65b8075da
--- /dev/null
+++ b/drivers/spi/spi-sprd.c
@@ -0,0 +1,1086 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Spreadtrum Communications Inc.
+
+#include <linux/clk.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma/sprd-dma.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/spi/spi.h>
+
+#define SPRD_SPI_TXD 0x0
+#define SPRD_SPI_CLKD 0x4
+#define SPRD_SPI_CTL0 0x8
+#define SPRD_SPI_CTL1 0xc
+#define SPRD_SPI_CTL2 0x10
+#define SPRD_SPI_CTL3 0x14
+#define SPRD_SPI_CTL4 0x18
+#define SPRD_SPI_CTL5 0x1c
+#define SPRD_SPI_INT_EN 0x20
+#define SPRD_SPI_INT_CLR 0x24
+#define SPRD_SPI_INT_RAW_STS 0x28
+#define SPRD_SPI_INT_MASK_STS 0x2c
+#define SPRD_SPI_STS1 0x30
+#define SPRD_SPI_STS2 0x34
+#define SPRD_SPI_DSP_WAIT 0x38
+#define SPRD_SPI_STS3 0x3c
+#define SPRD_SPI_CTL6 0x40
+#define SPRD_SPI_STS4 0x44
+#define SPRD_SPI_FIFO_RST 0x48
+#define SPRD_SPI_CTL7 0x4c
+#define SPRD_SPI_STS5 0x50
+#define SPRD_SPI_CTL8 0x54
+#define SPRD_SPI_CTL9 0x58
+#define SPRD_SPI_CTL10 0x5c
+#define SPRD_SPI_CTL11 0x60
+#define SPRD_SPI_CTL12 0x64
+#define SPRD_SPI_STS6 0x68
+#define SPRD_SPI_STS7 0x6c
+#define SPRD_SPI_STS8 0x70
+#define SPRD_SPI_STS9 0x74
+
+/* Bits & mask definition for register CTL0 */
+#define SPRD_SPI_SCK_REV BIT(13)
+#define SPRD_SPI_NG_TX BIT(1)
+#define SPRD_SPI_NG_RX BIT(0)
+#define SPRD_SPI_CHNL_LEN_MASK GENMASK(4, 0)
+#define SPRD_SPI_CSN_MASK GENMASK(11, 8)
+#define SPRD_SPI_CS0_VALID BIT(8)
+
+/* Bits & mask definition for register SPI_INT_EN */
+#define SPRD_SPI_TX_END_INT_EN BIT(8)
+#define SPRD_SPI_RX_END_INT_EN BIT(9)
+
+/* Bits & mask definition for register SPI_INT_RAW_STS */
+#define SPRD_SPI_TX_END_RAW BIT(8)
+#define SPRD_SPI_RX_END_RAW BIT(9)
+
+/* Bits & mask definition for register SPI_INT_CLR */
+#define SPRD_SPI_TX_END_CLR BIT(8)
+#define SPRD_SPI_RX_END_CLR BIT(9)
+
+/* Bits & mask definition for register INT_MASK_STS */
+#define SPRD_SPI_MASK_RX_END BIT(9)
+#define SPRD_SPI_MASK_TX_END BIT(8)
+
+/* Bits & mask definition for register STS2 */
+#define SPRD_SPI_TX_BUSY BIT(8)
+
+/* Bits & mask definition for register CTL1 */
+#define SPRD_SPI_RX_MODE BIT(12)
+#define SPRD_SPI_TX_MODE BIT(13)
+#define SPRD_SPI_RTX_MD_MASK GENMASK(13, 12)
+
+/* Bits & mask definition for register CTL2 */
+#define SPRD_SPI_DMA_EN BIT(6)
+
+/* Bits & mask definition for register CTL4 */
+#define SPRD_SPI_START_RX BIT(9)
+#define SPRD_SPI_ONLY_RECV_MASK GENMASK(8, 0)
+
+/* Bits & mask definition for register SPI_INT_CLR */
+#define SPRD_SPI_RX_END_INT_CLR BIT(9)
+#define SPRD_SPI_TX_END_INT_CLR BIT(8)
+
+/* Bits & mask definition for register SPI_INT_RAW */
+#define SPRD_SPI_RX_END_IRQ BIT(9)
+#define SPRD_SPI_TX_END_IRQ BIT(8)
+
+/* Bits & mask definition for register CTL12 */
+#define SPRD_SPI_SW_RX_REQ BIT(0)
+#define SPRD_SPI_SW_TX_REQ BIT(1)
+
+/* Bits & mask definition for register CTL7 */
+#define SPRD_SPI_DATA_LINE2_EN BIT(15)
+#define SPRD_SPI_MODE_MASK GENMASK(5, 3)
+#define SPRD_SPI_MODE_OFFSET 3
+#define SPRD_SPI_3WIRE_MODE 4
+#define SPRD_SPI_4WIRE_MODE 0
+
+/* Bits & mask definition for register CTL8 */
+#define SPRD_SPI_TX_MAX_LEN_MASK GENMASK(19, 0)
+#define SPRD_SPI_TX_LEN_H_MASK GENMASK(3, 0)
+#define SPRD_SPI_TX_LEN_H_OFFSET 16
+
+/* Bits & mask definition for register CTL9 */
+#define SPRD_SPI_TX_LEN_L_MASK GENMASK(15, 0)
+
+/* Bits & mask definition for register CTL10 */
+#define SPRD_SPI_RX_MAX_LEN_MASK GENMASK(19, 0)
+#define SPRD_SPI_RX_LEN_H_MASK GENMASK(3, 0)
+#define SPRD_SPI_RX_LEN_H_OFFSET 16
+
+/* Bits & mask definition for register CTL11 */
+#define SPRD_SPI_RX_LEN_L_MASK GENMASK(15, 0)
+
+/* Default & maximum word delay cycles */
+#define SPRD_SPI_MIN_DELAY_CYCLE 14
+#define SPRD_SPI_MAX_DELAY_CYCLE 130
+
+#define SPRD_SPI_FIFO_SIZE 32
+#define SPRD_SPI_CHIP_CS_NUM 0x4
+#define SPRD_SPI_CHNL_LEN 2
+#define SPRD_SPI_DEFAULT_SOURCE 26000000
+#define SPRD_SPI_MAX_SPEED_HZ 48000000
+#define SPRD_SPI_AUTOSUSPEND_DELAY 100
+#define SPRD_SPI_DMA_STEP 8
+
+enum sprd_spi_dma_channel {
+ SPRD_SPI_RX,
+ SPRD_SPI_TX,
+ SPRD_SPI_MAX,
+};
+
+struct sprd_spi_dma {
+ bool enable;
+ struct dma_chan *dma_chan[SPRD_SPI_MAX];
+ enum dma_slave_buswidth width;
+ u32 fragmens_len;
+ u32 rx_len;
+};
+
+struct sprd_spi {
+ void __iomem *base;
+ phys_addr_t phy_base;
+ struct device *dev;
+ struct clk *clk;
+ int irq;
+ u32 src_clk;
+ u32 hw_mode;
+ u32 trans_len;
+ u32 trans_mode;
+ u32 word_delay;
+ u32 hw_speed_hz;
+ u32 len;
+ int status;
+ struct sprd_spi_dma dma;
+ struct completion xfer_completion;
+ const void *tx_buf;
+ void *rx_buf;
+ int (*read_bufs)(struct sprd_spi *ss, u32 len);
+ int (*write_bufs)(struct sprd_spi *ss, u32 len);
+};
+
+static u32 sprd_spi_transfer_max_timeout(struct sprd_spi *ss,
+ struct spi_transfer *t)
+{
+ /*
+ * The time spent on transmission of the full FIFO data is the maximum
+ * SPI transmission time.
+ */
+ u32 size = t->bits_per_word * SPRD_SPI_FIFO_SIZE;
+ u32 bit_time_us = DIV_ROUND_UP(USEC_PER_SEC, ss->hw_speed_hz);
+ u32 total_time_us = size * bit_time_us;
+ /*
+ * Our SPI hardware inserts an interval between consecutive data words,
+ * so this interval time must be added to the total transmission time.
+ */
+ u32 interval_cycle = SPRD_SPI_FIFO_SIZE * ss->word_delay;
+ u32 interval_time_us = DIV_ROUND_UP(interval_cycle * USEC_PER_SEC,
+ ss->src_clk);
+
+ return total_time_us + interval_time_us;
+}
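+
+/*
+ * Worked example for the timeout above: with 8 bits per word, a full FIFO is
+ * 8 * 32 = 256 bits; at hw_speed_hz = 1 MHz each bit takes 1 us, giving
+ * 256 us of data time. With word_delay = 14 cycles and src_clk = 26 MHz the
+ * interval time is DIV_ROUND_UP(32 * 14 * 1000000, 26000000) = 18 us, so the
+ * poll loops below wait for at most roughly 274 us.
+ */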
+
+static int sprd_spi_wait_for_tx_end(struct sprd_spi *ss, struct spi_transfer *t)
+{
+ u32 val, us;
+ int ret;
+
+ us = sprd_spi_transfer_max_timeout(ss, t);
+ ret = readl_relaxed_poll_timeout(ss->base + SPRD_SPI_INT_RAW_STS, val,
+ val & SPRD_SPI_TX_END_IRQ, 0, us);
+ if (ret) {
+ dev_err(ss->dev, "SPI error, spi send timeout!\n");
+ return ret;
+ }
+
+ ret = readl_relaxed_poll_timeout(ss->base + SPRD_SPI_STS2, val,
+ !(val & SPRD_SPI_TX_BUSY), 0, us);
+ if (ret) {
+ dev_err(ss->dev, "SPI error, spi busy timeout!\n");
+ return ret;
+ }
+
+ writel_relaxed(SPRD_SPI_TX_END_INT_CLR, ss->base + SPRD_SPI_INT_CLR);
+
+ return 0;
+}
+
+static int sprd_spi_wait_for_rx_end(struct sprd_spi *ss, struct spi_transfer *t)
+{
+ u32 val, us;
+ int ret;
+
+ us = sprd_spi_transfer_max_timeout(ss, t);
+ ret = readl_relaxed_poll_timeout(ss->base + SPRD_SPI_INT_RAW_STS, val,
+ val & SPRD_SPI_RX_END_IRQ, 0, us);
+ if (ret) {
+ dev_err(ss->dev, "SPI error, spi rx timeout!\n");
+ return ret;
+ }
+
+ writel_relaxed(SPRD_SPI_RX_END_INT_CLR, ss->base + SPRD_SPI_INT_CLR);
+
+ return 0;
+}
+
+static void sprd_spi_tx_req(struct sprd_spi *ss)
+{
+ writel_relaxed(SPRD_SPI_SW_TX_REQ, ss->base + SPRD_SPI_CTL12);
+}
+
+static void sprd_spi_rx_req(struct sprd_spi *ss)
+{
+ writel_relaxed(SPRD_SPI_SW_RX_REQ, ss->base + SPRD_SPI_CTL12);
+}
+
+static void sprd_spi_enter_idle(struct sprd_spi *ss)
+{
+ u32 val = readl_relaxed(ss->base + SPRD_SPI_CTL1);
+
+ val &= ~SPRD_SPI_RTX_MD_MASK;
+ writel_relaxed(val, ss->base + SPRD_SPI_CTL1);
+}
+
+static void sprd_spi_set_transfer_bits(struct sprd_spi *ss, u32 bits)
+{
+ u32 val = readl_relaxed(ss->base + SPRD_SPI_CTL0);
+
+ /* Set the valid bits for every transaction */
+ val &= ~(SPRD_SPI_CHNL_LEN_MASK << SPRD_SPI_CHNL_LEN);
+ val |= bits << SPRD_SPI_CHNL_LEN;
+ writel_relaxed(val, ss->base + SPRD_SPI_CTL0);
+}
+
+static void sprd_spi_set_tx_length(struct sprd_spi *ss, u32 length)
+{
+ u32 val = readl_relaxed(ss->base + SPRD_SPI_CTL8);
+
+ length &= SPRD_SPI_TX_MAX_LEN_MASK;
+ val &= ~SPRD_SPI_TX_LEN_H_MASK;
+ val |= length >> SPRD_SPI_TX_LEN_H_OFFSET;
+ writel_relaxed(val, ss->base + SPRD_SPI_CTL8);
+
+ val = length & SPRD_SPI_TX_LEN_L_MASK;
+ writel_relaxed(val, ss->base + SPRD_SPI_CTL9);
+}
+
+static void sprd_spi_set_rx_length(struct sprd_spi *ss, u32 length)
+{
+ u32 val = readl_relaxed(ss->base + SPRD_SPI_CTL10);
+
+ length &= SPRD_SPI_RX_MAX_LEN_MASK;
+ val &= ~SPRD_SPI_RX_LEN_H_MASK;
+ val |= length >> SPRD_SPI_RX_LEN_H_OFFSET;
+ writel_relaxed(val, ss->base + SPRD_SPI_CTL10);
+
+ val = length & SPRD_SPI_RX_LEN_L_MASK;
+ writel_relaxed(val, ss->base + SPRD_SPI_CTL11);
+}
+
+static void sprd_spi_chipselect(struct spi_device *sdev, bool cs)
+{
+ struct spi_controller *sctlr = sdev->controller;
+ struct sprd_spi *ss = spi_controller_get_devdata(sctlr);
+ u32 val;
+
+ val = readl_relaxed(ss->base + SPRD_SPI_CTL0);
+ /* The SPI controller will pull down the CS pin if cs is 0 */
+ if (!cs) {
+ val &= ~SPRD_SPI_CS0_VALID;
+ writel_relaxed(val, ss->base + SPRD_SPI_CTL0);
+ } else {
+ val |= SPRD_SPI_CSN_MASK;
+ writel_relaxed(val, ss->base + SPRD_SPI_CTL0);
+ }
+}
+
+static int sprd_spi_write_only_receive(struct sprd_spi *ss, u32 len)
+{
+ u32 val;
+
+ /* Clear the start receive bit and reset receive data number */
+ val = readl_relaxed(ss->base + SPRD_SPI_CTL4);
+ val &= ~(SPRD_SPI_START_RX | SPRD_SPI_ONLY_RECV_MASK);
+ writel_relaxed(val, ss->base + SPRD_SPI_CTL4);
+
+ /* Set the receive data length */
+ val = readl_relaxed(ss->base + SPRD_SPI_CTL4);
+ val |= len & SPRD_SPI_ONLY_RECV_MASK;
+ writel_relaxed(val, ss->base + SPRD_SPI_CTL4);
+
+ /* Trigger to receive data */
+ val = readl_relaxed(ss->base + SPRD_SPI_CTL4);
+ val |= SPRD_SPI_START_RX;
+ writel_relaxed(val, ss->base + SPRD_SPI_CTL4);
+
+ return len;
+}
+
+static int sprd_spi_write_bufs_u8(struct sprd_spi *ss, u32 len)
+{
+ u8 *tx_p = (u8 *)ss->tx_buf;
+ int i;
+
+ for (i = 0; i < len; i++)
+ writeb_relaxed(tx_p[i], ss->base + SPRD_SPI_TXD);
+
+ ss->tx_buf += i;
+ return i;
+}
+
+static int sprd_spi_write_bufs_u16(struct sprd_spi *ss, u32 len)
+{
+ u16 *tx_p = (u16 *)ss->tx_buf;
+ int i;
+
+ for (i = 0; i < len; i++)
+ writew_relaxed(tx_p[i], ss->base + SPRD_SPI_TXD);
+
+ ss->tx_buf += i << 1;
+ return i << 1;
+}
+
+static int sprd_spi_write_bufs_u32(struct sprd_spi *ss, u32 len)
+{
+ u32 *tx_p = (u32 *)ss->tx_buf;
+ int i;
+
+ for (i = 0; i < len; i++)
+ writel_relaxed(tx_p[i], ss->base + SPRD_SPI_TXD);
+
+ ss->tx_buf += i << 2;
+ return i << 2;
+}
+
+static int sprd_spi_read_bufs_u8(struct sprd_spi *ss, u32 len)
+{
+ u8 *rx_p = (u8 *)ss->rx_buf;
+ int i;
+
+ for (i = 0; i < len; i++)
+ rx_p[i] = readb_relaxed(ss->base + SPRD_SPI_TXD);
+
+ ss->rx_buf += i;
+ return i;
+}
+
+static int sprd_spi_read_bufs_u16(struct sprd_spi *ss, u32 len)
+{
+ u16 *rx_p = (u16 *)ss->rx_buf;
+ int i;
+
+ for (i = 0; i < len; i++)
+ rx_p[i] = readw_relaxed(ss->base + SPRD_SPI_TXD);
+
+ ss->rx_buf += i << 1;
+ return i << 1;
+}
+
+static int sprd_spi_read_bufs_u32(struct sprd_spi *ss, u32 len)
+{
+ u32 *rx_p = (u32 *)ss->rx_buf;
+ int i;
+
+ for (i = 0; i < len; i++)
+ rx_p[i] = readl_relaxed(ss->base + SPRD_SPI_TXD);
+
+ ss->rx_buf += i << 2;
+ return i << 2;
+}
+
+static int sprd_spi_txrx_bufs(struct spi_device *sdev, struct spi_transfer *t)
+{
+ struct sprd_spi *ss = spi_controller_get_devdata(sdev->controller);
+ u32 trans_len = ss->trans_len, len;
+ int ret, write_size = 0, read_size = 0;
+
+ while (trans_len) {
+ len = trans_len > SPRD_SPI_FIFO_SIZE ? SPRD_SPI_FIFO_SIZE :
+ trans_len;
+ if (ss->trans_mode & SPRD_SPI_TX_MODE) {
+ sprd_spi_set_tx_length(ss, len);
+ write_size += ss->write_bufs(ss, len);
+
+ /*
+ * For our 3-wire mode or dual TX line mode, we need
+ * to request the controller to transfer.
+ */
+ if (ss->hw_mode & SPI_3WIRE || ss->hw_mode & SPI_TX_DUAL)
+ sprd_spi_tx_req(ss);
+
+ ret = sprd_spi_wait_for_tx_end(ss, t);
+ } else {
+ sprd_spi_set_rx_length(ss, len);
+
+ /*
+ * For our 3-wire mode or dual TX line mode, we need
+ * to request the controller to read.
+ */
+ if (ss->hw_mode & SPI_3WIRE || ss->hw_mode & SPI_TX_DUAL)
+ sprd_spi_rx_req(ss);
+ else
+ write_size += ss->write_bufs(ss, len);
+
+ ret = sprd_spi_wait_for_rx_end(ss, t);
+ }
+
+ if (ret)
+ goto complete;
+
+ if (ss->trans_mode & SPRD_SPI_RX_MODE)
+ read_size += ss->read_bufs(ss, len);
+
+ trans_len -= len;
+ }
+
+ if (ss->trans_mode & SPRD_SPI_TX_MODE)
+ ret = write_size;
+ else
+ ret = read_size;
+complete:
+ sprd_spi_enter_idle(ss);
+
+ return ret;
+}
+
+static void sprd_spi_irq_enable(struct sprd_spi *ss)
+{
+ u32 val;
+
+ /* Clear interrupt status before enabling interrupt. */
+ writel_relaxed(SPRD_SPI_TX_END_CLR | SPRD_SPI_RX_END_CLR,
+ ss->base + SPRD_SPI_INT_CLR);
+ /* Enable SPI interrupt only in DMA mode. */
+ val = readl_relaxed(ss->base + SPRD_SPI_INT_EN);
+ writel_relaxed(val | SPRD_SPI_TX_END_INT_EN |
+ SPRD_SPI_RX_END_INT_EN,
+ ss->base + SPRD_SPI_INT_EN);
+}
+
+static void sprd_spi_irq_disable(struct sprd_spi *ss)
+{
+ writel_relaxed(0, ss->base + SPRD_SPI_INT_EN);
+}
+
+static void sprd_spi_dma_enable(struct sprd_spi *ss, bool enable)
+{
+ u32 val = readl_relaxed(ss->base + SPRD_SPI_CTL2);
+
+ if (enable)
+ val |= SPRD_SPI_DMA_EN;
+ else
+ val &= ~SPRD_SPI_DMA_EN;
+
+ writel_relaxed(val, ss->base + SPRD_SPI_CTL2);
+}
+
+static int sprd_spi_dma_submit(struct dma_chan *dma_chan,
+ struct dma_slave_config *c,
+ struct sg_table *sg,
+ enum dma_transfer_direction dir)
+{
+ struct dma_async_tx_descriptor *desc;
+ dma_cookie_t cookie;
+ unsigned long flags;
+ int ret;
+
+ ret = dmaengine_slave_config(dma_chan, c);
+ if (ret < 0)
+ return ret;
+
+ flags = SPRD_DMA_FLAGS(SPRD_DMA_CHN_MODE_NONE, SPRD_DMA_NO_TRG,
+ SPRD_DMA_FRAG_REQ, SPRD_DMA_TRANS_INT);
+ desc = dmaengine_prep_slave_sg(dma_chan, sg->sgl, sg->nents, dir, flags);
+ if (!desc)
+ return -ENODEV;
+
+ cookie = dmaengine_submit(desc);
+ if (dma_submit_error(cookie))
+ return dma_submit_error(cookie);
+
+ dma_async_issue_pending(dma_chan);
+
+ return 0;
+}
+
+static int sprd_spi_dma_rx_config(struct sprd_spi *ss, struct spi_transfer *t)
+{
+ struct dma_chan *dma_chan = ss->dma.dma_chan[SPRD_SPI_RX];
+ struct dma_slave_config config = {
+ .src_addr = ss->phy_base,
+ .src_addr_width = ss->dma.width,
+ .dst_addr_width = ss->dma.width,
+ .dst_maxburst = ss->dma.fragmens_len,
+ };
+ int ret;
+
+ ret = sprd_spi_dma_submit(dma_chan, &config, &t->rx_sg, DMA_DEV_TO_MEM);
+ if (ret)
+ return ret;
+
+ return ss->dma.rx_len;
+}
+
+static int sprd_spi_dma_tx_config(struct sprd_spi *ss, struct spi_transfer *t)
+{
+ struct dma_chan *dma_chan = ss->dma.dma_chan[SPRD_SPI_TX];
+ struct dma_slave_config config = {
+ .dst_addr = ss->phy_base,
+ .src_addr_width = ss->dma.width,
+ .dst_addr_width = ss->dma.width,
+ .src_maxburst = ss->dma.fragmens_len,
+ };
+ int ret;
+
+ ret = sprd_spi_dma_submit(dma_chan, &config, &t->tx_sg, DMA_MEM_TO_DEV);
+ if (ret)
+ return ret;
+
+ return t->len;
+}
+
+static int sprd_spi_dma_request(struct sprd_spi *ss)
+{
+ ss->dma.dma_chan[SPRD_SPI_RX] = dma_request_chan(ss->dev, "rx_chn");
+ if (IS_ERR_OR_NULL(ss->dma.dma_chan[SPRD_SPI_RX]))
+ return dev_err_probe(ss->dev, PTR_ERR(ss->dma.dma_chan[SPRD_SPI_RX]),
+ "request RX DMA channel failed!\n");
+
+ ss->dma.dma_chan[SPRD_SPI_TX] = dma_request_chan(ss->dev, "tx_chn");
+ if (IS_ERR_OR_NULL(ss->dma.dma_chan[SPRD_SPI_TX])) {
+ dma_release_channel(ss->dma.dma_chan[SPRD_SPI_RX]);
+ return dev_err_probe(ss->dev, PTR_ERR(ss->dma.dma_chan[SPRD_SPI_TX]),
+ "request TX DMA channel failed!\n");
+ }
+
+ return 0;
+}
+
+static void sprd_spi_dma_release(struct sprd_spi *ss)
+{
+ if (ss->dma.dma_chan[SPRD_SPI_RX])
+ dma_release_channel(ss->dma.dma_chan[SPRD_SPI_RX]);
+
+ if (ss->dma.dma_chan[SPRD_SPI_TX])
+ dma_release_channel(ss->dma.dma_chan[SPRD_SPI_TX]);
+}
+
+static int sprd_spi_dma_txrx_bufs(struct spi_device *sdev,
+ struct spi_transfer *t)
+{
+ struct sprd_spi *ss = spi_master_get_devdata(sdev->master);
+ u32 trans_len = ss->trans_len;
+ int ret, write_size = 0;
+
+ reinit_completion(&ss->xfer_completion);
+ sprd_spi_irq_enable(ss);
+ if (ss->trans_mode & SPRD_SPI_TX_MODE) {
+ write_size = sprd_spi_dma_tx_config(ss, t);
+ sprd_spi_set_tx_length(ss, trans_len);
+
+ /*
+ * For our 3-wire mode or dual TX line mode, we need
+ * to request the controller to transfer.
+ */
+ if (ss->hw_mode & SPI_3WIRE || ss->hw_mode & SPI_TX_DUAL)
+ sprd_spi_tx_req(ss);
+ } else {
+ sprd_spi_set_rx_length(ss, trans_len);
+
+ /*
+ * For our 3-wire mode or dual TX line mode, we need
+ * to request the controller to read.
+ */
+ if (ss->hw_mode & SPI_3WIRE || ss->hw_mode & SPI_TX_DUAL)
+ sprd_spi_rx_req(ss);
+ else
+ write_size = ss->write_bufs(ss, trans_len);
+ }
+
+ if (write_size < 0) {
+ ret = write_size;
+ dev_err(ss->dev, "failed to write, ret = %d\n", ret);
+ goto trans_complete;
+ }
+
+ if (ss->trans_mode & SPRD_SPI_RX_MODE) {
+ /*
+ * Set up the DMA receive data length, which must be an
+ * integral multiple of the fragment length. However, when the
+ * length of the received data is less than the fragment length,
+ * the DMA can be configured to receive according to the actual
+ * length of the received data.
+ */
+ ss->dma.rx_len = t->len > ss->dma.fragmens_len ?
+ (t->len - t->len % ss->dma.fragmens_len) :
+ t->len;
+ ret = sprd_spi_dma_rx_config(ss, t);
+ if (ret < 0) {
+ dev_err(&sdev->dev,
+ "failed to configure rx DMA, ret = %d\n", ret);
+ goto trans_complete;
+ }
+ }
+
+ sprd_spi_dma_enable(ss, true);
+ wait_for_completion(&(ss->xfer_completion));
+
+ if (ss->trans_mode & SPRD_SPI_TX_MODE)
+ ret = write_size;
+ else
+ ret = ss->dma.rx_len;
+
+trans_complete:
+ sprd_spi_dma_enable(ss, false);
+ sprd_spi_enter_idle(ss);
+ sprd_spi_irq_disable(ss);
+
+ return ret;
+}
+
+static void sprd_spi_set_speed(struct sprd_spi *ss, u32 speed_hz)
+{
+ /*
+ * From the SPI datasheet, the prescale calculation formula is:
+ * prescale = SPI source clock / (2 * SPI_freq) - 1;
+ */
+ u32 clk_div = DIV_ROUND_UP(ss->src_clk, speed_hz << 1) - 1;
+
+ /* Save the real hardware speed */
+ ss->hw_speed_hz = (ss->src_clk >> 1) / (clk_div + 1);
+ writel_relaxed(clk_div, ss->base + SPRD_SPI_CLKD);
+}
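+
+/*
+ * Worked example for the divider above: with the default 26 MHz source clock
+ * and a requested speed of 1 MHz, clk_div = DIV_ROUND_UP(26000000, 2000000)
+ * - 1 = 12, and the resulting hardware speed is (26000000 / 2) / (12 + 1) =
+ * 1 MHz exactly.
+ */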
+
+static int sprd_spi_init_hw(struct sprd_spi *ss, struct spi_transfer *t)
+{
+ struct spi_delay *d = &t->word_delay;
+ u16 word_delay, interval;
+ u32 val;
+
+ if (d->unit != SPI_DELAY_UNIT_SCK)
+ return -EINVAL;
+
+ val = readl_relaxed(ss->base + SPRD_SPI_CTL0);
+ val &= ~(SPRD_SPI_SCK_REV | SPRD_SPI_NG_TX | SPRD_SPI_NG_RX);
+ /* Set default chip selection, clock phase and clock polarity */
+ val |= ss->hw_mode & SPI_CPHA ? SPRD_SPI_NG_RX : SPRD_SPI_NG_TX;
+ val |= ss->hw_mode & SPI_CPOL ? SPRD_SPI_SCK_REV : 0;
+ writel_relaxed(val, ss->base + SPRD_SPI_CTL0);
+
+ /*
+ * Set the interval between two SPI frames; per the datasheet the
+ * interval calculation formula is:
+ * interval time (source clock cycles) = interval * 4 + 10.
+ */
+ word_delay = clamp_t(u16, d->value, SPRD_SPI_MIN_DELAY_CYCLE,
+ SPRD_SPI_MAX_DELAY_CYCLE);
+ interval = DIV_ROUND_UP(word_delay - 10, 4);
+ ss->word_delay = interval * 4 + 10;
+ writel_relaxed(interval, ss->base + SPRD_SPI_CTL5);
+
+ /* Reset SPI fifo */
+ writel_relaxed(1, ss->base + SPRD_SPI_FIFO_RST);
+ writel_relaxed(0, ss->base + SPRD_SPI_FIFO_RST);
+
+ /* Set SPI work mode */
+ val = readl_relaxed(ss->base + SPRD_SPI_CTL7);
+ val &= ~SPRD_SPI_MODE_MASK;
+
+ if (ss->hw_mode & SPI_3WIRE)
+ val |= SPRD_SPI_3WIRE_MODE << SPRD_SPI_MODE_OFFSET;
+ else
+ val |= SPRD_SPI_4WIRE_MODE << SPRD_SPI_MODE_OFFSET;
+
+ if (ss->hw_mode & SPI_TX_DUAL)
+ val |= SPRD_SPI_DATA_LINE2_EN;
+ else
+ val &= ~SPRD_SPI_DATA_LINE2_EN;
+
+ writel_relaxed(val, ss->base + SPRD_SPI_CTL7);
+
+ return 0;
+}
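+
+/*
+ * Worked example for the word delay programmed above: a requested
+ * word_delay of 20 SCK cycles stays within the [14, 130] clamp, so
+ * interval = DIV_ROUND_UP(20 - 10, 4) = 3 and the effective delay becomes
+ * 3 * 4 + 10 = 22 source clock cycles, which is what ss->word_delay records.
+ */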
+
+static int sprd_spi_setup_transfer(struct spi_device *sdev,
+ struct spi_transfer *t)
+{
+ struct sprd_spi *ss = spi_controller_get_devdata(sdev->controller);
+ u8 bits_per_word = t->bits_per_word;
+ u32 val, mode = 0;
+ int ret;
+
+ ss->len = t->len;
+ ss->tx_buf = t->tx_buf;
+ ss->rx_buf = t->rx_buf;
+
+ ss->hw_mode = sdev->mode;
+ ret = sprd_spi_init_hw(ss, t);
+ if (ret)
+ return ret;
+
+ /* Set transfer speed and valid bits */
+ sprd_spi_set_speed(ss, t->speed_hz);
+ sprd_spi_set_transfer_bits(ss, bits_per_word);
+
+ if (bits_per_word > 16)
+ bits_per_word = round_up(bits_per_word, 16);
+ else
+ bits_per_word = round_up(bits_per_word, 8);
+
+ switch (bits_per_word) {
+ case 8:
+ ss->trans_len = t->len;
+ ss->read_bufs = sprd_spi_read_bufs_u8;
+ ss->write_bufs = sprd_spi_write_bufs_u8;
+ ss->dma.width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ ss->dma.fragmens_len = SPRD_SPI_DMA_STEP;
+ break;
+ case 16:
+ ss->trans_len = t->len >> 1;
+ ss->read_bufs = sprd_spi_read_bufs_u16;
+ ss->write_bufs = sprd_spi_write_bufs_u16;
+ ss->dma.width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ ss->dma.fragmens_len = SPRD_SPI_DMA_STEP << 1;
+ break;
+ case 32:
+ ss->trans_len = t->len >> 2;
+ ss->read_bufs = sprd_spi_read_bufs_u32;
+ ss->write_bufs = sprd_spi_write_bufs_u32;
+ ss->dma.width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ ss->dma.fragmens_len = SPRD_SPI_DMA_STEP << 2;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Set transfer read or write mode */
+ val = readl_relaxed(ss->base + SPRD_SPI_CTL1);
+ val &= ~SPRD_SPI_RTX_MD_MASK;
+ if (t->tx_buf)
+ mode |= SPRD_SPI_TX_MODE;
+ if (t->rx_buf)
+ mode |= SPRD_SPI_RX_MODE;
+
+ writel_relaxed(val | mode, ss->base + SPRD_SPI_CTL1);
+
+ ss->trans_mode = mode;
+
+	/*
+	 * In receive-only mode, the SPI controller must be triggered to
+	 * receive data automatically.
+	 */
+ if (ss->trans_mode == SPRD_SPI_RX_MODE)
+ ss->write_bufs = sprd_spi_write_only_receive;
+
+ return 0;
+}
+
+static int sprd_spi_transfer_one(struct spi_controller *sctlr,
+ struct spi_device *sdev,
+ struct spi_transfer *t)
+{
+ int ret;
+
+ ret = sprd_spi_setup_transfer(sdev, t);
+ if (ret)
+ goto setup_err;
+
+ if (sctlr->can_dma(sctlr, sdev, t))
+ ret = sprd_spi_dma_txrx_bufs(sdev, t);
+ else
+ ret = sprd_spi_txrx_bufs(sdev, t);
+
+ if (ret == t->len)
+ ret = 0;
+ else if (ret >= 0)
+ ret = -EREMOTEIO;
+
+setup_err:
+ spi_finalize_current_transfer(sctlr);
+
+ return ret;
+}
+
+static irqreturn_t sprd_spi_handle_irq(int irq, void *data)
+{
+ struct sprd_spi *ss = (struct sprd_spi *)data;
+ u32 val = readl_relaxed(ss->base + SPRD_SPI_INT_MASK_STS);
+
+ if (val & SPRD_SPI_MASK_TX_END) {
+ writel_relaxed(SPRD_SPI_TX_END_CLR, ss->base + SPRD_SPI_INT_CLR);
+ if (!(ss->trans_mode & SPRD_SPI_RX_MODE))
+ complete(&ss->xfer_completion);
+
+ return IRQ_HANDLED;
+ }
+
+ if (val & SPRD_SPI_MASK_RX_END) {
+ writel_relaxed(SPRD_SPI_RX_END_CLR, ss->base + SPRD_SPI_INT_CLR);
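+		/*
+		 * DMA only received the fragment-aligned portion of the
+		 * buffer; drain any remaining bytes from the FIFO by PIO
+		 * before completing the transfer.
+		 */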
+ if (ss->dma.rx_len < ss->len) {
+ ss->rx_buf += ss->dma.rx_len;
+ ss->dma.rx_len +=
+ ss->read_bufs(ss, ss->len - ss->dma.rx_len);
+ }
+ complete(&ss->xfer_completion);
+
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int sprd_spi_irq_init(struct platform_device *pdev, struct sprd_spi *ss)
+{
+ int ret;
+
+ ss->irq = platform_get_irq(pdev, 0);
+ if (ss->irq < 0)
+ return ss->irq;
+
+ ret = devm_request_irq(&pdev->dev, ss->irq, sprd_spi_handle_irq,
+ 0, pdev->name, ss);
+ if (ret)
+ dev_err(&pdev->dev, "failed to request spi irq %d, ret = %d\n",
+ ss->irq, ret);
+
+ return ret;
+}
+
+static int sprd_spi_clk_init(struct platform_device *pdev, struct sprd_spi *ss)
+{
+ struct clk *clk_spi, *clk_parent;
+
+ clk_spi = devm_clk_get(&pdev->dev, "spi");
+ if (IS_ERR(clk_spi)) {
+ dev_warn(&pdev->dev, "can't get the spi clock\n");
+ clk_spi = NULL;
+ }
+
+ clk_parent = devm_clk_get(&pdev->dev, "source");
+ if (IS_ERR(clk_parent)) {
+ dev_warn(&pdev->dev, "can't get the source clock\n");
+ clk_parent = NULL;
+ }
+
+ ss->clk = devm_clk_get(&pdev->dev, "enable");
+ if (IS_ERR(ss->clk)) {
+ dev_err(&pdev->dev, "can't get the enable clock\n");
+ return PTR_ERR(ss->clk);
+ }
+
+ if (!clk_set_parent(clk_spi, clk_parent))
+ ss->src_clk = clk_get_rate(clk_spi);
+ else
+ ss->src_clk = SPRD_SPI_DEFAULT_SOURCE;
+
+ return 0;
+}
+
+static bool sprd_spi_can_dma(struct spi_controller *sctlr,
+ struct spi_device *spi, struct spi_transfer *t)
+{
+ struct sprd_spi *ss = spi_controller_get_devdata(sctlr);
+
+ return ss->dma.enable && (t->len > SPRD_SPI_FIFO_SIZE);
+}
+
+static int sprd_spi_dma_init(struct platform_device *pdev, struct sprd_spi *ss)
+{
+ int ret;
+
+ ret = sprd_spi_dma_request(ss);
+ if (ret) {
+ if (ret == -EPROBE_DEFER)
+ return ret;
+
+ dev_warn(&pdev->dev,
+ "failed to request dma, enter no dma mode, ret = %d\n",
+ ret);
+
+ return 0;
+ }
+
+ ss->dma.enable = true;
+
+ return 0;
+}
+
+static int sprd_spi_probe(struct platform_device *pdev)
+{
+ struct spi_controller *sctlr;
+ struct resource *res;
+ struct sprd_spi *ss;
+ int ret;
+
+ pdev->id = of_alias_get_id(pdev->dev.of_node, "spi");
+ sctlr = spi_alloc_master(&pdev->dev, sizeof(*ss));
+ if (!sctlr)
+ return -ENOMEM;
+
+ ss = spi_controller_get_devdata(sctlr);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ss->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ss->base)) {
+ ret = PTR_ERR(ss->base);
+ goto free_controller;
+ }
+
+ ss->phy_base = res->start;
+ ss->dev = &pdev->dev;
+ sctlr->dev.of_node = pdev->dev.of_node;
+ sctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_3WIRE | SPI_TX_DUAL;
+ sctlr->bus_num = pdev->id;
+ sctlr->set_cs = sprd_spi_chipselect;
+ sctlr->transfer_one = sprd_spi_transfer_one;
+ sctlr->can_dma = sprd_spi_can_dma;
+ sctlr->auto_runtime_pm = true;
+ sctlr->max_speed_hz = min_t(u32, ss->src_clk >> 1,
+ SPRD_SPI_MAX_SPEED_HZ);
+
+ init_completion(&ss->xfer_completion);
+ platform_set_drvdata(pdev, sctlr);
+ ret = sprd_spi_clk_init(pdev, ss);
+ if (ret)
+ goto free_controller;
+
+ ret = sprd_spi_irq_init(pdev, ss);
+ if (ret)
+ goto free_controller;
+
+ ret = sprd_spi_dma_init(pdev, ss);
+ if (ret)
+ goto free_controller;
+
+ ret = clk_prepare_enable(ss->clk);
+ if (ret)
+ goto release_dma;
+
+ ret = pm_runtime_set_active(&pdev->dev);
+ if (ret < 0)
+ goto disable_clk;
+
+ pm_runtime_set_autosuspend_delay(&pdev->dev,
+ SPRD_SPI_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to resume SPI controller\n");
+ goto err_rpm_put;
+ }
+
+ ret = devm_spi_register_controller(&pdev->dev, sctlr);
+ if (ret)
+ goto err_rpm_put;
+
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
+
+ return 0;
+
+err_rpm_put:
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+disable_clk:
+ clk_disable_unprepare(ss->clk);
+release_dma:
+ sprd_spi_dma_release(ss);
+free_controller:
+ spi_controller_put(sctlr);
+
+ return ret;
+}
+
+static int sprd_spi_remove(struct platform_device *pdev)
+{
+ struct spi_controller *sctlr = platform_get_drvdata(pdev);
+ struct sprd_spi *ss = spi_controller_get_devdata(sctlr);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(ss->dev);
+ if (ret < 0) {
+ dev_err(ss->dev, "failed to resume SPI controller\n");
+ return ret;
+ }
+
+ spi_controller_suspend(sctlr);
+
+ if (ss->dma.enable)
+ sprd_spi_dma_release(ss);
+ clk_disable_unprepare(ss->clk);
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static int __maybe_unused sprd_spi_runtime_suspend(struct device *dev)
+{
+ struct spi_controller *sctlr = dev_get_drvdata(dev);
+ struct sprd_spi *ss = spi_controller_get_devdata(sctlr);
+
+ if (ss->dma.enable)
+ sprd_spi_dma_release(ss);
+
+ clk_disable_unprepare(ss->clk);
+
+ return 0;
+}
+
+static int __maybe_unused sprd_spi_runtime_resume(struct device *dev)
+{
+ struct spi_controller *sctlr = dev_get_drvdata(dev);
+ struct sprd_spi *ss = spi_controller_get_devdata(sctlr);
+ int ret;
+
+ ret = clk_prepare_enable(ss->clk);
+ if (ret)
+ return ret;
+
+ if (!ss->dma.enable)
+ return 0;
+
+ ret = sprd_spi_dma_request(ss);
+ if (ret)
+ clk_disable_unprepare(ss->clk);
+
+ return ret;
+}
+
+static const struct dev_pm_ops sprd_spi_pm_ops = {
+ SET_RUNTIME_PM_OPS(sprd_spi_runtime_suspend,
+ sprd_spi_runtime_resume, NULL)
+};
+
+static const struct of_device_id sprd_spi_of_match[] = {
+ { .compatible = "sprd,sc9860-spi", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sprd_spi_of_match);
+
+static struct platform_driver sprd_spi_driver = {
+ .driver = {
+ .name = "sprd-spi",
+ .of_match_table = sprd_spi_of_match,
+ .pm = &sprd_spi_pm_ops,
+ },
+ .probe = sprd_spi_probe,
+ .remove = sprd_spi_remove,
+};
+
+module_platform_driver(sprd_spi_driver);
+
+MODULE_DESCRIPTION("Spreadtrum SPI Controller driver");
+MODULE_AUTHOR("Lanqing Liu <lanqing.liu@spreadtrum.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-st-ssc4.c b/drivers/spi/spi-st-ssc4.c
new file mode 100644
index 000000000..843be8036
--- /dev/null
+++ b/drivers/spi/spi-st-ssc4.c
@@ -0,0 +1,460 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2008-2014 STMicroelectronics Limited
+ *
+ * Author: Angus Clark <Angus.Clark@st.com>
+ * Patrice Chotard <patrice.chotard@st.com>
+ * Lee Jones <lee.jones@linaro.org>
+ *
+ * SPI master mode controller driver, used in STMicroelectronics devices.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/pm_runtime.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+
+/* SSC registers */
+#define SSC_BRG 0x000
+#define SSC_TBUF 0x004
+#define SSC_RBUF 0x008
+#define SSC_CTL 0x00C
+#define SSC_IEN 0x010
+#define SSC_I2C 0x018
+
+/* SSC Control */
+#define SSC_CTL_DATA_WIDTH_9 0x8
+#define SSC_CTL_DATA_WIDTH_MSK 0xf
+#define SSC_CTL_BM 0xf
+#define SSC_CTL_HB BIT(4)
+#define SSC_CTL_PH BIT(5)
+#define SSC_CTL_PO BIT(6)
+#define SSC_CTL_SR BIT(7)
+#define SSC_CTL_MS BIT(8)
+#define SSC_CTL_EN BIT(9)
+#define SSC_CTL_LPB BIT(10)
+#define SSC_CTL_EN_TX_FIFO BIT(11)
+#define SSC_CTL_EN_RX_FIFO BIT(12)
+#define SSC_CTL_EN_CLST_RX BIT(13)
+
+/* SSC Interrupt Enable */
+#define SSC_IEN_TEEN BIT(2)
+
+#define FIFO_SIZE 8
+
+struct spi_st {
+ /* SSC SPI Controller */
+ void __iomem *base;
+ struct clk *clk;
+ struct device *dev;
+
+ /* SSC SPI current transaction */
+ const u8 *tx_ptr;
+ u8 *rx_ptr;
+ u16 bytes_per_word;
+ unsigned int words_remaining;
+ unsigned int baud;
+ struct completion done;
+};
+
+/* Load the TX FIFO */
+static void ssc_write_tx_fifo(struct spi_st *spi_st)
+{
+ unsigned int count, i;
+ uint32_t word = 0;
+
+ if (spi_st->words_remaining > FIFO_SIZE)
+ count = FIFO_SIZE;
+ else
+ count = spi_st->words_remaining;
+
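+	/*
+	 * For 16-bit FIFO words the two buffer bytes are packed MSB first:
+	 * the first byte ends up in bits 15:8 and the second in bits 7:0.
+	 */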
+ for (i = 0; i < count; i++) {
+ if (spi_st->tx_ptr) {
+ if (spi_st->bytes_per_word == 1) {
+ word = *spi_st->tx_ptr++;
+ } else {
+ word = *spi_st->tx_ptr++;
+ word = *spi_st->tx_ptr++ | (word << 8);
+ }
+ }
+ writel_relaxed(word, spi_st->base + SSC_TBUF);
+ }
+}
+
+/* Read the RX FIFO */
+static void ssc_read_rx_fifo(struct spi_st *spi_st)
+{
+ unsigned int count, i;
+ uint32_t word = 0;
+
+ if (spi_st->words_remaining > FIFO_SIZE)
+ count = FIFO_SIZE;
+ else
+ count = spi_st->words_remaining;
+
+ for (i = 0; i < count; i++) {
+ word = readl_relaxed(spi_st->base + SSC_RBUF);
+
+ if (spi_st->rx_ptr) {
+ if (spi_st->bytes_per_word == 1) {
+ *spi_st->rx_ptr++ = (uint8_t)word;
+ } else {
+ *spi_st->rx_ptr++ = (word >> 8);
+ *spi_st->rx_ptr++ = word & 0xff;
+ }
+ }
+ }
+ spi_st->words_remaining -= count;
+}
+
+static int spi_st_transfer_one(struct spi_master *master,
+ struct spi_device *spi, struct spi_transfer *t)
+{
+ struct spi_st *spi_st = spi_master_get_devdata(master);
+ uint32_t ctl = 0;
+
+ /* Setup transfer */
+ spi_st->tx_ptr = t->tx_buf;
+ spi_st->rx_ptr = t->rx_buf;
+
+ if (spi->bits_per_word > 8) {
+ /*
+ * Anything greater than 8 bits-per-word requires 2
+ * bytes-per-word in the RX/TX buffers
+ */
+ spi_st->bytes_per_word = 2;
+ spi_st->words_remaining = t->len / 2;
+
+ } else if (spi->bits_per_word == 8 && !(t->len & 0x1)) {
+ /*
+ * If transfer is even-length, and 8 bits-per-word, then
+ * implement as half-length 16 bits-per-word transfer
+ */
+ spi_st->bytes_per_word = 2;
+ spi_st->words_remaining = t->len / 2;
+
+ /* Set SSC_CTL to 16 bits-per-word */
+ ctl = readl_relaxed(spi_st->base + SSC_CTL);
+ writel_relaxed((ctl | 0xf), spi_st->base + SSC_CTL);
+
+ readl_relaxed(spi_st->base + SSC_RBUF);
+
+ } else {
+ spi_st->bytes_per_word = 1;
+ spi_st->words_remaining = t->len;
+ }
+
+ reinit_completion(&spi_st->done);
+
+ /* Start transfer by writing to the TX FIFO */
+ ssc_write_tx_fifo(spi_st);
+ writel_relaxed(SSC_IEN_TEEN, spi_st->base + SSC_IEN);
+
+ /* Wait for transfer to complete */
+ wait_for_completion(&spi_st->done);
+
+ /* Restore SSC_CTL if necessary */
+ if (ctl)
+ writel_relaxed(ctl, spi_st->base + SSC_CTL);
+
+ spi_finalize_current_transfer(spi->master);
+
+ return t->len;
+}
+
+/* the spi->mode bits understood by this driver: */
+#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_LOOP | SPI_CS_HIGH)
+static int spi_st_setup(struct spi_device *spi)
+{
+ struct spi_st *spi_st = spi_master_get_devdata(spi->master);
+ u32 spi_st_clk, sscbrg, var;
+ u32 hz = spi->max_speed_hz;
+
+ if (!hz) {
+ dev_err(&spi->dev, "max_speed_hz unspecified\n");
+ return -EINVAL;
+ }
+
+ if (!spi->cs_gpiod) {
+ dev_err(&spi->dev, "no valid gpio assigned\n");
+ return -EINVAL;
+ }
+
+ spi_st_clk = clk_get_rate(spi_st->clk);
+
+	/* Set SSC_BRG */
+ sscbrg = spi_st_clk / (2 * hz);
+ if (sscbrg < 0x07 || sscbrg > BIT(16)) {
+ dev_err(&spi->dev,
+ "baudrate %d outside valid range %d\n", sscbrg, hz);
+ return -EINVAL;
+ }
+
+ spi_st->baud = spi_st_clk / (2 * sscbrg);
+ if (sscbrg == BIT(16)) /* 16-bit counter wraps */
+ sscbrg = 0x0;
+
+ writel_relaxed(sscbrg, spi_st->base + SSC_BRG);
+
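+	/*
+	 * For example, assuming a 100 MHz SSC clock and a 1 MHz request,
+	 * sscbrg = 100000000 / (2 * 1000000) = 50 and the actual baud rate
+	 * is 100000000 / (2 * 50) = 1 MHz.
+	 */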
+ dev_dbg(&spi->dev,
+ "setting baudrate:target= %u hz, actual= %u hz, sscbrg= %u\n",
+ hz, spi_st->baud, sscbrg);
+
+ /* Set SSC_CTL and enable SSC */
+ var = readl_relaxed(spi_st->base + SSC_CTL);
+ var |= SSC_CTL_MS;
+
+ if (spi->mode & SPI_CPOL)
+ var |= SSC_CTL_PO;
+ else
+ var &= ~SSC_CTL_PO;
+
+ if (spi->mode & SPI_CPHA)
+ var |= SSC_CTL_PH;
+ else
+ var &= ~SSC_CTL_PH;
+
+ if ((spi->mode & SPI_LSB_FIRST) == 0)
+ var |= SSC_CTL_HB;
+ else
+ var &= ~SSC_CTL_HB;
+
+ if (spi->mode & SPI_LOOP)
+ var |= SSC_CTL_LPB;
+ else
+ var &= ~SSC_CTL_LPB;
+
+ var &= ~SSC_CTL_DATA_WIDTH_MSK;
+ var |= (spi->bits_per_word - 1);
+
+ var |= SSC_CTL_EN_TX_FIFO | SSC_CTL_EN_RX_FIFO;
+ var |= SSC_CTL_EN;
+
+ writel_relaxed(var, spi_st->base + SSC_CTL);
+
+ /* Clear the status register */
+ readl_relaxed(spi_st->base + SSC_RBUF);
+
+ return 0;
+}
+
+/* Interrupt fired when TX shift register becomes empty */
+static irqreturn_t spi_st_irq(int irq, void *dev_id)
+{
+ struct spi_st *spi_st = (struct spi_st *)dev_id;
+
+ /* Read RX FIFO */
+ ssc_read_rx_fifo(spi_st);
+
+ /* Fill TX FIFO */
+ if (spi_st->words_remaining) {
+ ssc_write_tx_fifo(spi_st);
+ } else {
+ /* TX/RX complete */
+ writel_relaxed(0x0, spi_st->base + SSC_IEN);
+ /*
+ * read SSC_IEN to ensure that this bit is set
+ * before re-enabling interrupt
+ */
+ readl(spi_st->base + SSC_IEN);
+ complete(&spi_st->done);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int spi_st_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct spi_master *master;
+ struct spi_st *spi_st;
+ int irq, ret = 0;
+ u32 var;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*spi_st));
+ if (!master)
+ return -ENOMEM;
+
+ master->dev.of_node = np;
+ master->mode_bits = MODEBITS;
+ master->setup = spi_st_setup;
+ master->transfer_one = spi_st_transfer_one;
+ master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
+ master->auto_runtime_pm = true;
+ master->bus_num = pdev->id;
+ master->use_gpio_descriptors = true;
+ spi_st = spi_master_get_devdata(master);
+
+ spi_st->clk = devm_clk_get(&pdev->dev, "ssc");
+ if (IS_ERR(spi_st->clk)) {
+ dev_err(&pdev->dev, "Unable to request clock\n");
+ ret = PTR_ERR(spi_st->clk);
+ goto put_master;
+ }
+
+ ret = clk_prepare_enable(spi_st->clk);
+ if (ret)
+ goto put_master;
+
+ init_completion(&spi_st->done);
+
+ /* Get resources */
+ spi_st->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(spi_st->base)) {
+ ret = PTR_ERR(spi_st->base);
+ goto clk_disable;
+ }
+
+ /* Disable I2C and Reset SSC */
+ writel_relaxed(0x0, spi_st->base + SSC_I2C);
+ var = readw_relaxed(spi_st->base + SSC_CTL);
+ var |= SSC_CTL_SR;
+ writel_relaxed(var, spi_st->base + SSC_CTL);
+
+ udelay(1);
+ var = readl_relaxed(spi_st->base + SSC_CTL);
+ var &= ~SSC_CTL_SR;
+ writel_relaxed(var, spi_st->base + SSC_CTL);
+
+ /* Set SSC into slave mode before reconfiguring PIO pins */
+ var = readl_relaxed(spi_st->base + SSC_CTL);
+ var &= ~SSC_CTL_MS;
+ writel_relaxed(var, spi_st->base + SSC_CTL);
+
+ irq = irq_of_parse_and_map(np, 0);
+ if (!irq) {
+ dev_err(&pdev->dev, "IRQ missing or invalid\n");
+ ret = -EINVAL;
+ goto clk_disable;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, spi_st_irq, 0,
+ pdev->name, spi_st);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request irq %d\n", irq);
+ goto clk_disable;
+ }
+
+ /* by default the device is on */
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ platform_set_drvdata(pdev, master);
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register master\n");
+ goto rpm_disable;
+ }
+
+ return 0;
+
+rpm_disable:
+ pm_runtime_disable(&pdev->dev);
+clk_disable:
+ clk_disable_unprepare(spi_st->clk);
+put_master:
+ spi_master_put(master);
+ return ret;
+}
+
+static int spi_st_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct spi_st *spi_st = spi_master_get_devdata(master);
+
+ pm_runtime_disable(&pdev->dev);
+
+ clk_disable_unprepare(spi_st->clk);
+
+ pinctrl_pm_select_sleep_state(&pdev->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int spi_st_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct spi_st *spi_st = spi_master_get_devdata(master);
+
+ writel_relaxed(0, spi_st->base + SSC_IEN);
+ pinctrl_pm_select_sleep_state(dev);
+
+ clk_disable_unprepare(spi_st->clk);
+
+ return 0;
+}
+
+static int spi_st_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct spi_st *spi_st = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_prepare_enable(spi_st->clk);
+ pinctrl_pm_select_default_state(dev);
+
+ return ret;
+}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int spi_st_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ int ret;
+
+ ret = spi_master_suspend(master);
+ if (ret)
+ return ret;
+
+ return pm_runtime_force_suspend(dev);
+}
+
+static int spi_st_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ int ret;
+
+ ret = spi_master_resume(master);
+ if (ret)
+ return ret;
+
+ return pm_runtime_force_resume(dev);
+}
+#endif
+
+static const struct dev_pm_ops spi_st_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(spi_st_suspend, spi_st_resume)
+ SET_RUNTIME_PM_OPS(spi_st_runtime_suspend, spi_st_runtime_resume, NULL)
+};
+
+static const struct of_device_id stm_spi_match[] = {
+ { .compatible = "st,comms-ssc4-spi", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, stm_spi_match);
+
+static struct platform_driver spi_st_driver = {
+ .driver = {
+ .name = "spi-st",
+ .pm = &spi_st_pm,
+ .of_match_table = of_match_ptr(stm_spi_match),
+ },
+ .probe = spi_st_probe,
+ .remove = spi_st_remove,
+};
+module_platform_driver(spi_st_driver);
+
+MODULE_AUTHOR("Patrice Chotard <patrice.chotard@st.com>");
+MODULE_DESCRIPTION("STM SSC SPI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c
new file mode 100644
index 000000000..9131660c1
--- /dev/null
+++ b/drivers/spi/spi-stm32-qspi.c
@@ -0,0 +1,982 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics 2018 - All Rights Reserved
+ * Author: Ludovic Barre <ludovic.barre@st.com> for STMicroelectronics.
+ */
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/sizes.h>
+#include <linux/spi/spi-mem.h>
+
+#define QSPI_CR 0x00
+#define CR_EN BIT(0)
+#define CR_ABORT BIT(1)
+#define CR_DMAEN BIT(2)
+#define CR_TCEN BIT(3)
+#define CR_SSHIFT BIT(4)
+#define CR_DFM BIT(6)
+#define CR_FSEL BIT(7)
+#define CR_FTHRES_SHIFT 8
+#define CR_TEIE BIT(16)
+#define CR_TCIE BIT(17)
+#define CR_FTIE BIT(18)
+#define CR_SMIE BIT(19)
+#define CR_TOIE BIT(20)
+#define CR_APMS BIT(22)
+#define CR_PRESC_MASK GENMASK(31, 24)
+
+#define QSPI_DCR 0x04
+#define DCR_FSIZE_MASK GENMASK(20, 16)
+
+#define QSPI_SR 0x08
+#define SR_TEF BIT(0)
+#define SR_TCF BIT(1)
+#define SR_FTF BIT(2)
+#define SR_SMF BIT(3)
+#define SR_TOF BIT(4)
+#define SR_BUSY BIT(5)
+#define SR_FLEVEL_MASK GENMASK(13, 8)
+
+#define QSPI_FCR 0x0c
+#define FCR_CTEF BIT(0)
+#define FCR_CTCF BIT(1)
+#define FCR_CSMF BIT(3)
+
+#define QSPI_DLR 0x10
+
+#define QSPI_CCR 0x14
+#define CCR_INST_MASK GENMASK(7, 0)
+#define CCR_IMODE_MASK GENMASK(9, 8)
+#define CCR_ADMODE_MASK GENMASK(11, 10)
+#define CCR_ADSIZE_MASK GENMASK(13, 12)
+#define CCR_DCYC_MASK GENMASK(22, 18)
+#define CCR_DMODE_MASK GENMASK(25, 24)
+#define CCR_FMODE_MASK GENMASK(27, 26)
+#define CCR_FMODE_INDW (0U << 26)
+#define CCR_FMODE_INDR (1U << 26)
+#define CCR_FMODE_APM (2U << 26)
+#define CCR_FMODE_MM (3U << 26)
+#define CCR_BUSWIDTH_0 0x0
+#define CCR_BUSWIDTH_1 0x1
+#define CCR_BUSWIDTH_2 0x2
+#define CCR_BUSWIDTH_4 0x3
+
+#define QSPI_AR 0x18
+#define QSPI_ABR 0x1c
+#define QSPI_DR 0x20
+#define QSPI_PSMKR 0x24
+#define QSPI_PSMAR 0x28
+#define QSPI_PIR 0x2c
+#define QSPI_LPTR 0x30
+
+#define STM32_QSPI_MAX_MMAP_SZ SZ_256M
+#define STM32_QSPI_MAX_NORCHIP 2
+
+#define STM32_FIFO_TIMEOUT_US 30000
+#define STM32_BUSY_TIMEOUT_US 100000
+#define STM32_ABT_TIMEOUT_US 100000
+#define STM32_COMP_TIMEOUT_MS 1000
+#define STM32_AUTOSUSPEND_DELAY -1
+
+struct stm32_qspi_flash {
+ u32 cs;
+ u32 presc;
+};
+
+struct stm32_qspi {
+ struct device *dev;
+ struct spi_controller *ctrl;
+ phys_addr_t phys_base;
+ void __iomem *io_base;
+ void __iomem *mm_base;
+ resource_size_t mm_size;
+ struct clk *clk;
+ u32 clk_rate;
+ struct stm32_qspi_flash flash[STM32_QSPI_MAX_NORCHIP];
+ struct completion data_completion;
+ struct completion match_completion;
+ u32 fmode;
+
+ struct dma_chan *dma_chtx;
+ struct dma_chan *dma_chrx;
+ struct completion dma_completion;
+
+ u32 cr_reg;
+ u32 dcr_reg;
+ unsigned long status_timeout;
+
+	/*
+	 * Protects the device configuration, which may differ between the
+	 * two flash accesses (bk1, bk2).
+	 */
+ struct mutex lock;
+};
+
+static irqreturn_t stm32_qspi_irq(int irq, void *dev_id)
+{
+ struct stm32_qspi *qspi = (struct stm32_qspi *)dev_id;
+ u32 cr, sr;
+
+ cr = readl_relaxed(qspi->io_base + QSPI_CR);
+ sr = readl_relaxed(qspi->io_base + QSPI_SR);
+
+ if (cr & CR_SMIE && sr & SR_SMF) {
+ /* disable irq */
+ cr &= ~CR_SMIE;
+ writel_relaxed(cr, qspi->io_base + QSPI_CR);
+ complete(&qspi->match_completion);
+
+ return IRQ_HANDLED;
+ }
+
+ if (sr & (SR_TEF | SR_TCF)) {
+ /* disable irq */
+ cr &= ~CR_TCIE & ~CR_TEIE;
+ writel_relaxed(cr, qspi->io_base + QSPI_CR);
+ complete(&qspi->data_completion);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void stm32_qspi_read_fifo(u8 *val, void __iomem *addr)
+{
+ *val = readb_relaxed(addr);
+}
+
+static void stm32_qspi_write_fifo(u8 *val, void __iomem *addr)
+{
+ writeb_relaxed(*val, addr);
+}
+
+static int stm32_qspi_tx_poll(struct stm32_qspi *qspi,
+ const struct spi_mem_op *op)
+{
+ void (*tx_fifo)(u8 *val, void __iomem *addr);
+ u32 len = op->data.nbytes, sr;
+ u8 *buf;
+ int ret;
+
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ tx_fifo = stm32_qspi_read_fifo;
+ buf = op->data.buf.in;
+
+ } else {
+ tx_fifo = stm32_qspi_write_fifo;
+ buf = (u8 *)op->data.buf.out;
+ }
+
+ while (len--) {
+ ret = readl_relaxed_poll_timeout_atomic(qspi->io_base + QSPI_SR,
+ sr, (sr & SR_FTF), 1,
+ STM32_FIFO_TIMEOUT_US);
+ if (ret) {
+ dev_err(qspi->dev, "fifo timeout (len:%d stat:%#x)\n",
+ len, sr);
+ return ret;
+ }
+ tx_fifo(buf++, qspi->io_base + QSPI_DR);
+ }
+
+ return 0;
+}
+
+static int stm32_qspi_tx_mm(struct stm32_qspi *qspi,
+ const struct spi_mem_op *op)
+{
+ memcpy_fromio(op->data.buf.in, qspi->mm_base + op->addr.val,
+ op->data.nbytes);
+ return 0;
+}
+
+static void stm32_qspi_dma_callback(void *arg)
+{
+ struct completion *dma_completion = arg;
+
+ complete(dma_completion);
+}
+
+static int stm32_qspi_tx_dma(struct stm32_qspi *qspi,
+ const struct spi_mem_op *op)
+{
+ struct dma_async_tx_descriptor *desc;
+ enum dma_transfer_direction dma_dir;
+ struct dma_chan *dma_ch;
+ struct sg_table sgt;
+ dma_cookie_t cookie;
+ u32 cr, t_out;
+ int err;
+
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ dma_dir = DMA_DEV_TO_MEM;
+ dma_ch = qspi->dma_chrx;
+ } else {
+ dma_dir = DMA_MEM_TO_DEV;
+ dma_ch = qspi->dma_chtx;
+ }
+
+	/*
+	 * spi_map_buf() returns -EINVAL if the buffer is not DMA-able
+	 * (DMA-able: in vmalloc | kmap | virt_addr_valid)
+	 */
+ err = spi_controller_dma_map_mem_op_data(qspi->ctrl, op, &sgt);
+ if (err)
+ return err;
+
+ desc = dmaengine_prep_slave_sg(dma_ch, sgt.sgl, sgt.nents,
+ dma_dir, DMA_PREP_INTERRUPT);
+ if (!desc) {
+ err = -ENOMEM;
+ goto out_unmap;
+ }
+
+ cr = readl_relaxed(qspi->io_base + QSPI_CR);
+
+ reinit_completion(&qspi->dma_completion);
+ desc->callback = stm32_qspi_dma_callback;
+ desc->callback_param = &qspi->dma_completion;
+ cookie = dmaengine_submit(desc);
+ err = dma_submit_error(cookie);
+ if (err)
+ goto out;
+
+ dma_async_issue_pending(dma_ch);
+
+ writel_relaxed(cr | CR_DMAEN, qspi->io_base + QSPI_CR);
+
+ t_out = sgt.nents * STM32_COMP_TIMEOUT_MS;
+ if (!wait_for_completion_timeout(&qspi->dma_completion,
+ msecs_to_jiffies(t_out)))
+ err = -ETIMEDOUT;
+
+ if (err)
+ dmaengine_terminate_all(dma_ch);
+
+out:
+ writel_relaxed(cr & ~CR_DMAEN, qspi->io_base + QSPI_CR);
+out_unmap:
+ spi_controller_dma_unmap_mem_op_data(qspi->ctrl, op, &sgt);
+
+ return err;
+}
+
+static int stm32_qspi_tx(struct stm32_qspi *qspi, const struct spi_mem_op *op)
+{
+ if (!op->data.nbytes)
+ return 0;
+
+ if (qspi->fmode == CCR_FMODE_MM)
+ return stm32_qspi_tx_mm(qspi, op);
+ else if (((op->data.dir == SPI_MEM_DATA_IN && qspi->dma_chrx) ||
+ (op->data.dir == SPI_MEM_DATA_OUT && qspi->dma_chtx)) &&
+ op->data.nbytes > 4)
+ if (!stm32_qspi_tx_dma(qspi, op))
+ return 0;
+
+ return stm32_qspi_tx_poll(qspi, op);
+}
+
+static int stm32_qspi_wait_nobusy(struct stm32_qspi *qspi)
+{
+ u32 sr;
+
+ return readl_relaxed_poll_timeout_atomic(qspi->io_base + QSPI_SR, sr,
+ !(sr & SR_BUSY), 1,
+ STM32_BUSY_TIMEOUT_US);
+}
+
+static int stm32_qspi_wait_cmd(struct stm32_qspi *qspi)
+{
+ u32 cr, sr;
+ int err = 0;
+
+ if ((readl_relaxed(qspi->io_base + QSPI_SR) & SR_TCF) ||
+ qspi->fmode == CCR_FMODE_APM)
+ goto out;
+
+ reinit_completion(&qspi->data_completion);
+ cr = readl_relaxed(qspi->io_base + QSPI_CR);
+ writel_relaxed(cr | CR_TCIE | CR_TEIE, qspi->io_base + QSPI_CR);
+
+ if (!wait_for_completion_timeout(&qspi->data_completion,
+ msecs_to_jiffies(STM32_COMP_TIMEOUT_MS))) {
+ err = -ETIMEDOUT;
+ } else {
+ sr = readl_relaxed(qspi->io_base + QSPI_SR);
+ if (sr & SR_TEF)
+ err = -EIO;
+ }
+
+out:
+ /* clear flags */
+ writel_relaxed(FCR_CTCF | FCR_CTEF, qspi->io_base + QSPI_FCR);
+ if (!err)
+ err = stm32_qspi_wait_nobusy(qspi);
+
+ return err;
+}
+
+static int stm32_qspi_wait_poll_status(struct stm32_qspi *qspi)
+{
+ u32 cr;
+
+ reinit_completion(&qspi->match_completion);
+ cr = readl_relaxed(qspi->io_base + QSPI_CR);
+ writel_relaxed(cr | CR_SMIE, qspi->io_base + QSPI_CR);
+
+ if (!wait_for_completion_timeout(&qspi->match_completion,
+ msecs_to_jiffies(qspi->status_timeout)))
+ return -ETIMEDOUT;
+
+ writel_relaxed(FCR_CSMF, qspi->io_base + QSPI_FCR);
+
+ return 0;
+}
+
+static int stm32_qspi_get_mode(u8 buswidth)
+{
+ if (buswidth == 4)
+ return CCR_BUSWIDTH_4;
+
+ return buswidth;
+}
+
+static int stm32_qspi_send(struct spi_device *spi, const struct spi_mem_op *op)
+{
+ struct stm32_qspi *qspi = spi_controller_get_devdata(spi->master);
+ struct stm32_qspi_flash *flash = &qspi->flash[spi->chip_select];
+ u32 ccr, cr;
+ int timeout, err = 0, err_poll_status = 0;
+
+ dev_dbg(qspi->dev, "cmd:%#x mode:%d.%d.%d.%d addr:%#llx len:%#x\n",
+ op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
+ op->dummy.buswidth, op->data.buswidth,
+ op->addr.val, op->data.nbytes);
+
+ cr = readl_relaxed(qspi->io_base + QSPI_CR);
+ cr &= ~CR_PRESC_MASK & ~CR_FSEL;
+ cr |= FIELD_PREP(CR_PRESC_MASK, flash->presc);
+ cr |= FIELD_PREP(CR_FSEL, flash->cs);
+ writel_relaxed(cr, qspi->io_base + QSPI_CR);
+
+ if (op->data.nbytes)
+ writel_relaxed(op->data.nbytes - 1,
+ qspi->io_base + QSPI_DLR);
+
+ ccr = qspi->fmode;
+ ccr |= FIELD_PREP(CCR_INST_MASK, op->cmd.opcode);
+ ccr |= FIELD_PREP(CCR_IMODE_MASK,
+ stm32_qspi_get_mode(op->cmd.buswidth));
+
+ if (op->addr.nbytes) {
+ ccr |= FIELD_PREP(CCR_ADMODE_MASK,
+ stm32_qspi_get_mode(op->addr.buswidth));
+ ccr |= FIELD_PREP(CCR_ADSIZE_MASK, op->addr.nbytes - 1);
+ }
+
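+	/*
+	 * Dummy bytes are converted to dummy clock cycles below; e.g. 3
+	 * dummy bytes on a quad (4-bit) bus become 3 * 8 / 4 = 6 cycles.
+	 */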
+ if (op->dummy.nbytes)
+ ccr |= FIELD_PREP(CCR_DCYC_MASK,
+ op->dummy.nbytes * 8 / op->dummy.buswidth);
+
+ if (op->data.nbytes) {
+ ccr |= FIELD_PREP(CCR_DMODE_MASK,
+ stm32_qspi_get_mode(op->data.buswidth));
+ }
+
+ writel_relaxed(ccr, qspi->io_base + QSPI_CCR);
+
+ if (op->addr.nbytes && qspi->fmode != CCR_FMODE_MM)
+ writel_relaxed(op->addr.val, qspi->io_base + QSPI_AR);
+
+ if (qspi->fmode == CCR_FMODE_APM)
+ err_poll_status = stm32_qspi_wait_poll_status(qspi);
+
+ err = stm32_qspi_tx(qspi, op);
+
+	/*
+	 * Abort in:
+	 * - the error case
+	 * - memory-mapped read: prefetching must be stopped if we read the
+	 *   last byte of the device (device size - fifo size). As the
+	 *   device size is not known, prefetching is always stopped.
+	 */
+ if (err || err_poll_status || qspi->fmode == CCR_FMODE_MM)
+ goto abort;
+
+ /* wait end of tx in indirect mode */
+ err = stm32_qspi_wait_cmd(qspi);
+ if (err)
+ goto abort;
+
+ return 0;
+
+abort:
+ cr = readl_relaxed(qspi->io_base + QSPI_CR) | CR_ABORT;
+ writel_relaxed(cr, qspi->io_base + QSPI_CR);
+
+	/* wait for the abort bit to be cleared by hw */
+ timeout = readl_relaxed_poll_timeout_atomic(qspi->io_base + QSPI_CR,
+ cr, !(cr & CR_ABORT), 1,
+ STM32_ABT_TIMEOUT_US);
+
+ writel_relaxed(FCR_CTCF | FCR_CSMF, qspi->io_base + QSPI_FCR);
+
+ if (err || err_poll_status || timeout)
+ dev_err(qspi->dev, "%s err:%d err_poll_status:%d abort timeout:%d\n",
+ __func__, err, err_poll_status, timeout);
+
+ return err;
+}
+
+static int stm32_qspi_poll_status(struct spi_mem *mem, const struct spi_mem_op *op,
+ u16 mask, u16 match,
+ unsigned long initial_delay_us,
+ unsigned long polling_rate_us,
+ unsigned long timeout_ms)
+{
+ struct stm32_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
+ int ret;
+
+ if (!spi_mem_supports_op(mem, op))
+ return -EOPNOTSUPP;
+
+ ret = pm_runtime_resume_and_get(qspi->dev);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&qspi->lock);
+
+ writel_relaxed(mask, qspi->io_base + QSPI_PSMKR);
+ writel_relaxed(match, qspi->io_base + QSPI_PSMAR);
+ qspi->fmode = CCR_FMODE_APM;
+ qspi->status_timeout = timeout_ms;
+
+ ret = stm32_qspi_send(mem->spi, op);
+ mutex_unlock(&qspi->lock);
+
+ pm_runtime_mark_last_busy(qspi->dev);
+ pm_runtime_put_autosuspend(qspi->dev);
+
+ return ret;
+}
+
+static int stm32_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ struct stm32_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(qspi->dev);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&qspi->lock);
+ if (op->data.dir == SPI_MEM_DATA_IN && op->data.nbytes)
+ qspi->fmode = CCR_FMODE_INDR;
+ else
+ qspi->fmode = CCR_FMODE_INDW;
+
+ ret = stm32_qspi_send(mem->spi, op);
+ mutex_unlock(&qspi->lock);
+
+ pm_runtime_mark_last_busy(qspi->dev);
+ pm_runtime_put_autosuspend(qspi->dev);
+
+ return ret;
+}
+
+static int stm32_qspi_dirmap_create(struct spi_mem_dirmap_desc *desc)
+{
+ struct stm32_qspi *qspi = spi_controller_get_devdata(desc->mem->spi->master);
+
+ if (desc->info.op_tmpl.data.dir == SPI_MEM_DATA_OUT)
+ return -EOPNOTSUPP;
+
+ /* should never happen, as mm_base == null is an error probe exit condition */
+ if (!qspi->mm_base && desc->info.op_tmpl.data.dir == SPI_MEM_DATA_IN)
+ return -EOPNOTSUPP;
+
+ if (!qspi->mm_size)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static ssize_t stm32_qspi_dirmap_read(struct spi_mem_dirmap_desc *desc,
+ u64 offs, size_t len, void *buf)
+{
+ struct stm32_qspi *qspi = spi_controller_get_devdata(desc->mem->spi->master);
+ struct spi_mem_op op;
+ u32 addr_max;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(qspi->dev);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&qspi->lock);
+	/*
+	 * Make a local copy of desc op_tmpl and complete the dirmap rdesc
+	 * spi_mem_op template with offs, len and *buf in order to gather
+	 * all the needed transfer information into struct spi_mem_op.
+	 */
+ memcpy(&op, &desc->info.op_tmpl, sizeof(struct spi_mem_op));
+ dev_dbg(qspi->dev, "%s len = 0x%zx offs = 0x%llx buf = 0x%p\n", __func__, len, offs, buf);
+
+ op.data.nbytes = len;
+ op.addr.val = desc->info.offset + offs;
+ op.data.buf.in = buf;
+
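+	/*
+	 * Use the memory-mapped read path only when the whole access fits
+	 * inside the mapped window and the op uses an address phase;
+	 * otherwise fall back to indirect read.
+	 */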
+ addr_max = op.addr.val + op.data.nbytes + 1;
+ if (addr_max < qspi->mm_size && op.addr.buswidth)
+ qspi->fmode = CCR_FMODE_MM;
+ else
+ qspi->fmode = CCR_FMODE_INDR;
+
+ ret = stm32_qspi_send(desc->mem->spi, &op);
+ mutex_unlock(&qspi->lock);
+
+ pm_runtime_mark_last_busy(qspi->dev);
+ pm_runtime_put_autosuspend(qspi->dev);
+
+ return ret ?: len;
+}
+
+static int stm32_qspi_transfer_one_message(struct spi_controller *ctrl,
+ struct spi_message *msg)
+{
+ struct stm32_qspi *qspi = spi_controller_get_devdata(ctrl);
+ struct spi_transfer *transfer;
+ struct spi_device *spi = msg->spi;
+ struct spi_mem_op op;
+ int ret = 0;
+
+ if (!spi->cs_gpiod)
+ return -EOPNOTSUPP;
+
+ ret = pm_runtime_resume_and_get(qspi->dev);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&qspi->lock);
+
+ gpiod_set_value_cansleep(spi->cs_gpiod, true);
+
+ list_for_each_entry(transfer, &msg->transfers, transfer_list) {
+ u8 dummy_bytes = 0;
+
+ memset(&op, 0, sizeof(op));
+
+ dev_dbg(qspi->dev, "tx_buf:%p tx_nbits:%d rx_buf:%p rx_nbits:%d len:%d dummy_data:%d\n",
+ transfer->tx_buf, transfer->tx_nbits,
+ transfer->rx_buf, transfer->rx_nbits,
+ transfer->len, transfer->dummy_data);
+
+		/*
+		 * QSPI hardware supports dummy byte transfers.
+		 * If the current transfer carries only dummy bytes, merge it
+		 * with the next transfer in order to take the QSPI block
+		 * constraint into account.
+		 */
+ if (transfer->dummy_data) {
+ op.dummy.buswidth = transfer->tx_nbits;
+ op.dummy.nbytes = transfer->len;
+ dummy_bytes = transfer->len;
+
+			/* if this happens, the message is not correctly built */
+ if (list_is_last(&transfer->transfer_list, &msg->transfers)) {
+ ret = -EINVAL;
+ goto end_of_transfer;
+ }
+
+ transfer = list_next_entry(transfer, transfer_list);
+ }
+
+ op.data.nbytes = transfer->len;
+
+ if (transfer->rx_buf) {
+ qspi->fmode = CCR_FMODE_INDR;
+ op.data.buswidth = transfer->rx_nbits;
+ op.data.dir = SPI_MEM_DATA_IN;
+ op.data.buf.in = transfer->rx_buf;
+ } else {
+ qspi->fmode = CCR_FMODE_INDW;
+ op.data.buswidth = transfer->tx_nbits;
+ op.data.dir = SPI_MEM_DATA_OUT;
+ op.data.buf.out = transfer->tx_buf;
+ }
+
+ ret = stm32_qspi_send(spi, &op);
+ if (ret)
+ goto end_of_transfer;
+
+ msg->actual_length += transfer->len + dummy_bytes;
+ }
+
+end_of_transfer:
+ gpiod_set_value_cansleep(spi->cs_gpiod, false);
+
+ mutex_unlock(&qspi->lock);
+
+ msg->status = ret;
+ spi_finalize_current_message(ctrl);
+
+ pm_runtime_mark_last_busy(qspi->dev);
+ pm_runtime_put_autosuspend(qspi->dev);
+
+ return ret;
+}
+
+static int stm32_qspi_setup(struct spi_device *spi)
+{
+ struct spi_controller *ctrl = spi->master;
+ struct stm32_qspi *qspi = spi_controller_get_devdata(ctrl);
+ struct stm32_qspi_flash *flash;
+ u32 presc, mode;
+ int ret;
+
+ if (ctrl->busy)
+ return -EBUSY;
+
+ if (!spi->max_speed_hz)
+ return -EINVAL;
+
+ mode = spi->mode & (SPI_TX_OCTAL | SPI_RX_OCTAL);
+ if ((mode == SPI_TX_OCTAL || mode == SPI_RX_OCTAL) ||
+ ((mode == (SPI_TX_OCTAL | SPI_RX_OCTAL)) &&
+ gpiod_count(qspi->dev, "cs") == -ENOENT)) {
+ dev_err(qspi->dev, "spi-rx-bus-width\\/spi-tx-bus-width\\/cs-gpios\n");
+ dev_err(qspi->dev, "configuration not supported\n");
+
+ return -EINVAL;
+ }
+
+ ret = pm_runtime_resume_and_get(qspi->dev);
+ if (ret < 0)
+ return ret;
+
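+	/*
+	 * For example, assuming a 200 MHz kernel clock and a 50 MHz flash,
+	 * presc = DIV_ROUND_UP(200000000, 50000000) - 1 = 3, i.e. the QSPI
+	 * clock runs at 200 MHz / (3 + 1) = 50 MHz.
+	 */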
+ presc = DIV_ROUND_UP(qspi->clk_rate, spi->max_speed_hz) - 1;
+
+ flash = &qspi->flash[spi->chip_select];
+ flash->cs = spi->chip_select;
+ flash->presc = presc;
+
+ mutex_lock(&qspi->lock);
+ qspi->cr_reg = CR_APMS | 3 << CR_FTHRES_SHIFT | CR_SSHIFT | CR_EN;
+
+	/*
+	 * Dual flash mode is only enabled when SPI_TX_OCTAL and SPI_RX_OCTAL
+	 * are both set in spi->mode and the "cs-gpios" property is found in
+	 * the device tree.
+	 */
+ if (mode == (SPI_TX_OCTAL | SPI_RX_OCTAL)) {
+ qspi->cr_reg |= CR_DFM;
+ dev_dbg(qspi->dev, "Dual flash mode enable");
+ }
+
+ writel_relaxed(qspi->cr_reg, qspi->io_base + QSPI_CR);
+
+ /* set dcr fsize to max address */
+ qspi->dcr_reg = DCR_FSIZE_MASK;
+ writel_relaxed(qspi->dcr_reg, qspi->io_base + QSPI_DCR);
+ mutex_unlock(&qspi->lock);
+
+ pm_runtime_mark_last_busy(qspi->dev);
+ pm_runtime_put_autosuspend(qspi->dev);
+
+ return 0;
+}
+
+static int stm32_qspi_dma_setup(struct stm32_qspi *qspi)
+{
+ struct dma_slave_config dma_cfg;
+ struct device *dev = qspi->dev;
+ int ret = 0;
+
+ memset(&dma_cfg, 0, sizeof(dma_cfg));
+
+ dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ dma_cfg.src_addr = qspi->phys_base + QSPI_DR;
+ dma_cfg.dst_addr = qspi->phys_base + QSPI_DR;
+ dma_cfg.src_maxburst = 4;
+ dma_cfg.dst_maxburst = 4;
+
+ qspi->dma_chrx = dma_request_chan(dev, "rx");
+ if (IS_ERR(qspi->dma_chrx)) {
+ ret = PTR_ERR(qspi->dma_chrx);
+ qspi->dma_chrx = NULL;
+ if (ret == -EPROBE_DEFER)
+ goto out;
+ } else {
+ if (dmaengine_slave_config(qspi->dma_chrx, &dma_cfg)) {
+ dev_err(dev, "dma rx config failed\n");
+ dma_release_channel(qspi->dma_chrx);
+ qspi->dma_chrx = NULL;
+ }
+ }
+
+ qspi->dma_chtx = dma_request_chan(dev, "tx");
+ if (IS_ERR(qspi->dma_chtx)) {
+ ret = PTR_ERR(qspi->dma_chtx);
+ qspi->dma_chtx = NULL;
+ } else {
+ if (dmaengine_slave_config(qspi->dma_chtx, &dma_cfg)) {
+ dev_err(dev, "dma tx config failed\n");
+ dma_release_channel(qspi->dma_chtx);
+ qspi->dma_chtx = NULL;
+ }
+ }
+
+out:
+ init_completion(&qspi->dma_completion);
+
+ if (ret != -EPROBE_DEFER)
+ ret = 0;
+
+ return ret;
+}
+
+static void stm32_qspi_dma_free(struct stm32_qspi *qspi)
+{
+ if (qspi->dma_chtx)
+ dma_release_channel(qspi->dma_chtx);
+ if (qspi->dma_chrx)
+ dma_release_channel(qspi->dma_chrx);
+}
+
+/*
+ * no special host constraint, so use default spi_mem_default_supports_op
+ * to check supported mode.
+ */
+static const struct spi_controller_mem_ops stm32_qspi_mem_ops = {
+ .exec_op = stm32_qspi_exec_op,
+ .dirmap_create = stm32_qspi_dirmap_create,
+ .dirmap_read = stm32_qspi_dirmap_read,
+ .poll_status = stm32_qspi_poll_status,
+};
+
+static int stm32_qspi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct spi_controller *ctrl;
+ struct reset_control *rstc;
+ struct stm32_qspi *qspi;
+ struct resource *res;
+ int ret, irq;
+
+ ctrl = devm_spi_alloc_master(dev, sizeof(*qspi));
+ if (!ctrl)
+ return -ENOMEM;
+
+ qspi = spi_controller_get_devdata(ctrl);
+ qspi->ctrl = ctrl;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi");
+ qspi->io_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(qspi->io_base))
+ return PTR_ERR(qspi->io_base);
+
+ qspi->phys_base = res->start;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_mm");
+ qspi->mm_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(qspi->mm_base))
+ return PTR_ERR(qspi->mm_base);
+
+ qspi->mm_size = resource_size(res);
+ if (qspi->mm_size > STM32_QSPI_MAX_MMAP_SZ)
+ return -EINVAL;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(dev, irq, stm32_qspi_irq, 0,
+ dev_name(dev), qspi);
+ if (ret) {
+ dev_err(dev, "failed to request irq\n");
+ return ret;
+ }
+
+ init_completion(&qspi->data_completion);
+ init_completion(&qspi->match_completion);
+
+ qspi->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(qspi->clk))
+ return PTR_ERR(qspi->clk);
+
+ qspi->clk_rate = clk_get_rate(qspi->clk);
+ if (!qspi->clk_rate)
+ return -EINVAL;
+
+ ret = clk_prepare_enable(qspi->clk);
+ if (ret) {
+ dev_err(dev, "can not enable the clock\n");
+ return ret;
+ }
+
+ rstc = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(rstc)) {
+ ret = PTR_ERR(rstc);
+ if (ret == -EPROBE_DEFER)
+ goto err_clk_disable;
+ } else {
+ reset_control_assert(rstc);
+ udelay(2);
+ reset_control_deassert(rstc);
+ }
+
+ qspi->dev = dev;
+ platform_set_drvdata(pdev, qspi);
+ ret = stm32_qspi_dma_setup(qspi);
+ if (ret)
+ goto err_dma_free;
+
+ mutex_init(&qspi->lock);
+
+ ctrl->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_OCTAL
+ | SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_OCTAL;
+ ctrl->setup = stm32_qspi_setup;
+ ctrl->bus_num = -1;
+ ctrl->mem_ops = &stm32_qspi_mem_ops;
+ ctrl->use_gpio_descriptors = true;
+ ctrl->transfer_one_message = stm32_qspi_transfer_one_message;
+ ctrl->num_chipselect = STM32_QSPI_MAX_NORCHIP;
+ ctrl->dev.of_node = dev->of_node;
+
+ pm_runtime_set_autosuspend_delay(dev, STM32_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_get_noresume(dev);
+
+ ret = spi_register_master(ctrl);
+ if (ret)
+ goto err_pm_runtime_free;
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return 0;
+
+err_pm_runtime_free:
+ pm_runtime_get_sync(qspi->dev);
+ /* disable qspi */
+ writel_relaxed(0, qspi->io_base + QSPI_CR);
+ mutex_destroy(&qspi->lock);
+ pm_runtime_put_noidle(qspi->dev);
+ pm_runtime_disable(qspi->dev);
+ pm_runtime_set_suspended(qspi->dev);
+ pm_runtime_dont_use_autosuspend(qspi->dev);
+err_dma_free:
+ stm32_qspi_dma_free(qspi);
+err_clk_disable:
+ clk_disable_unprepare(qspi->clk);
+
+ return ret;
+}
+
+static int stm32_qspi_remove(struct platform_device *pdev)
+{
+ struct stm32_qspi *qspi = platform_get_drvdata(pdev);
+
+ pm_runtime_get_sync(qspi->dev);
+ spi_unregister_master(qspi->ctrl);
+ /* disable qspi */
+ writel_relaxed(0, qspi->io_base + QSPI_CR);
+ stm32_qspi_dma_free(qspi);
+ mutex_destroy(&qspi->lock);
+ pm_runtime_put_noidle(qspi->dev);
+ pm_runtime_disable(qspi->dev);
+ pm_runtime_set_suspended(qspi->dev);
+ pm_runtime_dont_use_autosuspend(qspi->dev);
+ clk_disable_unprepare(qspi->clk);
+
+ return 0;
+}
+
+static int __maybe_unused stm32_qspi_runtime_suspend(struct device *dev)
+{
+ struct stm32_qspi *qspi = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(qspi->clk);
+
+ return 0;
+}
+
+static int __maybe_unused stm32_qspi_runtime_resume(struct device *dev)
+{
+ struct stm32_qspi *qspi = dev_get_drvdata(dev);
+
+ return clk_prepare_enable(qspi->clk);
+}
+
+static int __maybe_unused stm32_qspi_suspend(struct device *dev)
+{
+ pinctrl_pm_select_sleep_state(dev);
+
+ return pm_runtime_force_suspend(dev);
+}
+
+static int __maybe_unused stm32_qspi_resume(struct device *dev)
+{
+ struct stm32_qspi *qspi = dev_get_drvdata(dev);
+ int ret;
+
+ ret = pm_runtime_force_resume(dev);
+ if (ret < 0)
+ return ret;
+
+ pinctrl_pm_select_default_state(dev);
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ return ret;
+
+ writel_relaxed(qspi->cr_reg, qspi->io_base + QSPI_CR);
+ writel_relaxed(qspi->dcr_reg, qspi->io_base + QSPI_DCR);
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops stm32_qspi_pm_ops = {
+ SET_RUNTIME_PM_OPS(stm32_qspi_runtime_suspend,
+ stm32_qspi_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(stm32_qspi_suspend, stm32_qspi_resume)
+};
+
+static const struct of_device_id stm32_qspi_match[] = {
+ {.compatible = "st,stm32f469-qspi"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, stm32_qspi_match);
+
+static struct platform_driver stm32_qspi_driver = {
+ .probe = stm32_qspi_probe,
+ .remove = stm32_qspi_remove,
+ .driver = {
+ .name = "stm32-qspi",
+ .of_match_table = stm32_qspi_match,
+ .pm = &stm32_qspi_pm_ops,
+ },
+};
+module_platform_driver(stm32_qspi_driver);
+
+MODULE_AUTHOR("Ludovic Barre <ludovic.barre@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics STM32 quad spi driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
new file mode 100644
index 000000000..122418155
--- /dev/null
+++ b/drivers/spi/spi-stm32.c
@@ -0,0 +1,2047 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// STMicroelectronics STM32 SPI Controller driver (master mode only)
+//
+// Copyright (C) 2017, STMicroelectronics - All Rights Reserved
+// Author(s): Amelie Delaunay <amelie.delaunay@st.com> for STMicroelectronics.
+
+#include <linux/bitfield.h>
+#include <linux/debugfs.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/spi/spi.h>
+
+#define DRIVER_NAME "spi_stm32"
+
+/* STM32F4 SPI registers */
+#define STM32F4_SPI_CR1 0x00
+#define STM32F4_SPI_CR2 0x04
+#define STM32F4_SPI_SR 0x08
+#define STM32F4_SPI_DR 0x0C
+#define STM32F4_SPI_I2SCFGR 0x1C
+
+/* STM32F4_SPI_CR1 bit fields */
+#define STM32F4_SPI_CR1_CPHA BIT(0)
+#define STM32F4_SPI_CR1_CPOL BIT(1)
+#define STM32F4_SPI_CR1_MSTR BIT(2)
+#define STM32F4_SPI_CR1_BR_SHIFT 3
+#define STM32F4_SPI_CR1_BR GENMASK(5, 3)
+#define STM32F4_SPI_CR1_SPE BIT(6)
+#define STM32F4_SPI_CR1_LSBFRST BIT(7)
+#define STM32F4_SPI_CR1_SSI BIT(8)
+#define STM32F4_SPI_CR1_SSM BIT(9)
+#define STM32F4_SPI_CR1_RXONLY BIT(10)
+#define STM32F4_SPI_CR1_DFF BIT(11)
+#define STM32F4_SPI_CR1_CRCNEXT BIT(12)
+#define STM32F4_SPI_CR1_CRCEN BIT(13)
+#define STM32F4_SPI_CR1_BIDIOE BIT(14)
+#define STM32F4_SPI_CR1_BIDIMODE BIT(15)
+#define STM32F4_SPI_CR1_BR_MIN 0
+#define STM32F4_SPI_CR1_BR_MAX (GENMASK(5, 3) >> 3)
+
+/* STM32F4_SPI_CR2 bit fields */
+#define STM32F4_SPI_CR2_RXDMAEN BIT(0)
+#define STM32F4_SPI_CR2_TXDMAEN BIT(1)
+#define STM32F4_SPI_CR2_SSOE BIT(2)
+#define STM32F4_SPI_CR2_FRF BIT(4)
+#define STM32F4_SPI_CR2_ERRIE BIT(5)
+#define STM32F4_SPI_CR2_RXNEIE BIT(6)
+#define STM32F4_SPI_CR2_TXEIE BIT(7)
+
+/* STM32F4_SPI_SR bit fields */
+#define STM32F4_SPI_SR_RXNE BIT(0)
+#define STM32F4_SPI_SR_TXE BIT(1)
+#define STM32F4_SPI_SR_CHSIDE BIT(2)
+#define STM32F4_SPI_SR_UDR BIT(3)
+#define STM32F4_SPI_SR_CRCERR BIT(4)
+#define STM32F4_SPI_SR_MODF BIT(5)
+#define STM32F4_SPI_SR_OVR BIT(6)
+#define STM32F4_SPI_SR_BSY BIT(7)
+#define STM32F4_SPI_SR_FRE BIT(8)
+
+/* STM32F4_SPI_I2SCFGR bit fields */
+#define STM32F4_SPI_I2SCFGR_I2SMOD BIT(11)
+
+/* STM32F4 SPI Baud Rate min/max divisor */
+#define STM32F4_SPI_BR_DIV_MIN (2 << STM32F4_SPI_CR1_BR_MIN)
+#define STM32F4_SPI_BR_DIV_MAX (2 << STM32F4_SPI_CR1_BR_MAX)
+
+/* STM32H7 SPI registers */
+#define STM32H7_SPI_CR1 0x00
+#define STM32H7_SPI_CR2 0x04
+#define STM32H7_SPI_CFG1 0x08
+#define STM32H7_SPI_CFG2 0x0C
+#define STM32H7_SPI_IER 0x10
+#define STM32H7_SPI_SR 0x14
+#define STM32H7_SPI_IFCR 0x18
+#define STM32H7_SPI_TXDR 0x20
+#define STM32H7_SPI_RXDR 0x30
+#define STM32H7_SPI_I2SCFGR 0x50
+
+/* STM32H7_SPI_CR1 bit fields */
+#define STM32H7_SPI_CR1_SPE BIT(0)
+#define STM32H7_SPI_CR1_MASRX BIT(8)
+#define STM32H7_SPI_CR1_CSTART BIT(9)
+#define STM32H7_SPI_CR1_CSUSP BIT(10)
+#define STM32H7_SPI_CR1_HDDIR BIT(11)
+#define STM32H7_SPI_CR1_SSI BIT(12)
+
+/* STM32H7_SPI_CR2 bit fields */
+#define STM32H7_SPI_CR2_TSIZE GENMASK(15, 0)
+#define STM32H7_SPI_TSIZE_MAX GENMASK(15, 0)
+
+/* STM32H7_SPI_CFG1 bit fields */
+#define STM32H7_SPI_CFG1_DSIZE GENMASK(4, 0)
+#define STM32H7_SPI_CFG1_FTHLV GENMASK(8, 5)
+#define STM32H7_SPI_CFG1_RXDMAEN BIT(14)
+#define STM32H7_SPI_CFG1_TXDMAEN BIT(15)
+#define STM32H7_SPI_CFG1_MBR GENMASK(30, 28)
+#define STM32H7_SPI_CFG1_MBR_SHIFT 28
+#define STM32H7_SPI_CFG1_MBR_MIN 0
+#define STM32H7_SPI_CFG1_MBR_MAX (GENMASK(30, 28) >> 28)
+
+/* STM32H7_SPI_CFG2 bit fields */
+#define STM32H7_SPI_CFG2_MIDI GENMASK(7, 4)
+#define STM32H7_SPI_CFG2_COMM GENMASK(18, 17)
+#define STM32H7_SPI_CFG2_SP GENMASK(21, 19)
+#define STM32H7_SPI_CFG2_MASTER BIT(22)
+#define STM32H7_SPI_CFG2_LSBFRST BIT(23)
+#define STM32H7_SPI_CFG2_CPHA BIT(24)
+#define STM32H7_SPI_CFG2_CPOL BIT(25)
+#define STM32H7_SPI_CFG2_SSM BIT(26)
+#define STM32H7_SPI_CFG2_AFCNTR BIT(31)
+
+/* STM32H7_SPI_IER bit fields */
+#define STM32H7_SPI_IER_RXPIE BIT(0)
+#define STM32H7_SPI_IER_TXPIE BIT(1)
+#define STM32H7_SPI_IER_DXPIE BIT(2)
+#define STM32H7_SPI_IER_EOTIE BIT(3)
+#define STM32H7_SPI_IER_TXTFIE BIT(4)
+#define STM32H7_SPI_IER_OVRIE BIT(6)
+#define STM32H7_SPI_IER_MODFIE BIT(9)
+#define STM32H7_SPI_IER_ALL GENMASK(10, 0)
+
+/* STM32H7_SPI_SR bit fields */
+#define STM32H7_SPI_SR_RXP BIT(0)
+#define STM32H7_SPI_SR_TXP BIT(1)
+#define STM32H7_SPI_SR_EOT BIT(3)
+#define STM32H7_SPI_SR_OVR BIT(6)
+#define STM32H7_SPI_SR_MODF BIT(9)
+#define STM32H7_SPI_SR_SUSP BIT(11)
+#define STM32H7_SPI_SR_RXPLVL GENMASK(14, 13)
+#define STM32H7_SPI_SR_RXWNE BIT(15)
+
+/* STM32H7_SPI_IFCR bit fields */
+#define STM32H7_SPI_IFCR_ALL GENMASK(11, 3)
+
+/* STM32H7_SPI_I2SCFGR bit fields */
+#define STM32H7_SPI_I2SCFGR_I2SMOD BIT(0)
+
+/* STM32H7 SPI Master Baud Rate min/max divisor */
+#define STM32H7_SPI_MBR_DIV_MIN (2 << STM32H7_SPI_CFG1_MBR_MIN)
+#define STM32H7_SPI_MBR_DIV_MAX (2 << STM32H7_SPI_CFG1_MBR_MAX)
+
+/* STM32H7 SPI Communication mode */
+#define STM32H7_SPI_FULL_DUPLEX 0
+#define STM32H7_SPI_SIMPLEX_TX 1
+#define STM32H7_SPI_SIMPLEX_RX 2
+#define STM32H7_SPI_HALF_DUPLEX 3
+
+/* SPI Communication type */
+#define SPI_FULL_DUPLEX 0
+#define SPI_SIMPLEX_TX 1
+#define SPI_SIMPLEX_RX 2
+#define SPI_3WIRE_TX 3
+#define SPI_3WIRE_RX 4
+
+#define STM32_SPI_AUTOSUSPEND_DELAY 1 /* 1 ms */
+
+/*
+ * use PIO for small transfers, avoiding DMA setup/teardown overhead for drivers
+ * without fifo buffers.
+ */
+#define SPI_DMA_MIN_BYTES 16
+
+/**
+ * struct stm32_spi_reg - stm32 SPI register & bitfield desc
+ * @reg: register offset
+ * @mask: bitfield mask
+ * @shift: left shift
+ */
+struct stm32_spi_reg {
+ int reg;
+ int mask;
+ int shift;
+};
+
+/**
+ * struct stm32_spi_regspec - stm32 registers definition, compatible dependent data
+ * @en: enable register and SPI enable bit
+ * @dma_rx_en: SPI DMA RX enable register and SPI DMA RX enable bit
+ * @dma_tx_en: SPI DMA TX enable register and SPI DMA TX enable bit
+ * @cpol: clock polarity register and polarity bit
+ * @cpha: clock phase register and phase bit
+ * @lsb_first: LSB transmitted first register and bit
+ * @br: baud rate register and bitfields
+ * @rx: SPI RX data register
+ * @tx: SPI TX data register
+ */
+struct stm32_spi_regspec {
+ const struct stm32_spi_reg en;
+ const struct stm32_spi_reg dma_rx_en;
+ const struct stm32_spi_reg dma_tx_en;
+ const struct stm32_spi_reg cpol;
+ const struct stm32_spi_reg cpha;
+ const struct stm32_spi_reg lsb_first;
+ const struct stm32_spi_reg br;
+ const struct stm32_spi_reg rx;
+ const struct stm32_spi_reg tx;
+};
+
+struct stm32_spi;
+
+/**
+ * struct stm32_spi_cfg - stm32 compatible configuration data
+ * @regs: registers descriptions
+ * @get_fifo_size: routine to get fifo size
+ * @get_bpw_mask: routine to get bits per word mask
+ * @disable: routine to disable controller
+ * @config: routine to configure controller as SPI Master
+ * @set_bpw: routine to configure registers for bits per word
+ * @set_mode: routine to configure registers to desired mode
+ * @set_data_idleness: optional routine to configure registers to desired idle
+ * time between frames (if driver has this functionality)
+ * @set_number_of_data: optional routine to configure registers to desired
+ * number of data (if driver has this functionality)
+ * @transfer_one_dma_start: routine to start the transfer of a single
+ *	spi_transfer using DMA
+ * @dma_rx_cb: routine to call after DMA RX channel operation is complete
+ * @dma_tx_cb: routine to call after DMA TX channel operation is complete
+ * @transfer_one_irq: routine to configure interrupts for driver
+ * @irq_handler_event: Interrupt handler for SPI controller events
+ * @irq_handler_thread: thread of interrupt handler for SPI controller
+ * @baud_rate_div_min: minimum baud rate divisor
+ * @baud_rate_div_max: maximum baud rate divisor
+ * @has_fifo: boolean to know if fifo is used for driver
+ * @flags: compatible specific SPI controller flags used at registration time
+ */
+struct stm32_spi_cfg {
+ const struct stm32_spi_regspec *regs;
+ int (*get_fifo_size)(struct stm32_spi *spi);
+ int (*get_bpw_mask)(struct stm32_spi *spi);
+ void (*disable)(struct stm32_spi *spi);
+ int (*config)(struct stm32_spi *spi);
+ void (*set_bpw)(struct stm32_spi *spi);
+ int (*set_mode)(struct stm32_spi *spi, unsigned int comm_type);
+ void (*set_data_idleness)(struct stm32_spi *spi, u32 length);
+ int (*set_number_of_data)(struct stm32_spi *spi, u32 length);
+ void (*transfer_one_dma_start)(struct stm32_spi *spi);
+ void (*dma_rx_cb)(void *data);
+ void (*dma_tx_cb)(void *data);
+ int (*transfer_one_irq)(struct stm32_spi *spi);
+ irqreturn_t (*irq_handler_event)(int irq, void *dev_id);
+ irqreturn_t (*irq_handler_thread)(int irq, void *dev_id);
+ unsigned int baud_rate_div_min;
+ unsigned int baud_rate_div_max;
+ bool has_fifo;
+ u16 flags;
+};
+
+/**
+ * struct stm32_spi - private data of the SPI controller
+ * @dev: driver model representation of the controller
+ * @master: controller master interface
+ * @cfg: compatible configuration data
+ * @base: virtual memory area
+ * @clk: hw kernel clock feeding the SPI clock generator
+ * @clk_rate: rate of the hw kernel clock feeding the SPI clock generator
+ * @lock: prevent I/O concurrent access
+ * @irq: SPI controller interrupt line
+ * @fifo_size: size of the embedded fifo in bytes
+ * @cur_midi: master inter-data idleness in ns
+ * @cur_speed: speed configured in Hz
+ * @cur_half_period: time of a half bit in us
+ * @cur_bpw: number of bits in a single SPI data frame
+ * @cur_fthlv: fifo threshold level (data frames in a single data packet)
+ * @cur_comm: SPI communication mode
+ * @cur_xferlen: current transfer length in bytes
+ * @cur_usedma: boolean to know if dma is used in current transfer
+ * @tx_buf: data to be written, or NULL
+ * @rx_buf: data to be read, or NULL
+ * @tx_len: number of data to be written in bytes
+ * @rx_len: number of data to be read in bytes
+ * @dma_tx: dma channel for TX transfer
+ * @dma_rx: dma channel for RX transfer
+ * @phys_addr: SPI registers physical base address
+ */
+struct stm32_spi {
+ struct device *dev;
+ struct spi_master *master;
+ const struct stm32_spi_cfg *cfg;
+ void __iomem *base;
+ struct clk *clk;
+ u32 clk_rate;
+ spinlock_t lock; /* prevent I/O concurrent access */
+ int irq;
+ unsigned int fifo_size;
+
+ unsigned int cur_midi;
+ unsigned int cur_speed;
+ unsigned int cur_half_period;
+ unsigned int cur_bpw;
+ unsigned int cur_fthlv;
+ unsigned int cur_comm;
+ unsigned int cur_xferlen;
+ bool cur_usedma;
+
+ const void *tx_buf;
+ void *rx_buf;
+ int tx_len;
+ int rx_len;
+ struct dma_chan *dma_tx;
+ struct dma_chan *dma_rx;
+ dma_addr_t phys_addr;
+};
+
+static const struct stm32_spi_regspec stm32f4_spi_regspec = {
+ .en = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_SPE },
+
+ .dma_rx_en = { STM32F4_SPI_CR2, STM32F4_SPI_CR2_RXDMAEN },
+ .dma_tx_en = { STM32F4_SPI_CR2, STM32F4_SPI_CR2_TXDMAEN },
+
+ .cpol = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_CPOL },
+ .cpha = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_CPHA },
+ .lsb_first = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_LSBFRST },
+ .br = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_BR, STM32F4_SPI_CR1_BR_SHIFT },
+
+ .rx = { STM32F4_SPI_DR },
+ .tx = { STM32F4_SPI_DR },
+};
+
+static const struct stm32_spi_regspec stm32h7_spi_regspec = {
+	/*
+	 * SPI data transfer is enabled but spi_ker_ck is idle.
+	 * CFG1 and CFG2 registers are write protected when SPE is enabled.
+	 */
+ .en = { STM32H7_SPI_CR1, STM32H7_SPI_CR1_SPE },
+
+ .dma_rx_en = { STM32H7_SPI_CFG1, STM32H7_SPI_CFG1_RXDMAEN },
+ .dma_tx_en = { STM32H7_SPI_CFG1, STM32H7_SPI_CFG1_TXDMAEN },
+
+ .cpol = { STM32H7_SPI_CFG2, STM32H7_SPI_CFG2_CPOL },
+ .cpha = { STM32H7_SPI_CFG2, STM32H7_SPI_CFG2_CPHA },
+ .lsb_first = { STM32H7_SPI_CFG2, STM32H7_SPI_CFG2_LSBFRST },
+ .br = { STM32H7_SPI_CFG1, STM32H7_SPI_CFG1_MBR,
+ STM32H7_SPI_CFG1_MBR_SHIFT },
+
+ .rx = { STM32H7_SPI_RXDR },
+ .tx = { STM32H7_SPI_TXDR },
+};
+
+static inline void stm32_spi_set_bits(struct stm32_spi *spi,
+ u32 offset, u32 bits)
+{
+ writel_relaxed(readl_relaxed(spi->base + offset) | bits,
+ spi->base + offset);
+}
+
+static inline void stm32_spi_clr_bits(struct stm32_spi *spi,
+ u32 offset, u32 bits)
+{
+ writel_relaxed(readl_relaxed(spi->base + offset) & ~bits,
+ spi->base + offset);
+}
+
+/**
+ * stm32h7_spi_get_fifo_size - Return fifo size
+ * @spi: pointer to the spi controller data structure
+ */
+static int stm32h7_spi_get_fifo_size(struct stm32_spi *spi)
+{
+ unsigned long flags;
+ u32 count = 0;
+
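+	/*
+	 * With the controller enabled but no transfer started (CSTART not
+	 * set), dummy bytes written to TXDR pile up in the TX FIFO while TXP
+	 * reports free space; counting them yields the FIFO depth in bytes.
+	 */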
+ spin_lock_irqsave(&spi->lock, flags);
+
+ stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_SPE);
+
+ while (readl_relaxed(spi->base + STM32H7_SPI_SR) & STM32H7_SPI_SR_TXP)
+ writeb_relaxed(++count, spi->base + STM32H7_SPI_TXDR);
+
+ stm32_spi_clr_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_SPE);
+
+ spin_unlock_irqrestore(&spi->lock, flags);
+
+ dev_dbg(spi->dev, "%d x 8-bit fifo size\n", count);
+
+ return count;
+}
+
+/**
+ * stm32f4_spi_get_bpw_mask - Return bits per word mask
+ * @spi: pointer to the spi controller data structure
+ */
+static int stm32f4_spi_get_bpw_mask(struct stm32_spi *spi)
+{
+ dev_dbg(spi->dev, "8-bit or 16-bit data frame supported\n");
+ return SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
+}
+
+/**
+ * stm32h7_spi_get_bpw_mask - Return bits per word mask
+ * @spi: pointer to the spi controller data structure
+ */
+static int stm32h7_spi_get_bpw_mask(struct stm32_spi *spi)
+{
+ unsigned long flags;
+ u32 cfg1, max_bpw;
+
+ spin_lock_irqsave(&spi->lock, flags);
+
+ /*
+ * The most significant bit of the DSIZE bit field is reserved when the
+ * maximum data size of the peripheral instance is limited to 16 bits.
+ */
+ stm32_spi_set_bits(spi, STM32H7_SPI_CFG1, STM32H7_SPI_CFG1_DSIZE);
+
+ cfg1 = readl_relaxed(spi->base + STM32H7_SPI_CFG1);
+ max_bpw = FIELD_GET(STM32H7_SPI_CFG1_DSIZE, cfg1) + 1;
+
+ spin_unlock_irqrestore(&spi->lock, flags);
+
+ dev_dbg(spi->dev, "%d-bit maximum data frame\n", max_bpw);
+
+ return SPI_BPW_RANGE_MASK(4, max_bpw);
+}
+
+/**
+ * stm32_spi_prepare_mbr - Determine baud rate divisor value
+ * @spi: pointer to the spi controller data structure
+ * @speed_hz: requested speed
+ * @min_div: minimum baud rate divisor
+ * @max_div: maximum baud rate divisor
+ *
+ * Return baud rate divisor value in case of success or -EINVAL
+ */
+static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz,
+ u32 min_div, u32 max_div)
+{
+ u32 div, mbrdiv;
+
+ /* Ensure spi->clk_rate is even */
+ div = DIV_ROUND_CLOSEST(spi->clk_rate & ~0x1, speed_hz);
+
+ /*
+ * The SPI framework sets xfer->speed_hz to master->max_speed_hz if
+ * xfer->speed_hz is greater than master->max_speed_hz, and it returns
+ * an error when xfer->speed_hz is lower than master->min_speed_hz, so
+ * there is no need to check that here.
+ * However, the resulting divisor still has to fit within the limits
+ * expected by the calculations below.
+ */
+ if ((div < min_div) || (div > max_div))
+ return -EINVAL;
+
+ /* Determine the first power of 2 greater than or equal to div */
+ if (div & (div - 1))
+ mbrdiv = fls(div);
+ else
+ mbrdiv = fls(div) - 1;
+
+ spi->cur_speed = spi->clk_rate / (1 << mbrdiv);
+
+ spi->cur_half_period = DIV_ROUND_CLOSEST(USEC_PER_SEC, 2 * spi->cur_speed);
+
+ return mbrdiv - 1;
+}
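+
+/*
+ * Illustrative example (hypothetical values): with spi->clk_rate at 64 MHz
+ * and a requested speed of 10 MHz, div = 6, which is not a power of two, so
+ * mbrdiv = fls(6) = 3. The effective speed becomes 64 MHz / 2^3 = 8 MHz and
+ * the function returns 2, the value programmed into the MBR field.
+ */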
+
+/**
+ * stm32h7_spi_prepare_fthlv - Determine FIFO threshold level
+ * @spi: pointer to the spi controller data structure
+ * @xfer_len: length of the message to be transferred
+ */
+static u32 stm32h7_spi_prepare_fthlv(struct stm32_spi *spi, u32 xfer_len)
+{
+ u32 packet, bpw;
+
+ /* data packet should not exceed 1/2 of fifo space */
+ packet = clamp(xfer_len, 1U, spi->fifo_size / 2);
+
+ /* align packet size with data registers access */
+ bpw = DIV_ROUND_UP(spi->cur_bpw, 8);
+ return DIV_ROUND_UP(packet, bpw);
+}
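+
+/*
+ * Illustrative example (hypothetical values): with a 16-byte FIFO, a
+ * transfer of 8 bytes or more and 8-bit frames, packet = 8 and bpw = 1,
+ * so the returned threshold is 8 data frames per packet.
+ */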
+
+/**
+ * stm32f4_spi_write_tx - Write bytes to Transmit Data Register
+ * @spi: pointer to the spi controller data structure
+ *
+ * Reads from tx_buf are sized according to the remaining byte count so
+ * as not to read beyond the end of tx_buf.
+ */
+static void stm32f4_spi_write_tx(struct stm32_spi *spi)
+{
+ if ((spi->tx_len > 0) && (readl_relaxed(spi->base + STM32F4_SPI_SR) &
+ STM32F4_SPI_SR_TXE)) {
+ u32 offs = spi->cur_xferlen - spi->tx_len;
+
+ if (spi->cur_bpw == 16) {
+ const u16 *tx_buf16 = (const u16 *)(spi->tx_buf + offs);
+
+ writew_relaxed(*tx_buf16, spi->base + STM32F4_SPI_DR);
+ spi->tx_len -= sizeof(u16);
+ } else {
+ const u8 *tx_buf8 = (const u8 *)(spi->tx_buf + offs);
+
+ writeb_relaxed(*tx_buf8, spi->base + STM32F4_SPI_DR);
+ spi->tx_len -= sizeof(u8);
+ }
+ }
+
+ dev_dbg(spi->dev, "%s: %d bytes left\n", __func__, spi->tx_len);
+}
+
+/**
+ * stm32h7_spi_write_txfifo - Write bytes in Transmit Data Register
+ * @spi: pointer to the spi controller data structure
+ *
+ * Reads from tx_buf are sized according to the remaining byte count so
+ * as not to read beyond the end of tx_buf.
+ */
+static void stm32h7_spi_write_txfifo(struct stm32_spi *spi)
+{
+ while ((spi->tx_len > 0) &&
+ (readl_relaxed(spi->base + STM32H7_SPI_SR) &
+ STM32H7_SPI_SR_TXP)) {
+ u32 offs = spi->cur_xferlen - spi->tx_len;
+
+ if (spi->tx_len >= sizeof(u32)) {
+ const u32 *tx_buf32 = (const u32 *)(spi->tx_buf + offs);
+
+ writel_relaxed(*tx_buf32, spi->base + STM32H7_SPI_TXDR);
+ spi->tx_len -= sizeof(u32);
+ } else if (spi->tx_len >= sizeof(u16)) {
+ const u16 *tx_buf16 = (const u16 *)(spi->tx_buf + offs);
+
+ writew_relaxed(*tx_buf16, spi->base + STM32H7_SPI_TXDR);
+ spi->tx_len -= sizeof(u16);
+ } else {
+ const u8 *tx_buf8 = (const u8 *)(spi->tx_buf + offs);
+
+ writeb_relaxed(*tx_buf8, spi->base + STM32H7_SPI_TXDR);
+ spi->tx_len -= sizeof(u8);
+ }
+ }
+
+ dev_dbg(spi->dev, "%s: %d bytes left\n", __func__, spi->tx_len);
+}
+
+/**
+ * stm32f4_spi_read_rx - Read bytes from Receive Data Register
+ * @spi: pointer to the spi controller data structure
+ *
+ * Writes to rx_buf are sized according to the remaining byte count so
+ * as not to write beyond the end of rx_buf.
+ */
+static void stm32f4_spi_read_rx(struct stm32_spi *spi)
+{
+ if ((spi->rx_len > 0) && (readl_relaxed(spi->base + STM32F4_SPI_SR) &
+ STM32F4_SPI_SR_RXNE)) {
+ u32 offs = spi->cur_xferlen - spi->rx_len;
+
+ if (spi->cur_bpw == 16) {
+ u16 *rx_buf16 = (u16 *)(spi->rx_buf + offs);
+
+ *rx_buf16 = readw_relaxed(spi->base + STM32F4_SPI_DR);
+ spi->rx_len -= sizeof(u16);
+ } else {
+ u8 *rx_buf8 = (u8 *)(spi->rx_buf + offs);
+
+ *rx_buf8 = readb_relaxed(spi->base + STM32F4_SPI_DR);
+ spi->rx_len -= sizeof(u8);
+ }
+ }
+
+ dev_dbg(spi->dev, "%s: %d bytes left\n", __func__, spi->rx_len);
+}
+
+/**
+ * stm32h7_spi_read_rxfifo - Read bytes in Receive Data Register
+ * @spi: pointer to the spi controller data structure
+ *
+ * Writes to rx_buf are sized according to the remaining byte count so
+ * as not to write beyond the end of rx_buf.
+ */
+static void stm32h7_spi_read_rxfifo(struct stm32_spi *spi)
+{
+ u32 sr = readl_relaxed(spi->base + STM32H7_SPI_SR);
+ u32 rxplvl = FIELD_GET(STM32H7_SPI_SR_RXPLVL, sr);
+
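+	/*
+	 * Keep reading while RXP reports at least one packet available or,
+	 * once EOT is set, while RXWNE (a full 32-bit word left) or a
+	 * non-zero RXPLVL indicates residual data in the FIFO.
+	 */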
+ while ((spi->rx_len > 0) &&
+ ((sr & STM32H7_SPI_SR_RXP) ||
+ ((sr & STM32H7_SPI_SR_EOT) &&
+ ((sr & STM32H7_SPI_SR_RXWNE) || (rxplvl > 0))))) {
+ u32 offs = spi->cur_xferlen - spi->rx_len;
+
+ if ((spi->rx_len >= sizeof(u32)) ||
+ (sr & STM32H7_SPI_SR_RXWNE)) {
+ u32 *rx_buf32 = (u32 *)(spi->rx_buf + offs);
+
+ *rx_buf32 = readl_relaxed(spi->base + STM32H7_SPI_RXDR);
+ spi->rx_len -= sizeof(u32);
+ } else if ((spi->rx_len >= sizeof(u16)) ||
+ (!(sr & STM32H7_SPI_SR_RXWNE) &&
+ (rxplvl >= 2 || spi->cur_bpw > 8))) {
+ u16 *rx_buf16 = (u16 *)(spi->rx_buf + offs);
+
+ *rx_buf16 = readw_relaxed(spi->base + STM32H7_SPI_RXDR);
+ spi->rx_len -= sizeof(u16);
+ } else {
+ u8 *rx_buf8 = (u8 *)(spi->rx_buf + offs);
+
+ *rx_buf8 = readb_relaxed(spi->base + STM32H7_SPI_RXDR);
+ spi->rx_len -= sizeof(u8);
+ }
+
+ sr = readl_relaxed(spi->base + STM32H7_SPI_SR);
+ rxplvl = FIELD_GET(STM32H7_SPI_SR_RXPLVL, sr);
+ }
+
+ dev_dbg(spi->dev, "%s: %d bytes left (sr=%08x)\n",
+ __func__, spi->rx_len, sr);
+}
+
+/**
+ * stm32_spi_enable - Enable SPI controller
+ * @spi: pointer to the spi controller data structure
+ */
+static void stm32_spi_enable(struct stm32_spi *spi)
+{
+ dev_dbg(spi->dev, "enable controller\n");
+
+ stm32_spi_set_bits(spi, spi->cfg->regs->en.reg,
+ spi->cfg->regs->en.mask);
+}
+
+/**
+ * stm32f4_spi_disable - Disable SPI controller
+ * @spi: pointer to the spi controller data structure
+ */
+static void stm32f4_spi_disable(struct stm32_spi *spi)
+{
+ unsigned long flags;
+ u32 sr;
+
+ dev_dbg(spi->dev, "disable controller\n");
+
+ spin_lock_irqsave(&spi->lock, flags);
+
+ if (!(readl_relaxed(spi->base + STM32F4_SPI_CR1) &
+ STM32F4_SPI_CR1_SPE)) {
+ spin_unlock_irqrestore(&spi->lock, flags);
+ return;
+ }
+
+ /* Disable interrupts */
+ stm32_spi_clr_bits(spi, STM32F4_SPI_CR2, STM32F4_SPI_CR2_TXEIE |
+ STM32F4_SPI_CR2_RXNEIE |
+ STM32F4_SPI_CR2_ERRIE);
+
+ /* Wait until BSY = 0 */
+ if (readl_relaxed_poll_timeout_atomic(spi->base + STM32F4_SPI_SR,
+ sr, !(sr & STM32F4_SPI_SR_BSY),
+ 10, 100000) < 0) {
+ dev_warn(spi->dev, "disabling condition timeout\n");
+ }
+
+ if (spi->cur_usedma && spi->dma_tx)
+ dmaengine_terminate_all(spi->dma_tx);
+ if (spi->cur_usedma && spi->dma_rx)
+ dmaengine_terminate_all(spi->dma_rx);
+
+ stm32_spi_clr_bits(spi, STM32F4_SPI_CR1, STM32F4_SPI_CR1_SPE);
+
+ stm32_spi_clr_bits(spi, STM32F4_SPI_CR2, STM32F4_SPI_CR2_TXDMAEN |
+ STM32F4_SPI_CR2_RXDMAEN);
+
+ /* Sequence to clear OVR flag */
+ readl_relaxed(spi->base + STM32F4_SPI_DR);
+ readl_relaxed(spi->base + STM32F4_SPI_SR);
+
+ spin_unlock_irqrestore(&spi->lock, flags);
+}
+
+/**
+ * stm32h7_spi_disable - Disable SPI controller
+ * @spi: pointer to the spi controller data structure
+ *
+ * The RX FIFO is flushed when the SPI controller is disabled.
+ */
+static void stm32h7_spi_disable(struct stm32_spi *spi)
+{
+ unsigned long flags;
+ u32 cr1;
+
+ dev_dbg(spi->dev, "disable controller\n");
+
+ spin_lock_irqsave(&spi->lock, flags);
+
+ cr1 = readl_relaxed(spi->base + STM32H7_SPI_CR1);
+
+ if (!(cr1 & STM32H7_SPI_CR1_SPE)) {
+ spin_unlock_irqrestore(&spi->lock, flags);
+ return;
+ }
+
+ /* Add a delay to make sure that transmission is ended. */
+ if (spi->cur_half_period)
+ udelay(spi->cur_half_period);
+
+ if (spi->cur_usedma && spi->dma_tx)
+ dmaengine_terminate_all(spi->dma_tx);
+ if (spi->cur_usedma && spi->dma_rx)
+ dmaengine_terminate_all(spi->dma_rx);
+
+ stm32_spi_clr_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_SPE);
+
+ stm32_spi_clr_bits(spi, STM32H7_SPI_CFG1, STM32H7_SPI_CFG1_TXDMAEN |
+ STM32H7_SPI_CFG1_RXDMAEN);
+
+ /* Disable interrupts and clear status flags */
+ writel_relaxed(0, spi->base + STM32H7_SPI_IER);
+ writel_relaxed(STM32H7_SPI_IFCR_ALL, spi->base + STM32H7_SPI_IFCR);
+
+ spin_unlock_irqrestore(&spi->lock, flags);
+}
+
+/**
+ * stm32_spi_can_dma - Determine if the transfer is eligible for DMA use
+ * @master: controller master interface
+ * @spi_dev: pointer to the spi device
+ * @transfer: pointer to spi transfer
+ *
+ * If the controller has a FIFO, use DMA when the current transfer size is
+ * greater than the FIFO size. Otherwise, use DMA for transfers longer than
+ * the defined DMA minimum byte count.
+ */
+static bool stm32_spi_can_dma(struct spi_master *master,
+ struct spi_device *spi_dev,
+ struct spi_transfer *transfer)
+{
+ unsigned int dma_size;
+ struct stm32_spi *spi = spi_master_get_devdata(master);
+
+ if (spi->cfg->has_fifo)
+ dma_size = spi->fifo_size;
+ else
+ dma_size = SPI_DMA_MIN_BYTES;
+
+ dev_dbg(spi->dev, "%s: %s\n", __func__,
+ (transfer->len > dma_size) ? "true" : "false");
+
+ return (transfer->len > dma_size);
+}
+
+/**
+ * stm32f4_spi_irq_event - Interrupt handler for SPI controller events
+ * @irq: interrupt line
+ * @dev_id: SPI controller master interface
+ */
+static irqreturn_t stm32f4_spi_irq_event(int irq, void *dev_id)
+{
+ struct spi_master *master = dev_id;
+ struct stm32_spi *spi = spi_master_get_devdata(master);
+ u32 sr, mask = 0;
+ bool end = false;
+
+ spin_lock(&spi->lock);
+
+ sr = readl_relaxed(spi->base + STM32F4_SPI_SR);
+ /*
+ * The BSY flag is not handled by this interrupt handler; it is normal
+ * for it to be set while a transfer is ongoing.
+ */
+ sr &= ~STM32F4_SPI_SR_BSY;
+
+ if (!spi->cur_usedma && (spi->cur_comm == SPI_SIMPLEX_TX ||
+ spi->cur_comm == SPI_3WIRE_TX)) {
+ /* OVR flag shouldn't be handled for TX only mode */
+ sr &= ~(STM32F4_SPI_SR_OVR | STM32F4_SPI_SR_RXNE);
+ mask |= STM32F4_SPI_SR_TXE;
+ }
+
+ if (!spi->cur_usedma && (spi->cur_comm == SPI_FULL_DUPLEX ||
+ spi->cur_comm == SPI_SIMPLEX_RX ||
+ spi->cur_comm == SPI_3WIRE_RX)) {
+ /* TXE flag is set and is handled when RXNE flag occurs */
+ sr &= ~STM32F4_SPI_SR_TXE;
+ mask |= STM32F4_SPI_SR_RXNE | STM32F4_SPI_SR_OVR;
+ }
+
+ if (!(sr & mask)) {
+ dev_dbg(spi->dev, "spurious IT (sr=0x%08x)\n", sr);
+ spin_unlock(&spi->lock);
+ return IRQ_NONE;
+ }
+
+ if (sr & STM32F4_SPI_SR_OVR) {
+ dev_warn(spi->dev, "Overrun: received value discarded\n");
+
+ /* Sequence to clear OVR flag */
+ readl_relaxed(spi->base + STM32F4_SPI_DR);
+ readl_relaxed(spi->base + STM32F4_SPI_SR);
+
+ /*
+ * An overrun means that something went wrong, so stop the current
+ * transfer. Otherwise the transfer would keep waiting for the next
+ * RXNE event, but DR has already been read and the end condition
+ * would never be reached.
+ */
+ end = true;
+ goto end_irq;
+ }
+
+ if (sr & STM32F4_SPI_SR_TXE) {
+ if (spi->tx_buf)
+ stm32f4_spi_write_tx(spi);
+ if (spi->tx_len == 0)
+ end = true;
+ }
+
+ if (sr & STM32F4_SPI_SR_RXNE) {
+ stm32f4_spi_read_rx(spi);
+ if (spi->rx_len == 0)
+ end = true;
+ else if (spi->tx_buf)/* Load data for discontinuous mode */
+ stm32f4_spi_write_tx(spi);
+ }
+
+end_irq:
+ if (end) {
+ /* Immediately disable interrupts so that no new ones are generated */
+ stm32_spi_clr_bits(spi, STM32F4_SPI_CR2,
+ STM32F4_SPI_CR2_TXEIE |
+ STM32F4_SPI_CR2_RXNEIE |
+ STM32F4_SPI_CR2_ERRIE);
+ spin_unlock(&spi->lock);
+ return IRQ_WAKE_THREAD;
+ }
+
+ spin_unlock(&spi->lock);
+ return IRQ_HANDLED;
+}
+
+/**
+ * stm32f4_spi_irq_thread - Thread of interrupt handler for SPI controller
+ * @irq: interrupt line
+ * @dev_id: SPI controller master interface
+ */
+static irqreturn_t stm32f4_spi_irq_thread(int irq, void *dev_id)
+{
+ struct spi_master *master = dev_id;
+ struct stm32_spi *spi = spi_master_get_devdata(master);
+
+ spi_finalize_current_transfer(master);
+ stm32f4_spi_disable(spi);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * stm32h7_spi_irq_thread - Thread of interrupt handler for SPI controller
+ * @irq: interrupt line
+ * @dev_id: SPI controller master interface
+ */
+static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
+{
+ struct spi_master *master = dev_id;
+ struct stm32_spi *spi = spi_master_get_devdata(master);
+ u32 sr, ier, mask;
+ unsigned long flags;
+ bool end = false;
+
+ spin_lock_irqsave(&spi->lock, flags);
+
+ sr = readl_relaxed(spi->base + STM32H7_SPI_SR);
+ ier = readl_relaxed(spi->base + STM32H7_SPI_IER);
+
+ mask = ier;
+ /*
+ * EOTIE enables interrupts for the EOT, SUSP and TXC events. SUSP has
+ * to be added to the mask so it can be acknowledged later; TXC is
+ * cleared automatically.
+ */
+
+ mask |= STM32H7_SPI_SR_SUSP;
+ /*
+ * DXPIE is used in full-duplex mode: a single interrupt is raised only
+ * when both TXP and RXP are set. So in full-duplex mode, the TXP and
+ * RXP events also have to be checked here.
+ */
+ if ((spi->cur_comm == SPI_FULL_DUPLEX) && !spi->cur_usedma)
+ mask |= STM32H7_SPI_SR_TXP | STM32H7_SPI_SR_RXP;
+
+ if (!(sr & mask)) {
+ dev_warn(spi->dev, "spurious IT (sr=0x%08x, ier=0x%08x)\n",
+ sr, ier);
+ spin_unlock_irqrestore(&spi->lock, flags);
+ return IRQ_NONE;
+ }
+
+ if (sr & STM32H7_SPI_SR_SUSP) {
+ static DEFINE_RATELIMIT_STATE(rs,
+ DEFAULT_RATELIMIT_INTERVAL * 10,
+ 1);
+ ratelimit_set_flags(&rs, RATELIMIT_MSG_ON_RELEASE);
+ if (__ratelimit(&rs))
+ dev_dbg_ratelimited(spi->dev, "Communication suspended\n");
+ if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
+ stm32h7_spi_read_rxfifo(spi);
+ /*
+ * If communication is suspended while using DMA, it means
+ * that something went wrong, so stop the current transfer
+ */
+ if (spi->cur_usedma)
+ end = true;
+ }
+
+ if (sr & STM32H7_SPI_SR_MODF) {
+ dev_warn(spi->dev, "Mode fault: transfer aborted\n");
+ end = true;
+ }
+
+ if (sr & STM32H7_SPI_SR_OVR) {
+ dev_err(spi->dev, "Overrun: RX data lost\n");
+ end = true;
+ }
+
+ if (sr & STM32H7_SPI_SR_EOT) {
+ if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
+ stm32h7_spi_read_rxfifo(spi);
+ if (!spi->cur_usedma ||
+ (spi->cur_comm == SPI_SIMPLEX_TX || spi->cur_comm == SPI_3WIRE_TX))
+ end = true;
+ }
+
+ if (sr & STM32H7_SPI_SR_TXP)
+ if (!spi->cur_usedma && (spi->tx_buf && (spi->tx_len > 0)))
+ stm32h7_spi_write_txfifo(spi);
+
+ if (sr & STM32H7_SPI_SR_RXP)
+ if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
+ stm32h7_spi_read_rxfifo(spi);
+
+ writel_relaxed(sr & mask, spi->base + STM32H7_SPI_IFCR);
+
+ spin_unlock_irqrestore(&spi->lock, flags);
+
+ if (end) {
+ stm32h7_spi_disable(spi);
+ spi_finalize_current_transfer(master);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * stm32_spi_prepare_msg - set up the controller to transfer a single message
+ * @master: controller master interface
+ * @msg: pointer to spi message
+ */
+static int stm32_spi_prepare_msg(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct stm32_spi *spi = spi_master_get_devdata(master);
+ struct spi_device *spi_dev = msg->spi;
+ struct device_node *np = spi_dev->dev.of_node;
+ unsigned long flags;
+ u32 clrb = 0, setb = 0;
+
+ /* SPI slave device may need time between data frames */
+ spi->cur_midi = 0;
+ if (np && !of_property_read_u32(np, "st,spi-midi-ns", &spi->cur_midi))
+ dev_dbg(spi->dev, "%dns inter-data idleness\n", spi->cur_midi);
+
+ if (spi_dev->mode & SPI_CPOL)
+ setb |= spi->cfg->regs->cpol.mask;
+ else
+ clrb |= spi->cfg->regs->cpol.mask;
+
+ if (spi_dev->mode & SPI_CPHA)
+ setb |= spi->cfg->regs->cpha.mask;
+ else
+ clrb |= spi->cfg->regs->cpha.mask;
+
+ if (spi_dev->mode & SPI_LSB_FIRST)
+ setb |= spi->cfg->regs->lsb_first.mask;
+ else
+ clrb |= spi->cfg->regs->lsb_first.mask;
+
+ dev_dbg(spi->dev, "cpol=%d cpha=%d lsb_first=%d cs_high=%d\n",
+ !!(spi_dev->mode & SPI_CPOL),
+ !!(spi_dev->mode & SPI_CPHA),
+ !!(spi_dev->mode & SPI_LSB_FIRST),
+ !!(spi_dev->mode & SPI_CS_HIGH));
+
+ /* On STM32H7, messages should not exceed a maximum size set
+ * later via the set_number_of_data function. To guarantee that,
+ * split large messages into several smaller ones.
+ */
+ if (spi->cfg->set_number_of_data) {
+ int ret;
+
+ ret = spi_split_transfers_maxsize(master, msg,
+ STM32H7_SPI_TSIZE_MAX,
+ GFP_KERNEL | GFP_DMA);
+ if (ret)
+ return ret;
+ }
+
+ spin_lock_irqsave(&spi->lock, flags);
+
+ /* CPOL, CPHA and LSB FIRST bits have common register */
+ if (clrb || setb)
+ writel_relaxed(
+ (readl_relaxed(spi->base + spi->cfg->regs->cpol.reg) &
+ ~clrb) | setb,
+ spi->base + spi->cfg->regs->cpol.reg);
+
+ spin_unlock_irqrestore(&spi->lock, flags);
+
+ return 0;
+}
+
+/**
+ * stm32f4_spi_dma_tx_cb - dma callback
+ * @data: pointer to the spi controller data structure
+ *
+ * DMA callback is called when the transfer is complete for DMA TX channel.
+ */
+static void stm32f4_spi_dma_tx_cb(void *data)
+{
+ struct stm32_spi *spi = data;
+
+ if (spi->cur_comm == SPI_SIMPLEX_TX || spi->cur_comm == SPI_3WIRE_TX) {
+ spi_finalize_current_transfer(spi->master);
+ stm32f4_spi_disable(spi);
+ }
+}
+
+/**
+ * stm32_spi_dma_rx_cb - dma callback
+ * @data: pointer to the spi controller data structure
+ *
+ * DMA callback is called when the transfer is complete for DMA RX channel.
+ */
+static void stm32_spi_dma_rx_cb(void *data)
+{
+ struct stm32_spi *spi = data;
+
+ spi_finalize_current_transfer(spi->master);
+ spi->cfg->disable(spi);
+}
+
+/**
+ * stm32_spi_dma_config - configure dma slave channel depending on current
+ * transfer bits_per_word.
+ * @spi: pointer to the spi controller data structure
+ * @dma_conf: pointer to the dma_slave_config structure
+ * @dir: direction of the dma transfer
+ */
+static void stm32_spi_dma_config(struct stm32_spi *spi,
+ struct dma_slave_config *dma_conf,
+ enum dma_transfer_direction dir)
+{
+ enum dma_slave_buswidth buswidth;
+ u32 maxburst;
+
+ if (spi->cur_bpw <= 8)
+ buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ else if (spi->cur_bpw <= 16)
+ buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ else
+ buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
+
+ if (spi->cfg->has_fifo) {
+ /* Valid for DMA Half or Full Fifo threshold */
+ if (spi->cur_fthlv == 2)
+ maxburst = 1;
+ else
+ maxburst = spi->cur_fthlv;
+ } else {
+ maxburst = 1;
+ }
+
+ memset(dma_conf, 0, sizeof(struct dma_slave_config));
+ dma_conf->direction = dir;
+ if (dma_conf->direction == DMA_DEV_TO_MEM) { /* RX */
+ dma_conf->src_addr = spi->phys_addr + spi->cfg->regs->rx.reg;
+ dma_conf->src_addr_width = buswidth;
+ dma_conf->src_maxburst = maxburst;
+
+ dev_dbg(spi->dev, "Rx DMA config buswidth=%d, maxburst=%d\n",
+ buswidth, maxburst);
+ } else if (dma_conf->direction == DMA_MEM_TO_DEV) { /* TX */
+ dma_conf->dst_addr = spi->phys_addr + spi->cfg->regs->tx.reg;
+ dma_conf->dst_addr_width = buswidth;
+ dma_conf->dst_maxburst = maxburst;
+
+ dev_dbg(spi->dev, "Tx DMA config buswidth=%d, maxburst=%d\n",
+ buswidth, maxburst);
+ }
+}
+
+/**
+ * stm32f4_spi_transfer_one_irq - transfer a single spi_transfer using
+ * interrupts
+ * @spi: pointer to the spi controller data structure
+ *
+ * It must return 0 if the transfer is finished or 1 if the transfer is still
+ * in progress.
+ */
+static int stm32f4_spi_transfer_one_irq(struct stm32_spi *spi)
+{
+ unsigned long flags;
+ u32 cr2 = 0;
+
+ /* Enable the interrupts relative to the current communication mode */
+ if (spi->cur_comm == SPI_SIMPLEX_TX || spi->cur_comm == SPI_3WIRE_TX) {
+ cr2 |= STM32F4_SPI_CR2_TXEIE;
+ } else if (spi->cur_comm == SPI_FULL_DUPLEX ||
+ spi->cur_comm == SPI_SIMPLEX_RX ||
+ spi->cur_comm == SPI_3WIRE_RX) {
+ /* In transmit-only mode, the OVR flag is set in the SR register
+ * since the received data are never read. Therefore set OVR
+ * interrupt only when rx buffer is available.
+ */
+ cr2 |= STM32F4_SPI_CR2_RXNEIE | STM32F4_SPI_CR2_ERRIE;
+ } else {
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&spi->lock, flags);
+
+ stm32_spi_set_bits(spi, STM32F4_SPI_CR2, cr2);
+
+ stm32_spi_enable(spi);
+
+ /* starting data transfer when buffer is loaded */
+ if (spi->tx_buf)
+ stm32f4_spi_write_tx(spi);
+
+ spin_unlock_irqrestore(&spi->lock, flags);
+
+ return 1;
+}
+
+/**
+ * stm32h7_spi_transfer_one_irq - transfer a single spi_transfer using
+ * interrupts
+ * @spi: pointer to the spi controller data structure
+ *
+ * It must return 0 if the transfer is finished or 1 if the transfer is still
+ * in progress.
+ */
+static int stm32h7_spi_transfer_one_irq(struct stm32_spi *spi)
+{
+ unsigned long flags;
+ u32 ier = 0;
+
+ /* Enable the interrupts relative to the current communication mode */
+ if (spi->tx_buf && spi->rx_buf) /* Full Duplex */
+ ier |= STM32H7_SPI_IER_DXPIE;
+ else if (spi->tx_buf) /* Half-Duplex TX dir or Simplex TX */
+ ier |= STM32H7_SPI_IER_TXPIE;
+ else if (spi->rx_buf) /* Half-Duplex RX dir or Simplex RX */
+ ier |= STM32H7_SPI_IER_RXPIE;
+
+ /* Enable the interrupts relative to the end of transfer */
+ ier |= STM32H7_SPI_IER_EOTIE | STM32H7_SPI_IER_TXTFIE |
+ STM32H7_SPI_IER_OVRIE | STM32H7_SPI_IER_MODFIE;
+
+ spin_lock_irqsave(&spi->lock, flags);
+
+ stm32_spi_enable(spi);
+
+ /* Be sure to have data in fifo before starting data transfer */
+ if (spi->tx_buf)
+ stm32h7_spi_write_txfifo(spi);
+
+ stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_CSTART);
+
+ writel_relaxed(ier, spi->base + STM32H7_SPI_IER);
+
+ spin_unlock_irqrestore(&spi->lock, flags);
+
+ return 1;
+}
+
+/**
+ * stm32f4_spi_transfer_one_dma_start - Set SPI driver registers to start
+ * transfer using DMA
+ * @spi: pointer to the spi controller data structure
+ */
+static void stm32f4_spi_transfer_one_dma_start(struct stm32_spi *spi)
+{
+ /* In DMA mode end of transfer is handled by DMA TX or RX callback. */
+ if (spi->cur_comm == SPI_SIMPLEX_RX || spi->cur_comm == SPI_3WIRE_RX ||
+ spi->cur_comm == SPI_FULL_DUPLEX) {
+ /*
+ * In transmit-only mode, the OVR flag is set in the SR register
+ * since the received data are never read. Therefore set OVR
+ * interrupt only when rx buffer is available.
+ */
+ stm32_spi_set_bits(spi, STM32F4_SPI_CR2, STM32F4_SPI_CR2_ERRIE);
+ }
+
+ stm32_spi_enable(spi);
+}
+
+/**
+ * stm32h7_spi_transfer_one_dma_start - Set SPI driver registers to start
+ * transfer using DMA
+ * @spi: pointer to the spi controller data structure
+ */
+static void stm32h7_spi_transfer_one_dma_start(struct stm32_spi *spi)
+{
+ uint32_t ier = STM32H7_SPI_IER_OVRIE | STM32H7_SPI_IER_MODFIE;
+
+ /* Enable the interrupts */
+ if (spi->cur_comm == SPI_SIMPLEX_TX || spi->cur_comm == SPI_3WIRE_TX)
+ ier |= STM32H7_SPI_IER_EOTIE | STM32H7_SPI_IER_TXTFIE;
+
+ stm32_spi_set_bits(spi, STM32H7_SPI_IER, ier);
+
+ stm32_spi_enable(spi);
+
+ stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_CSTART);
+}
+
+/**
+ * stm32_spi_transfer_one_dma - transfer a single spi_transfer using DMA
+ * @spi: pointer to the spi controller data structure
+ * @xfer: pointer to the spi_transfer structure
+ *
+ * It must return 0 if the transfer is finished or 1 if the transfer is still
+ * in progress.
+ */
+static int stm32_spi_transfer_one_dma(struct stm32_spi *spi,
+ struct spi_transfer *xfer)
+{
+ struct dma_slave_config tx_dma_conf, rx_dma_conf;
+ struct dma_async_tx_descriptor *tx_dma_desc, *rx_dma_desc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&spi->lock, flags);
+
+ rx_dma_desc = NULL;
+ if (spi->rx_buf && spi->dma_rx) {
+ stm32_spi_dma_config(spi, &rx_dma_conf, DMA_DEV_TO_MEM);
+ dmaengine_slave_config(spi->dma_rx, &rx_dma_conf);
+
+ /* Enable Rx DMA request */
+ stm32_spi_set_bits(spi, spi->cfg->regs->dma_rx_en.reg,
+ spi->cfg->regs->dma_rx_en.mask);
+
+ rx_dma_desc = dmaengine_prep_slave_sg(
+ spi->dma_rx, xfer->rx_sg.sgl,
+ xfer->rx_sg.nents,
+ rx_dma_conf.direction,
+ DMA_PREP_INTERRUPT);
+ }
+
+ tx_dma_desc = NULL;
+ if (spi->tx_buf && spi->dma_tx) {
+ stm32_spi_dma_config(spi, &tx_dma_conf, DMA_MEM_TO_DEV);
+ dmaengine_slave_config(spi->dma_tx, &tx_dma_conf);
+
+ tx_dma_desc = dmaengine_prep_slave_sg(
+ spi->dma_tx, xfer->tx_sg.sgl,
+ xfer->tx_sg.nents,
+ tx_dma_conf.direction,
+ DMA_PREP_INTERRUPT);
+ }
+
+ if ((spi->tx_buf && spi->dma_tx && !tx_dma_desc) ||
+ (spi->rx_buf && spi->dma_rx && !rx_dma_desc))
+ goto dma_desc_error;
+
+ if (spi->cur_comm == SPI_FULL_DUPLEX && (!tx_dma_desc || !rx_dma_desc))
+ goto dma_desc_error;
+
+ if (rx_dma_desc) {
+ rx_dma_desc->callback = spi->cfg->dma_rx_cb;
+ rx_dma_desc->callback_param = spi;
+
+ if (dma_submit_error(dmaengine_submit(rx_dma_desc))) {
+ dev_err(spi->dev, "Rx DMA submit failed\n");
+ goto dma_desc_error;
+ }
+ /* Enable Rx DMA channel */
+ dma_async_issue_pending(spi->dma_rx);
+ }
+
+ if (tx_dma_desc) {
+ if (spi->cur_comm == SPI_SIMPLEX_TX ||
+ spi->cur_comm == SPI_3WIRE_TX) {
+ tx_dma_desc->callback = spi->cfg->dma_tx_cb;
+ tx_dma_desc->callback_param = spi;
+ }
+
+ if (dma_submit_error(dmaengine_submit(tx_dma_desc))) {
+ dev_err(spi->dev, "Tx DMA submit failed\n");
+ goto dma_submit_error;
+ }
+ /* Enable Tx DMA channel */
+ dma_async_issue_pending(spi->dma_tx);
+
+ /* Enable Tx DMA request */
+ stm32_spi_set_bits(spi, spi->cfg->regs->dma_tx_en.reg,
+ spi->cfg->regs->dma_tx_en.mask);
+ }
+
+ spi->cfg->transfer_one_dma_start(spi);
+
+ spin_unlock_irqrestore(&spi->lock, flags);
+
+ return 1;
+
+dma_submit_error:
+ if (spi->dma_rx)
+ dmaengine_terminate_all(spi->dma_rx);
+
+dma_desc_error:
+ stm32_spi_clr_bits(spi, spi->cfg->regs->dma_rx_en.reg,
+ spi->cfg->regs->dma_rx_en.mask);
+
+ spin_unlock_irqrestore(&spi->lock, flags);
+
+ dev_info(spi->dev, "DMA issue: fall back to irq transfer\n");
+
+ spi->cur_usedma = false;
+ return spi->cfg->transfer_one_irq(spi);
+}
+
+/**
+ * stm32f4_spi_set_bpw - Configure bits per word
+ * @spi: pointer to the spi controller data structure
+ */
+static void stm32f4_spi_set_bpw(struct stm32_spi *spi)
+{
+ if (spi->cur_bpw == 16)
+ stm32_spi_set_bits(spi, STM32F4_SPI_CR1, STM32F4_SPI_CR1_DFF);
+ else
+ stm32_spi_clr_bits(spi, STM32F4_SPI_CR1, STM32F4_SPI_CR1_DFF);
+}
+
+/**
+ * stm32h7_spi_set_bpw - configure bits per word
+ * @spi: pointer to the spi controller data structure
+ */
+static void stm32h7_spi_set_bpw(struct stm32_spi *spi)
+{
+ u32 bpw, fthlv;
+ u32 cfg1_clrb = 0, cfg1_setb = 0;
+
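+	/* DSIZE and FTHLV are both programmed as the desired value minus one. */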
+ bpw = spi->cur_bpw - 1;
+
+ cfg1_clrb |= STM32H7_SPI_CFG1_DSIZE;
+ cfg1_setb |= FIELD_PREP(STM32H7_SPI_CFG1_DSIZE, bpw);
+
+ spi->cur_fthlv = stm32h7_spi_prepare_fthlv(spi, spi->cur_xferlen);
+ fthlv = spi->cur_fthlv - 1;
+
+ cfg1_clrb |= STM32H7_SPI_CFG1_FTHLV;
+ cfg1_setb |= FIELD_PREP(STM32H7_SPI_CFG1_FTHLV, fthlv);
+
+ writel_relaxed(
+ (readl_relaxed(spi->base + STM32H7_SPI_CFG1) &
+ ~cfg1_clrb) | cfg1_setb,
+ spi->base + STM32H7_SPI_CFG1);
+}
+
+/**
+ * stm32_spi_set_mbr - Configure baud rate divisor in master mode
+ * @spi: pointer to the spi controller data structure
+ * @mbrdiv: baud rate divisor value
+ */
+static void stm32_spi_set_mbr(struct stm32_spi *spi, u32 mbrdiv)
+{
+ u32 clrb = 0, setb = 0;
+
+ clrb |= spi->cfg->regs->br.mask;
+ setb |= (mbrdiv << spi->cfg->regs->br.shift) & spi->cfg->regs->br.mask;
+
+ writel_relaxed((readl_relaxed(spi->base + spi->cfg->regs->br.reg) &
+ ~clrb) | setb,
+ spi->base + spi->cfg->regs->br.reg);
+}
+
+/**
+ * stm32_spi_communication_type - return transfer communication type
+ * @spi_dev: pointer to the spi device
+ * @transfer: pointer to spi transfer
+ */
+static unsigned int stm32_spi_communication_type(struct spi_device *spi_dev,
+ struct spi_transfer *transfer)
+{
+ unsigned int type = SPI_FULL_DUPLEX;
+
+ if (spi_dev->mode & SPI_3WIRE) { /* MISO/MOSI signals shared */
+ /*
+ * Using SPI_3WIRE with both xfer->tx_buf and xfer->rx_buf set is
+ * forbidden and rejected by the SPI subsystem's validation, so the
+ * direction of the transfer can be determined from whichever buffer
+ * is valid.
+ */
+ if (!transfer->tx_buf)
+ type = SPI_3WIRE_RX;
+ else
+ type = SPI_3WIRE_TX;
+ } else {
+ if (!transfer->tx_buf)
+ type = SPI_SIMPLEX_RX;
+ else if (!transfer->rx_buf)
+ type = SPI_SIMPLEX_TX;
+ }
+
+ return type;
+}
+
+/**
+ * stm32f4_spi_set_mode - configure communication mode
+ * @spi: pointer to the spi controller data structure
+ * @comm_type: type of communication to configure
+ */
+static int stm32f4_spi_set_mode(struct stm32_spi *spi, unsigned int comm_type)
+{
+ if (comm_type == SPI_3WIRE_TX || comm_type == SPI_SIMPLEX_TX) {
+ stm32_spi_set_bits(spi, STM32F4_SPI_CR1,
+ STM32F4_SPI_CR1_BIDIMODE |
+ STM32F4_SPI_CR1_BIDIOE);
+ } else if (comm_type == SPI_FULL_DUPLEX ||
+ comm_type == SPI_SIMPLEX_RX) {
+ stm32_spi_clr_bits(spi, STM32F4_SPI_CR1,
+ STM32F4_SPI_CR1_BIDIMODE |
+ STM32F4_SPI_CR1_BIDIOE);
+ } else if (comm_type == SPI_3WIRE_RX) {
+ stm32_spi_set_bits(spi, STM32F4_SPI_CR1,
+ STM32F4_SPI_CR1_BIDIMODE);
+ stm32_spi_clr_bits(spi, STM32F4_SPI_CR1,
+ STM32F4_SPI_CR1_BIDIOE);
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * stm32h7_spi_set_mode - configure communication mode
+ * @spi: pointer to the spi controller data structure
+ * @comm_type: type of communication to configure
+ */
+static int stm32h7_spi_set_mode(struct stm32_spi *spi, unsigned int comm_type)
+{
+ u32 mode;
+ u32 cfg2_clrb = 0, cfg2_setb = 0;
+
+ if (comm_type == SPI_3WIRE_RX) {
+ mode = STM32H7_SPI_HALF_DUPLEX;
+ stm32_spi_clr_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_HDDIR);
+ } else if (comm_type == SPI_3WIRE_TX) {
+ mode = STM32H7_SPI_HALF_DUPLEX;
+ stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_HDDIR);
+ } else if (comm_type == SPI_SIMPLEX_RX) {
+ mode = STM32H7_SPI_SIMPLEX_RX;
+ } else if (comm_type == SPI_SIMPLEX_TX) {
+ mode = STM32H7_SPI_SIMPLEX_TX;
+ } else {
+ mode = STM32H7_SPI_FULL_DUPLEX;
+ }
+
+ cfg2_clrb |= STM32H7_SPI_CFG2_COMM;
+ cfg2_setb |= FIELD_PREP(STM32H7_SPI_CFG2_COMM, mode);
+
+ writel_relaxed(
+ (readl_relaxed(spi->base + STM32H7_SPI_CFG2) &
+ ~cfg2_clrb) | cfg2_setb,
+ spi->base + STM32H7_SPI_CFG2);
+
+ return 0;
+}
+
+/**
+ * stm32h7_spi_data_idleness - configure minimum time delay inserted between two
+ * consecutive data frames in master mode
+ * @spi: pointer to the spi controller data structure
+ * @len: transfer len
+ */
+static void stm32h7_spi_data_idleness(struct stm32_spi *spi, u32 len)
+{
+ u32 cfg2_clrb = 0, cfg2_setb = 0;
+
+ cfg2_clrb |= STM32H7_SPI_CFG2_MIDI;
+ if ((len > 1) && (spi->cur_midi > 0)) {
+ u32 sck_period_ns = DIV_ROUND_UP(NSEC_PER_SEC, spi->cur_speed);
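+		/*
+		 * Convert the requested inter-data idle time into SCK periods
+		 * and clamp it to the maximum value the MIDI field can hold.
+		 */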
+ u32 midi = min_t(u32,
+ DIV_ROUND_UP(spi->cur_midi, sck_period_ns),
+ FIELD_GET(STM32H7_SPI_CFG2_MIDI,
+ STM32H7_SPI_CFG2_MIDI));
+
+ dev_dbg(spi->dev, "period=%dns, midi=%d(=%dns)\n",
+ sck_period_ns, midi, midi * sck_period_ns);
+ cfg2_setb |= FIELD_PREP(STM32H7_SPI_CFG2_MIDI, midi);
+ }
+
+ writel_relaxed((readl_relaxed(spi->base + STM32H7_SPI_CFG2) &
+ ~cfg2_clrb) | cfg2_setb,
+ spi->base + STM32H7_SPI_CFG2);
+}
+
+/**
+ * stm32h7_spi_number_of_data - configure number of data at current transfer
+ * @spi: pointer to the spi controller data structure
+ * @nb_words: transfer length (in words)
+ */
+static int stm32h7_spi_number_of_data(struct stm32_spi *spi, u32 nb_words)
+{
+ if (nb_words <= STM32H7_SPI_TSIZE_MAX) {
+ writel_relaxed(FIELD_PREP(STM32H7_SPI_CR2_TSIZE, nb_words),
+ spi->base + STM32H7_SPI_CR2);
+ } else {
+ return -EMSGSIZE;
+ }
+
+ return 0;
+}
+
+/**
+ * stm32_spi_transfer_one_setup - common setup to transfer a single
+ * spi_transfer either using DMA or
+ * interrupts.
+ * @spi: pointer to the spi controller data structure
+ * @spi_dev: pointer to the spi device
+ * @transfer: pointer to spi transfer
+ */
+static int stm32_spi_transfer_one_setup(struct stm32_spi *spi,
+ struct spi_device *spi_dev,
+ struct spi_transfer *transfer)
+{
+ unsigned long flags;
+ unsigned int comm_type;
+ int nb_words, ret = 0;
+ int mbr;
+
+ spin_lock_irqsave(&spi->lock, flags);
+
+ spi->cur_xferlen = transfer->len;
+
+ spi->cur_bpw = transfer->bits_per_word;
+ spi->cfg->set_bpw(spi);
+
+ /* Update spi->cur_speed with real clock speed */
+ mbr = stm32_spi_prepare_mbr(spi, transfer->speed_hz,
+ spi->cfg->baud_rate_div_min,
+ spi->cfg->baud_rate_div_max);
+ if (mbr < 0) {
+ ret = mbr;
+ goto out;
+ }
+
+ transfer->speed_hz = spi->cur_speed;
+ stm32_spi_set_mbr(spi, mbr);
+
+ comm_type = stm32_spi_communication_type(spi_dev, transfer);
+ ret = spi->cfg->set_mode(spi, comm_type);
+ if (ret < 0)
+ goto out;
+
+ spi->cur_comm = comm_type;
+
+ if (spi->cfg->set_data_idleness)
+ spi->cfg->set_data_idleness(spi, transfer->len);
+
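+	/*
+	 * Convert the transfer length in bytes into a number of data frames:
+	 * frames of up to 8 bits take one byte each, up to 16 bits two bytes,
+	 * and larger frames four bytes.
+	 */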
+ if (spi->cur_bpw <= 8)
+ nb_words = transfer->len;
+ else if (spi->cur_bpw <= 16)
+ nb_words = DIV_ROUND_UP(transfer->len * 8, 16);
+ else
+ nb_words = DIV_ROUND_UP(transfer->len * 8, 32);
+
+ if (spi->cfg->set_number_of_data) {
+ ret = spi->cfg->set_number_of_data(spi, nb_words);
+ if (ret < 0)
+ goto out;
+ }
+
+ dev_dbg(spi->dev, "transfer communication mode set to %d\n",
+ spi->cur_comm);
+ dev_dbg(spi->dev,
+ "data frame of %d-bit, data packet of %d data frames\n",
+ spi->cur_bpw, spi->cur_fthlv);
+ dev_dbg(spi->dev, "speed set to %dHz\n", spi->cur_speed);
+ dev_dbg(spi->dev, "transfer of %d bytes (%d data frames)\n",
+ spi->cur_xferlen, nb_words);
+ dev_dbg(spi->dev, "dma %s\n",
+ (spi->cur_usedma) ? "enabled" : "disabled");
+
+out:
+ spin_unlock_irqrestore(&spi->lock, flags);
+
+ return ret;
+}
+
+/**
+ * stm32_spi_transfer_one - transfer a single spi_transfer
+ * @master: controller master interface
+ * @spi_dev: pointer to the spi device
+ * @transfer: pointer to spi transfer
+ *
+ * It must return 0 if the transfer is finished or 1 if the transfer is still
+ * in progress.
+ */
+static int stm32_spi_transfer_one(struct spi_master *master,
+ struct spi_device *spi_dev,
+ struct spi_transfer *transfer)
+{
+ struct stm32_spi *spi = spi_master_get_devdata(master);
+ int ret;
+
+ spi->tx_buf = transfer->tx_buf;
+ spi->rx_buf = transfer->rx_buf;
+ spi->tx_len = spi->tx_buf ? transfer->len : 0;
+ spi->rx_len = spi->rx_buf ? transfer->len : 0;
+
+ spi->cur_usedma = (master->can_dma &&
+ master->can_dma(master, spi_dev, transfer));
+
+ ret = stm32_spi_transfer_one_setup(spi, spi_dev, transfer);
+ if (ret) {
+ dev_err(spi->dev, "SPI transfer setup failed\n");
+ return ret;
+ }
+
+ if (spi->cur_usedma)
+ return stm32_spi_transfer_one_dma(spi, transfer);
+ else
+ return spi->cfg->transfer_one_irq(spi);
+}
+
+/**
+ * stm32_spi_unprepare_msg - relax the hardware
+ * @master: controller master interface
+ * @msg: pointer to the spi message
+ */
+static int stm32_spi_unprepare_msg(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct stm32_spi *spi = spi_master_get_devdata(master);
+
+ spi->cfg->disable(spi);
+
+ return 0;
+}
+
+/**
+ * stm32f4_spi_config - Configure SPI controller as SPI master
+ * @spi: pointer to the spi controller data structure
+ */
+static int stm32f4_spi_config(struct stm32_spi *spi)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&spi->lock, flags);
+
+ /* Ensure I2SMOD bit is kept cleared */
+ stm32_spi_clr_bits(spi, STM32F4_SPI_I2SCFGR,
+ STM32F4_SPI_I2SCFGR_I2SMOD);
+
+ /*
+ * - SS input value high
+ * - transmitter half duplex direction
+ * - Set the master mode (default Motorola mode)
+ * - Consider 1 master/n slaves configuration and
+ * SS input value is determined by the SSI bit
+ */
+ stm32_spi_set_bits(spi, STM32F4_SPI_CR1, STM32F4_SPI_CR1_SSI |
+ STM32F4_SPI_CR1_BIDIOE |
+ STM32F4_SPI_CR1_MSTR |
+ STM32F4_SPI_CR1_SSM);
+
+ spin_unlock_irqrestore(&spi->lock, flags);
+
+ return 0;
+}
+
+/**
+ * stm32h7_spi_config - Configure SPI controller as SPI master
+ * @spi: pointer to the spi controller data structure
+ */
+static int stm32h7_spi_config(struct stm32_spi *spi)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&spi->lock, flags);
+
+ /* Ensure I2SMOD bit is kept cleared */
+ stm32_spi_clr_bits(spi, STM32H7_SPI_I2SCFGR,
+ STM32H7_SPI_I2SCFGR_I2SMOD);
+
+ /*
+ * - SS input value high
+ * - transmitter half duplex direction
+ * - automatic communication suspend when RX-Fifo is full
+ */
+ stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_SSI |
+ STM32H7_SPI_CR1_HDDIR |
+ STM32H7_SPI_CR1_MASRX);
+
+ /*
+ * - Set the master mode (default Motorola mode)
+ * - Consider 1 master/n slaves configuration and
+ * SS input value is determined by the SSI bit
+ * - keep control of all associated GPIOs
+ */
+ stm32_spi_set_bits(spi, STM32H7_SPI_CFG2, STM32H7_SPI_CFG2_MASTER |
+ STM32H7_SPI_CFG2_SSM |
+ STM32H7_SPI_CFG2_AFCNTR);
+
+ spin_unlock_irqrestore(&spi->lock, flags);
+
+ return 0;
+}
+
+static const struct stm32_spi_cfg stm32f4_spi_cfg = {
+ .regs = &stm32f4_spi_regspec,
+ .get_bpw_mask = stm32f4_spi_get_bpw_mask,
+ .disable = stm32f4_spi_disable,
+ .config = stm32f4_spi_config,
+ .set_bpw = stm32f4_spi_set_bpw,
+ .set_mode = stm32f4_spi_set_mode,
+ .transfer_one_dma_start = stm32f4_spi_transfer_one_dma_start,
+ .dma_tx_cb = stm32f4_spi_dma_tx_cb,
+ .dma_rx_cb = stm32_spi_dma_rx_cb,
+ .transfer_one_irq = stm32f4_spi_transfer_one_irq,
+ .irq_handler_event = stm32f4_spi_irq_event,
+ .irq_handler_thread = stm32f4_spi_irq_thread,
+ .baud_rate_div_min = STM32F4_SPI_BR_DIV_MIN,
+ .baud_rate_div_max = STM32F4_SPI_BR_DIV_MAX,
+ .has_fifo = false,
+ .flags = SPI_MASTER_MUST_TX,
+};
+
+static const struct stm32_spi_cfg stm32h7_spi_cfg = {
+ .regs = &stm32h7_spi_regspec,
+ .get_fifo_size = stm32h7_spi_get_fifo_size,
+ .get_bpw_mask = stm32h7_spi_get_bpw_mask,
+ .disable = stm32h7_spi_disable,
+ .config = stm32h7_spi_config,
+ .set_bpw = stm32h7_spi_set_bpw,
+ .set_mode = stm32h7_spi_set_mode,
+ .set_data_idleness = stm32h7_spi_data_idleness,
+ .set_number_of_data = stm32h7_spi_number_of_data,
+ .transfer_one_dma_start = stm32h7_spi_transfer_one_dma_start,
+ .dma_rx_cb = stm32_spi_dma_rx_cb,
+ /*
+ * dma_tx_cb is not necessary since in case of TX, dma is followed by
+ * SPI access hence handling is performed within the SPI interrupt
+ */
+ .transfer_one_irq = stm32h7_spi_transfer_one_irq,
+ .irq_handler_thread = stm32h7_spi_irq_thread,
+ .baud_rate_div_min = STM32H7_SPI_MBR_DIV_MIN,
+ .baud_rate_div_max = STM32H7_SPI_MBR_DIV_MAX,
+ .has_fifo = true,
+};
+
+static const struct of_device_id stm32_spi_of_match[] = {
+ { .compatible = "st,stm32h7-spi", .data = (void *)&stm32h7_spi_cfg },
+ { .compatible = "st,stm32f4-spi", .data = (void *)&stm32f4_spi_cfg },
+ {},
+};
+MODULE_DEVICE_TABLE(of, stm32_spi_of_match);
+
+static int stm32_spi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct stm32_spi *spi;
+ struct resource *res;
+ struct reset_control *rst;
+ int ret;
+
+ master = devm_spi_alloc_master(&pdev->dev, sizeof(struct stm32_spi));
+ if (!master) {
+ dev_err(&pdev->dev, "spi master allocation failed\n");
+ return -ENOMEM;
+ }
+ platform_set_drvdata(pdev, master);
+
+ spi = spi_master_get_devdata(master);
+ spi->dev = &pdev->dev;
+ spi->master = master;
+ spin_lock_init(&spi->lock);
+
+ spi->cfg = (const struct stm32_spi_cfg *)
+ of_match_device(pdev->dev.driver->of_match_table,
+ &pdev->dev)->data;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ spi->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(spi->base))
+ return PTR_ERR(spi->base);
+
+ spi->phys_addr = (dma_addr_t)res->start;
+
+ spi->irq = platform_get_irq(pdev, 0);
+ if (spi->irq <= 0)
+ return dev_err_probe(&pdev->dev, spi->irq,
+ "failed to get irq\n");
+
+ ret = devm_request_threaded_irq(&pdev->dev, spi->irq,
+ spi->cfg->irq_handler_event,
+ spi->cfg->irq_handler_thread,
+ IRQF_ONESHOT, pdev->name, master);
+ if (ret) {
+ dev_err(&pdev->dev, "irq%d request failed: %d\n", spi->irq,
+ ret);
+ return ret;
+ }
+
+ spi->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(spi->clk)) {
+ ret = PTR_ERR(spi->clk);
+ dev_err(&pdev->dev, "clk get failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(spi->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "clk enable failed: %d\n", ret);
+ return ret;
+ }
+ spi->clk_rate = clk_get_rate(spi->clk);
+ if (!spi->clk_rate) {
+ dev_err(&pdev->dev, "clk rate = 0\n");
+ ret = -EINVAL;
+ goto err_clk_disable;
+ }
+
+ rst = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
+ if (rst) {
+ if (IS_ERR(rst)) {
+ ret = dev_err_probe(&pdev->dev, PTR_ERR(rst),
+ "failed to get reset\n");
+ goto err_clk_disable;
+ }
+
+ reset_control_assert(rst);
+ udelay(2);
+ reset_control_deassert(rst);
+ }
+
+ if (spi->cfg->has_fifo)
+ spi->fifo_size = spi->cfg->get_fifo_size(spi);
+
+ ret = spi->cfg->config(spi);
+ if (ret) {
+ dev_err(&pdev->dev, "controller configuration failed: %d\n",
+ ret);
+ goto err_clk_disable;
+ }
+
+ master->dev.of_node = pdev->dev.of_node;
+ master->auto_runtime_pm = true;
+ master->bus_num = pdev->id;
+ master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_CS_HIGH | SPI_LSB_FIRST |
+ SPI_3WIRE;
+ master->bits_per_word_mask = spi->cfg->get_bpw_mask(spi);
+ master->max_speed_hz = spi->clk_rate / spi->cfg->baud_rate_div_min;
+ master->min_speed_hz = spi->clk_rate / spi->cfg->baud_rate_div_max;
+ master->use_gpio_descriptors = true;
+ master->prepare_message = stm32_spi_prepare_msg;
+ master->transfer_one = stm32_spi_transfer_one;
+ master->unprepare_message = stm32_spi_unprepare_msg;
+ master->flags = spi->cfg->flags;
+
+ spi->dma_tx = dma_request_chan(spi->dev, "tx");
+ if (IS_ERR(spi->dma_tx)) {
+ ret = PTR_ERR(spi->dma_tx);
+ spi->dma_tx = NULL;
+ if (ret == -EPROBE_DEFER)
+ goto err_clk_disable;
+
+ dev_warn(&pdev->dev, "failed to request tx dma channel\n");
+ } else {
+ master->dma_tx = spi->dma_tx;
+ }
+
+ spi->dma_rx = dma_request_chan(spi->dev, "rx");
+ if (IS_ERR(spi->dma_rx)) {
+ ret = PTR_ERR(spi->dma_rx);
+ spi->dma_rx = NULL;
+ if (ret == -EPROBE_DEFER)
+ goto err_dma_release;
+
+ dev_warn(&pdev->dev, "failed to request rx dma channel\n");
+ } else {
+ master->dma_rx = spi->dma_rx;
+ }
+
+ if (spi->dma_tx || spi->dma_rx)
+ master->can_dma = stm32_spi_can_dma;
+
+ pm_runtime_set_autosuspend_delay(&pdev->dev,
+ STM32_SPI_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ ret = spi_register_master(master);
+ if (ret) {
+ dev_err(&pdev->dev, "spi master registration failed: %d\n",
+ ret);
+ goto err_pm_disable;
+ }
+
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
+
+ dev_info(&pdev->dev, "driver initialized\n");
+
+ return 0;
+
+err_pm_disable:
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+err_dma_release:
+ if (spi->dma_tx)
+ dma_release_channel(spi->dma_tx);
+ if (spi->dma_rx)
+ dma_release_channel(spi->dma_rx);
+err_clk_disable:
+ clk_disable_unprepare(spi->clk);
+
+ return ret;
+}
+
+static int stm32_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct stm32_spi *spi = spi_master_get_devdata(master);
+
+ pm_runtime_get_sync(&pdev->dev);
+
+ spi_unregister_master(master);
+ spi->cfg->disable(spi);
+
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+
+ if (master->dma_tx)
+ dma_release_channel(master->dma_tx);
+ if (master->dma_rx)
+ dma_release_channel(master->dma_rx);
+
+ clk_disable_unprepare(spi->clk);
+
+ pinctrl_pm_select_sleep_state(&pdev->dev);
+
+ return 0;
+}
+
+static int __maybe_unused stm32_spi_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct stm32_spi *spi = spi_master_get_devdata(master);
+
+ clk_disable_unprepare(spi->clk);
+
+ return pinctrl_pm_select_sleep_state(dev);
+}
+
+static int __maybe_unused stm32_spi_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct stm32_spi *spi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = pinctrl_pm_select_default_state(dev);
+ if (ret)
+ return ret;
+
+ return clk_prepare_enable(spi->clk);
+}
+
+static int __maybe_unused stm32_spi_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ int ret;
+
+ ret = spi_master_suspend(master);
+ if (ret)
+ return ret;
+
+ return pm_runtime_force_suspend(dev);
+}
+
+static int __maybe_unused stm32_spi_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct stm32_spi *spi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = pm_runtime_force_resume(dev);
+ if (ret)
+ return ret;
+
+ ret = spi_master_resume(master);
+ if (ret) {
+ clk_disable_unprepare(spi->clk);
+ return ret;
+ }
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0) {
+ dev_err(dev, "Unable to power device:%d\n", ret);
+ return ret;
+ }
+
+ spi->cfg->config(spi);
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops stm32_spi_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(stm32_spi_suspend, stm32_spi_resume)
+ SET_RUNTIME_PM_OPS(stm32_spi_runtime_suspend,
+ stm32_spi_runtime_resume, NULL)
+};
+
+static struct platform_driver stm32_spi_driver = {
+ .probe = stm32_spi_probe,
+ .remove = stm32_spi_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .pm = &stm32_spi_pm_ops,
+ .of_match_table = stm32_spi_of_match,
+ },
+};
+
+module_platform_driver(stm32_spi_driver);
+
+MODULE_ALIAS("platform:" DRIVER_NAME);
+MODULE_DESCRIPTION("STMicroelectronics STM32 SPI Controller driver");
+MODULE_AUTHOR("Amelie Delaunay <amelie.delaunay@st.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-sun4i.c b/drivers/spi/spi-sun4i.c
new file mode 100644
index 000000000..6000d0761
--- /dev/null
+++ b/drivers/spi/spi-sun4i.c
@@ -0,0 +1,551 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2012 - 2014 Allwinner Tech
+ * Pan Nan <pannan@allwinnertech.com>
+ *
+ * Copyright (C) 2014 Maxime Ripard
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include <linux/spi/spi.h>
+
+#define SUN4I_FIFO_DEPTH 64
+
+#define SUN4I_RXDATA_REG 0x00
+
+#define SUN4I_TXDATA_REG 0x04
+
+#define SUN4I_CTL_REG 0x08
+#define SUN4I_CTL_ENABLE BIT(0)
+#define SUN4I_CTL_MASTER BIT(1)
+#define SUN4I_CTL_CPHA BIT(2)
+#define SUN4I_CTL_CPOL BIT(3)
+#define SUN4I_CTL_CS_ACTIVE_LOW BIT(4)
+#define SUN4I_CTL_LMTF BIT(6)
+#define SUN4I_CTL_TF_RST BIT(8)
+#define SUN4I_CTL_RF_RST BIT(9)
+#define SUN4I_CTL_XCH BIT(10)
+#define SUN4I_CTL_CS_MASK 0x3000
+#define SUN4I_CTL_CS(cs) (((cs) << 12) & SUN4I_CTL_CS_MASK)
+#define SUN4I_CTL_DHB BIT(15)
+#define SUN4I_CTL_CS_MANUAL BIT(16)
+#define SUN4I_CTL_CS_LEVEL BIT(17)
+#define SUN4I_CTL_TP BIT(18)
+
+#define SUN4I_INT_CTL_REG 0x0c
+#define SUN4I_INT_CTL_RF_F34 BIT(4)
+#define SUN4I_INT_CTL_TF_E34 BIT(12)
+#define SUN4I_INT_CTL_TC BIT(16)
+
+#define SUN4I_INT_STA_REG 0x10
+
+#define SUN4I_DMA_CTL_REG 0x14
+
+#define SUN4I_WAIT_REG 0x18
+
+#define SUN4I_CLK_CTL_REG 0x1c
+#define SUN4I_CLK_CTL_CDR2_MASK 0xff
+#define SUN4I_CLK_CTL_CDR2(div) ((div) & SUN4I_CLK_CTL_CDR2_MASK)
+#define SUN4I_CLK_CTL_CDR1_MASK 0xf
+#define SUN4I_CLK_CTL_CDR1(div) (((div) & SUN4I_CLK_CTL_CDR1_MASK) << 8)
+#define SUN4I_CLK_CTL_DRS BIT(12)
+
+#define SUN4I_MAX_XFER_SIZE 0xffffff
+
+#define SUN4I_BURST_CNT_REG 0x20
+#define SUN4I_BURST_CNT(cnt) ((cnt) & SUN4I_MAX_XFER_SIZE)
+
+#define SUN4I_XMIT_CNT_REG 0x24
+#define SUN4I_XMIT_CNT(cnt) ((cnt) & SUN4I_MAX_XFER_SIZE)
+
+
+#define SUN4I_FIFO_STA_REG 0x28
+#define SUN4I_FIFO_STA_RF_CNT_MASK 0x7f
+#define SUN4I_FIFO_STA_RF_CNT_BITS 0
+#define SUN4I_FIFO_STA_TF_CNT_MASK 0x7f
+#define SUN4I_FIFO_STA_TF_CNT_BITS 16
+
+struct sun4i_spi {
+ struct spi_master *master;
+ void __iomem *base_addr;
+ struct clk *hclk;
+ struct clk *mclk;
+
+ struct completion done;
+
+ const u8 *tx_buf;
+ u8 *rx_buf;
+ int len;
+};
+
+static inline u32 sun4i_spi_read(struct sun4i_spi *sspi, u32 reg)
+{
+ return readl(sspi->base_addr + reg);
+}
+
+static inline void sun4i_spi_write(struct sun4i_spi *sspi, u32 reg, u32 value)
+{
+ writel(value, sspi->base_addr + reg);
+}
+
+static inline u32 sun4i_spi_get_tx_fifo_count(struct sun4i_spi *sspi)
+{
+ u32 reg = sun4i_spi_read(sspi, SUN4I_FIFO_STA_REG);
+
+ reg >>= SUN4I_FIFO_STA_TF_CNT_BITS;
+
+ return reg & SUN4I_FIFO_STA_TF_CNT_MASK;
+}
+
+static inline void sun4i_spi_enable_interrupt(struct sun4i_spi *sspi, u32 mask)
+{
+ u32 reg = sun4i_spi_read(sspi, SUN4I_INT_CTL_REG);
+
+ reg |= mask;
+ sun4i_spi_write(sspi, SUN4I_INT_CTL_REG, reg);
+}
+
+static inline void sun4i_spi_disable_interrupt(struct sun4i_spi *sspi, u32 mask)
+{
+ u32 reg = sun4i_spi_read(sspi, SUN4I_INT_CTL_REG);
+
+ reg &= ~mask;
+ sun4i_spi_write(sspi, SUN4I_INT_CTL_REG, reg);
+}
+
+static inline void sun4i_spi_drain_fifo(struct sun4i_spi *sspi, int len)
+{
+ u32 reg, cnt;
+ u8 byte;
+
+ /* See how much data is available */
+ reg = sun4i_spi_read(sspi, SUN4I_FIFO_STA_REG);
+ reg &= SUN4I_FIFO_STA_RF_CNT_MASK;
+ cnt = reg >> SUN4I_FIFO_STA_RF_CNT_BITS;
+
+ if (len > cnt)
+ len = cnt;
+
+ while (len--) {
+ byte = readb(sspi->base_addr + SUN4I_RXDATA_REG);
+ if (sspi->rx_buf)
+ *sspi->rx_buf++ = byte;
+ }
+}
+
+static inline void sun4i_spi_fill_fifo(struct sun4i_spi *sspi, int len)
+{
+ u32 cnt;
+ u8 byte;
+
+ /* See how much data we can fit */
+ cnt = SUN4I_FIFO_DEPTH - sun4i_spi_get_tx_fifo_count(sspi);
+
+ len = min3(len, (int)cnt, sspi->len);
+
+ while (len--) {
+ byte = sspi->tx_buf ? *sspi->tx_buf++ : 0;
+ writeb(byte, sspi->base_addr + SUN4I_TXDATA_REG);
+ sspi->len--;
+ }
+}
+
+static void sun4i_spi_set_cs(struct spi_device *spi, bool enable)
+{
+ struct sun4i_spi *sspi = spi_master_get_devdata(spi->master);
+ u32 reg;
+
+ reg = sun4i_spi_read(sspi, SUN4I_CTL_REG);
+
+ reg &= ~SUN4I_CTL_CS_MASK;
+ reg |= SUN4I_CTL_CS(spi->chip_select);
+
+ /* We want to control the chip select manually */
+ reg |= SUN4I_CTL_CS_MANUAL;
+
+ if (enable)
+ reg |= SUN4I_CTL_CS_LEVEL;
+ else
+ reg &= ~SUN4I_CTL_CS_LEVEL;
+
+ /*
+ * Even though this looks irrelevant since we are supposed to
+ * be controlling the chip select manually, this bit also
+ * controls the levels of the chip select for inactive
+ * devices.
+ *
+ * If we don't set it, the chip select level will go low by
+ * default when the device is idle, which is not really
+ * expected in the common case where the chip select is active
+ * low.
+ */
+ if (spi->mode & SPI_CS_HIGH)
+ reg &= ~SUN4I_CTL_CS_ACTIVE_LOW;
+ else
+ reg |= SUN4I_CTL_CS_ACTIVE_LOW;
+
+ sun4i_spi_write(sspi, SUN4I_CTL_REG, reg);
+}
+
+static size_t sun4i_spi_max_transfer_size(struct spi_device *spi)
+{
+ return SUN4I_MAX_XFER_SIZE - 1;
+}
+
+static int sun4i_spi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *tfr)
+{
+ struct sun4i_spi *sspi = spi_master_get_devdata(master);
+ unsigned int mclk_rate, div, timeout;
+ unsigned int start, end, tx_time;
+ unsigned int tx_len = 0;
+ int ret = 0;
+ u32 reg;
+
+ /* We don't support transfers larger than what the burst counter can hold */
+ if (tfr->len > SUN4I_MAX_XFER_SIZE)
+ return -EMSGSIZE;
+
+ if (tfr->tx_buf && tfr->len >= SUN4I_MAX_XFER_SIZE)
+ return -EMSGSIZE;
+
+ reinit_completion(&sspi->done);
+ sspi->tx_buf = tfr->tx_buf;
+ sspi->rx_buf = tfr->rx_buf;
+ sspi->len = tfr->len;
+
+ /* Clear pending interrupts */
+ sun4i_spi_write(sspi, SUN4I_INT_STA_REG, ~0);
+
+ reg = sun4i_spi_read(sspi, SUN4I_CTL_REG);
+
+ /* Reset FIFOs */
+ sun4i_spi_write(sspi, SUN4I_CTL_REG,
+ reg | SUN4I_CTL_RF_RST | SUN4I_CTL_TF_RST);
+
+ /*
+ * Setup the transfer control register: Chip Select,
+ * polarities, etc.
+ */
+ if (spi->mode & SPI_CPOL)
+ reg |= SUN4I_CTL_CPOL;
+ else
+ reg &= ~SUN4I_CTL_CPOL;
+
+ if (spi->mode & SPI_CPHA)
+ reg |= SUN4I_CTL_CPHA;
+ else
+ reg &= ~SUN4I_CTL_CPHA;
+
+ if (spi->mode & SPI_LSB_FIRST)
+ reg |= SUN4I_CTL_LMTF;
+ else
+ reg &= ~SUN4I_CTL_LMTF;
+
+ /*
+ * If it's a TX only transfer, we don't want to fill the RX
+ * FIFO with bogus data
+ */
+ if (sspi->rx_buf)
+ reg &= ~SUN4I_CTL_DHB;
+ else
+ reg |= SUN4I_CTL_DHB;
+
+ sun4i_spi_write(sspi, SUN4I_CTL_REG, reg);
+
+ /* Ensure that we have a parent clock fast enough */
+ mclk_rate = clk_get_rate(sspi->mclk);
+ if (mclk_rate < (2 * tfr->speed_hz)) {
+ clk_set_rate(sspi->mclk, 2 * tfr->speed_hz);
+ mclk_rate = clk_get_rate(sspi->mclk);
+ }
+
+ /*
+ * Setup clock divider.
+ *
+ * We have two choices there. Either we can use the clock
+ * divide rate 1, which is calculated thanks to this formula:
+ * SPI_CLK = MOD_CLK / (2 ^ (cdr + 1))
+ * Or we can use CDR2, which is calculated with the formula:
+ * SPI_CLK = MOD_CLK / (2 * (cdr + 1))
+ * Whether we use the former or the latter is set through the
+ * DRS bit.
+ *
+ * First try CDR2, and if we can't reach the expected
+ * frequency, fall back to CDR1.
+ */
+ div = mclk_rate / (2 * tfr->speed_hz);
+ if (div <= (SUN4I_CLK_CTL_CDR2_MASK + 1)) {
+ if (div > 0)
+ div--;
+
+ reg = SUN4I_CLK_CTL_CDR2(div) | SUN4I_CLK_CTL_DRS;
+ } else {
+ div = ilog2(mclk_rate) - ilog2(tfr->speed_hz);
+ reg = SUN4I_CLK_CTL_CDR1(div);
+ }
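+	/*
+	 * Illustrative example (hypothetical values): with mclk at 24 MHz and
+	 * a requested 4 MHz, div = 3, which fits in CDR2, so CDR2 = 2 and
+	 * SPI_CLK = 24 MHz / (2 * (2 + 1)) = 4 MHz.
+	 */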
+
+ sun4i_spi_write(sspi, SUN4I_CLK_CTL_REG, reg);
+
+ /* Setup the transfer now... */
+ if (sspi->tx_buf)
+ tx_len = tfr->len;
+
+ /* Setup the counters */
+ sun4i_spi_write(sspi, SUN4I_BURST_CNT_REG, SUN4I_BURST_CNT(tfr->len));
+ sun4i_spi_write(sspi, SUN4I_XMIT_CNT_REG, SUN4I_XMIT_CNT(tx_len));
+
+ /*
+ * Fill the TX FIFO
+ * Filling the FIFO fully causes timeout for some reason
+ * at least on spi2 on A10s
+ */
+ sun4i_spi_fill_fifo(sspi, SUN4I_FIFO_DEPTH - 1);
+
+ /* Enable the interrupts */
+ sun4i_spi_enable_interrupt(sspi, SUN4I_INT_CTL_TC |
+ SUN4I_INT_CTL_RF_F34);
+ /* Only enable Tx FIFO interrupt if we really need it */
+ if (tx_len > SUN4I_FIFO_DEPTH)
+ sun4i_spi_enable_interrupt(sspi, SUN4I_INT_CTL_TF_E34);
+
+ /* Start the transfer */
+ reg = sun4i_spi_read(sspi, SUN4I_CTL_REG);
+ sun4i_spi_write(sspi, SUN4I_CTL_REG, reg | SUN4I_CTL_XCH);
+
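+	/*
+	 * Allow roughly twice the theoretical transfer time (len * 8 bits at
+	 * speed_hz, expressed in milliseconds), with a 100 ms floor, before
+	 * declaring a timeout.
+	 */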
+ tx_time = max(tfr->len * 8 * 2 / (tfr->speed_hz / 1000), 100U);
+ start = jiffies;
+ timeout = wait_for_completion_timeout(&sspi->done,
+ msecs_to_jiffies(tx_time));
+ end = jiffies;
+ if (!timeout) {
+ dev_warn(&master->dev,
+ "%s: timeout transferring %u bytes@%iHz for %i(%i)ms",
+ dev_name(&spi->dev), tfr->len, tfr->speed_hz,
+ jiffies_to_msecs(end - start), tx_time);
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+out:
+ sun4i_spi_write(sspi, SUN4I_INT_CTL_REG, 0);
+
+ return ret;
+}
+
+static irqreturn_t sun4i_spi_handler(int irq, void *dev_id)
+{
+ struct sun4i_spi *sspi = dev_id;
+ u32 status = sun4i_spi_read(sspi, SUN4I_INT_STA_REG);
+
+ /* Transfer complete */
+ if (status & SUN4I_INT_CTL_TC) {
+ sun4i_spi_write(sspi, SUN4I_INT_STA_REG, SUN4I_INT_CTL_TC);
+ sun4i_spi_drain_fifo(sspi, SUN4I_FIFO_DEPTH);
+ complete(&sspi->done);
+ return IRQ_HANDLED;
+ }
+
+ /* Receive FIFO 3/4 full */
+ if (status & SUN4I_INT_CTL_RF_F34) {
+ sun4i_spi_drain_fifo(sspi, SUN4I_FIFO_DEPTH);
+ /* Only clear the interrupt _after_ draining the FIFO */
+ sun4i_spi_write(sspi, SUN4I_INT_STA_REG, SUN4I_INT_CTL_RF_F34);
+ return IRQ_HANDLED;
+ }
+
+ /* Transmit FIFO 3/4 empty */
+ if (status & SUN4I_INT_CTL_TF_E34) {
+ sun4i_spi_fill_fifo(sspi, SUN4I_FIFO_DEPTH);
+
+ if (!sspi->len)
+ /* nothing left to transmit */
+ sun4i_spi_disable_interrupt(sspi, SUN4I_INT_CTL_TF_E34);
+
+ /* Only clear the interrupt _after_ re-seeding the FIFO */
+ sun4i_spi_write(sspi, SUN4I_INT_STA_REG, SUN4I_INT_CTL_TF_E34);
+
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int sun4i_spi_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct sun4i_spi *sspi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_prepare_enable(sspi->hclk);
+ if (ret) {
+ dev_err(dev, "Couldn't enable AHB clock\n");
+ goto out;
+ }
+
+ ret = clk_prepare_enable(sspi->mclk);
+ if (ret) {
+ dev_err(dev, "Couldn't enable module clock\n");
+ goto err;
+ }
+
+ sun4i_spi_write(sspi, SUN4I_CTL_REG,
+ SUN4I_CTL_ENABLE | SUN4I_CTL_MASTER | SUN4I_CTL_TP);
+
+ return 0;
+
+err:
+ clk_disable_unprepare(sspi->hclk);
+out:
+ return ret;
+}
+
+static int sun4i_spi_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct sun4i_spi *sspi = spi_master_get_devdata(master);
+
+ clk_disable_unprepare(sspi->mclk);
+ clk_disable_unprepare(sspi->hclk);
+
+ return 0;
+}
+
+static int sun4i_spi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct sun4i_spi *sspi;
+ int ret = 0, irq;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct sun4i_spi));
+ if (!master) {
+ dev_err(&pdev->dev, "Unable to allocate SPI Master\n");
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, master);
+ sspi = spi_master_get_devdata(master);
+
+ sspi->base_addr = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(sspi->base_addr)) {
+ ret = PTR_ERR(sspi->base_addr);
+ goto err_free_master;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = -ENXIO;
+ goto err_free_master;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, sun4i_spi_handler,
+ 0, "sun4i-spi", sspi);
+ if (ret) {
+ dev_err(&pdev->dev, "Cannot request IRQ\n");
+ goto err_free_master;
+ }
+
+ sspi->master = master;
+ master->max_speed_hz = 100 * 1000 * 1000;
+ master->min_speed_hz = 3 * 1000;
+ master->set_cs = sun4i_spi_set_cs;
+ master->transfer_one = sun4i_spi_transfer_one;
+ master->num_chipselect = 4;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->dev.of_node = pdev->dev.of_node;
+ master->auto_runtime_pm = true;
+ master->max_transfer_size = sun4i_spi_max_transfer_size;
+
+ sspi->hclk = devm_clk_get(&pdev->dev, "ahb");
+ if (IS_ERR(sspi->hclk)) {
+ dev_err(&pdev->dev, "Unable to acquire AHB clock\n");
+ ret = PTR_ERR(sspi->hclk);
+ goto err_free_master;
+ }
+
+ sspi->mclk = devm_clk_get(&pdev->dev, "mod");
+ if (IS_ERR(sspi->mclk)) {
+ dev_err(&pdev->dev, "Unable to acquire module clock\n");
+ ret = PTR_ERR(sspi->mclk);
+ goto err_free_master;
+ }
+
+ init_completion(&sspi->done);
+
+ /*
+ * This wake-up/shutdown pattern is used so that the device can be
+ * woken up even if runtime PM is disabled.
+ */
+ ret = sun4i_spi_runtime_resume(&pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Couldn't resume the device\n");
+ goto err_free_master;
+ }
+
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_idle(&pdev->dev);
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret) {
+ dev_err(&pdev->dev, "cannot register SPI master\n");
+ goto err_pm_disable;
+ }
+
+ return 0;
+
+err_pm_disable:
+ pm_runtime_disable(&pdev->dev);
+ sun4i_spi_runtime_suspend(&pdev->dev);
+err_free_master:
+ spi_master_put(master);
+ return ret;
+}
+
+static int sun4i_spi_remove(struct platform_device *pdev)
+{
+ pm_runtime_force_suspend(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id sun4i_spi_match[] = {
+ { .compatible = "allwinner,sun4i-a10-spi", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sun4i_spi_match);
+
+static const struct dev_pm_ops sun4i_spi_pm_ops = {
+ .runtime_resume = sun4i_spi_runtime_resume,
+ .runtime_suspend = sun4i_spi_runtime_suspend,
+};
+
+static struct platform_driver sun4i_spi_driver = {
+ .probe = sun4i_spi_probe,
+ .remove = sun4i_spi_remove,
+ .driver = {
+ .name = "sun4i-spi",
+ .of_match_table = sun4i_spi_match,
+ .pm = &sun4i_spi_pm_ops,
+ },
+};
+module_platform_driver(sun4i_spi_driver);
+
+MODULE_AUTHOR("Pan Nan <pannan@allwinnertech.com>");
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_DESCRIPTION("Allwinner A1X/A20 SPI controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c
new file mode 100644
index 000000000..d79853ba7
--- /dev/null
+++ b/drivers/spi/spi-sun6i.c
@@ -0,0 +1,752 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2012 - 2014 Allwinner Tech
+ * Pan Nan <pannan@allwinnertech.com>
+ *
+ * Copyright (C) 2014 Maxime Ripard
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/dmaengine.h>
+
+#include <linux/spi/spi.h>
+
+#define SUN6I_AUTOSUSPEND_TIMEOUT 2000
+
+#define SUN6I_FIFO_DEPTH 128
+#define SUN8I_FIFO_DEPTH 64
+
+#define SUN6I_GBL_CTL_REG 0x04
+#define SUN6I_GBL_CTL_BUS_ENABLE BIT(0)
+#define SUN6I_GBL_CTL_MASTER BIT(1)
+#define SUN6I_GBL_CTL_TP BIT(7)
+#define SUN6I_GBL_CTL_RST BIT(31)
+
+#define SUN6I_TFR_CTL_REG 0x08
+#define SUN6I_TFR_CTL_CPHA BIT(0)
+#define SUN6I_TFR_CTL_CPOL BIT(1)
+#define SUN6I_TFR_CTL_SPOL BIT(2)
+#define SUN6I_TFR_CTL_CS_MASK 0x30
+#define SUN6I_TFR_CTL_CS(cs) (((cs) << 4) & SUN6I_TFR_CTL_CS_MASK)
+#define SUN6I_TFR_CTL_CS_MANUAL BIT(6)
+#define SUN6I_TFR_CTL_CS_LEVEL BIT(7)
+#define SUN6I_TFR_CTL_DHB BIT(8)
+#define SUN6I_TFR_CTL_FBS BIT(12)
+#define SUN6I_TFR_CTL_XCH BIT(31)
+
+#define SUN6I_INT_CTL_REG 0x10
+#define SUN6I_INT_CTL_RF_RDY BIT(0)
+#define SUN6I_INT_CTL_TF_ERQ BIT(4)
+#define SUN6I_INT_CTL_RF_OVF BIT(8)
+#define SUN6I_INT_CTL_TC BIT(12)
+
+#define SUN6I_INT_STA_REG 0x14
+
+#define SUN6I_FIFO_CTL_REG 0x18
+#define SUN6I_FIFO_CTL_RF_RDY_TRIG_LEVEL_MASK 0xff
+#define SUN6I_FIFO_CTL_RF_DRQ_EN BIT(8)
+#define SUN6I_FIFO_CTL_RF_RDY_TRIG_LEVEL_BITS 0
+#define SUN6I_FIFO_CTL_RF_RST BIT(15)
+#define SUN6I_FIFO_CTL_TF_ERQ_TRIG_LEVEL_MASK 0xff
+#define SUN6I_FIFO_CTL_TF_ERQ_TRIG_LEVEL_BITS 16
+#define SUN6I_FIFO_CTL_TF_DRQ_EN BIT(24)
+#define SUN6I_FIFO_CTL_TF_RST BIT(31)
+
+#define SUN6I_FIFO_STA_REG 0x1c
+#define SUN6I_FIFO_STA_RF_CNT_MASK GENMASK(7, 0)
+#define SUN6I_FIFO_STA_TF_CNT_MASK GENMASK(23, 16)
+
+#define SUN6I_CLK_CTL_REG 0x24
+#define SUN6I_CLK_CTL_CDR2_MASK 0xff
+#define SUN6I_CLK_CTL_CDR2(div) (((div) & SUN6I_CLK_CTL_CDR2_MASK) << 0)
+#define SUN6I_CLK_CTL_CDR1_MASK 0xf
+#define SUN6I_CLK_CTL_CDR1(div) (((div) & SUN6I_CLK_CTL_CDR1_MASK) << 8)
+#define SUN6I_CLK_CTL_DRS BIT(12)
+
+#define SUN6I_MAX_XFER_SIZE 0xffffff
+
+#define SUN6I_BURST_CNT_REG 0x30
+
+#define SUN6I_XMIT_CNT_REG 0x34
+
+#define SUN6I_BURST_CTL_CNT_REG 0x38
+
+#define SUN6I_TXDATA_REG 0x200
+#define SUN6I_RXDATA_REG 0x300
+
+struct sun6i_spi {
+ struct spi_master *master;
+ void __iomem *base_addr;
+ dma_addr_t dma_addr_rx;
+ dma_addr_t dma_addr_tx;
+ struct clk *hclk;
+ struct clk *mclk;
+ struct reset_control *rstc;
+
+ struct completion done;
+ struct completion dma_rx_done;
+
+ const u8 *tx_buf;
+ u8 *rx_buf;
+ int len;
+ unsigned long fifo_depth;
+};
+
+static inline u32 sun6i_spi_read(struct sun6i_spi *sspi, u32 reg)
+{
+ return readl(sspi->base_addr + reg);
+}
+
+static inline void sun6i_spi_write(struct sun6i_spi *sspi, u32 reg, u32 value)
+{
+ writel(value, sspi->base_addr + reg);
+}
+
+static inline u32 sun6i_spi_get_rx_fifo_count(struct sun6i_spi *sspi)
+{
+ u32 reg = sun6i_spi_read(sspi, SUN6I_FIFO_STA_REG);
+
+ return FIELD_GET(SUN6I_FIFO_STA_RF_CNT_MASK, reg);
+}
+
+static inline u32 sun6i_spi_get_tx_fifo_count(struct sun6i_spi *sspi)
+{
+ u32 reg = sun6i_spi_read(sspi, SUN6I_FIFO_STA_REG);
+
+ return FIELD_GET(SUN6I_FIFO_STA_TF_CNT_MASK, reg);
+}
+
+static inline void sun6i_spi_disable_interrupt(struct sun6i_spi *sspi, u32 mask)
+{
+ u32 reg = sun6i_spi_read(sspi, SUN6I_INT_CTL_REG);
+
+ reg &= ~mask;
+ sun6i_spi_write(sspi, SUN6I_INT_CTL_REG, reg);
+}
+
+static inline void sun6i_spi_drain_fifo(struct sun6i_spi *sspi)
+{
+ u32 len;
+ u8 byte;
+
+ /* See how much data is available */
+ len = sun6i_spi_get_rx_fifo_count(sspi);
+
+ while (len--) {
+ byte = readb(sspi->base_addr + SUN6I_RXDATA_REG);
+ if (sspi->rx_buf)
+ *sspi->rx_buf++ = byte;
+ }
+}
+
+static inline void sun6i_spi_fill_fifo(struct sun6i_spi *sspi)
+{
+ u32 cnt;
+ int len;
+ u8 byte;
+
+ /* See how much data we can fit */
+ cnt = sspi->fifo_depth - sun6i_spi_get_tx_fifo_count(sspi);
+
+ len = min((int)cnt, sspi->len);
+
+ while (len--) {
+ byte = sspi->tx_buf ? *sspi->tx_buf++ : 0;
+ writeb(byte, sspi->base_addr + SUN6I_TXDATA_REG);
+ sspi->len--;
+ }
+}
+
+static void sun6i_spi_set_cs(struct spi_device *spi, bool enable)
+{
+ struct sun6i_spi *sspi = spi_master_get_devdata(spi->master);
+ u32 reg;
+
+ reg = sun6i_spi_read(sspi, SUN6I_TFR_CTL_REG);
+ reg &= ~SUN6I_TFR_CTL_CS_MASK;
+ reg |= SUN6I_TFR_CTL_CS(spi->chip_select);
+
+ if (enable)
+ reg |= SUN6I_TFR_CTL_CS_LEVEL;
+ else
+ reg &= ~SUN6I_TFR_CTL_CS_LEVEL;
+
+ sun6i_spi_write(sspi, SUN6I_TFR_CTL_REG, reg);
+}
+
+static size_t sun6i_spi_max_transfer_size(struct spi_device *spi)
+{
+ return SUN6I_MAX_XFER_SIZE - 1;
+}
+
+static void sun6i_spi_dma_rx_cb(void *param)
+{
+ struct sun6i_spi *sspi = param;
+
+ complete(&sspi->dma_rx_done);
+}
+
+static int sun6i_spi_prepare_dma(struct sun6i_spi *sspi,
+ struct spi_transfer *tfr)
+{
+ struct dma_async_tx_descriptor *rxdesc, *txdesc;
+ struct spi_master *master = sspi->master;
+
+ rxdesc = NULL;
+ if (tfr->rx_buf) {
+ struct dma_slave_config rxconf = {
+ .direction = DMA_DEV_TO_MEM,
+ .src_addr = sspi->dma_addr_rx,
+ .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
+ .src_maxburst = 8,
+ };
+
+ dmaengine_slave_config(master->dma_rx, &rxconf);
+
+ rxdesc = dmaengine_prep_slave_sg(master->dma_rx,
+ tfr->rx_sg.sgl,
+ tfr->rx_sg.nents,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT);
+ if (!rxdesc)
+ return -EINVAL;
+ rxdesc->callback_param = sspi;
+ rxdesc->callback = sun6i_spi_dma_rx_cb;
+ }
+
+ txdesc = NULL;
+ if (tfr->tx_buf) {
+ struct dma_slave_config txconf = {
+ .direction = DMA_MEM_TO_DEV,
+ .dst_addr = sspi->dma_addr_tx,
+ .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ .dst_maxburst = 8,
+ };
+
+ dmaengine_slave_config(master->dma_tx, &txconf);
+
+ txdesc = dmaengine_prep_slave_sg(master->dma_tx,
+ tfr->tx_sg.sgl,
+ tfr->tx_sg.nents,
+ DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT);
+ if (!txdesc) {
+ if (rxdesc)
+ dmaengine_terminate_sync(master->dma_rx);
+ return -EINVAL;
+ }
+ }
+
+ if (tfr->rx_buf) {
+ dmaengine_submit(rxdesc);
+ dma_async_issue_pending(master->dma_rx);
+ }
+
+ if (tfr->tx_buf) {
+ dmaengine_submit(txdesc);
+ dma_async_issue_pending(master->dma_tx);
+ }
+
+ return 0;
+}
+
+static int sun6i_spi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *tfr)
+{
+ struct sun6i_spi *sspi = spi_master_get_devdata(master);
+ unsigned int mclk_rate, div, div_cdr1, div_cdr2, timeout;
+ unsigned int start, end, tx_time;
+ unsigned int trig_level;
+ unsigned int tx_len = 0, rx_len = 0;
+ bool use_dma;
+ int ret = 0;
+ u32 reg;
+
+ if (tfr->len > SUN6I_MAX_XFER_SIZE)
+ return -EINVAL;
+
+ reinit_completion(&sspi->done);
+ reinit_completion(&sspi->dma_rx_done);
+ sspi->tx_buf = tfr->tx_buf;
+ sspi->rx_buf = tfr->rx_buf;
+ sspi->len = tfr->len;
+ use_dma = master->can_dma ? master->can_dma(master, spi, tfr) : false;
+
+ /* Clear pending interrupts */
+ sun6i_spi_write(sspi, SUN6I_INT_STA_REG, ~0);
+
+ /* Reset FIFO */
+ sun6i_spi_write(sspi, SUN6I_FIFO_CTL_REG,
+ SUN6I_FIFO_CTL_RF_RST | SUN6I_FIFO_CTL_TF_RST);
+
+ reg = 0;
+
+ if (!use_dma) {
+ /*
+ * Setup FIFO interrupt trigger level
+ * Here we choose 3/4 of the full FIFO depth, as it's the
+ * value hardcoded in the old generation of Allwinner SPI
+ * controllers (see spi-sun4i.c).
+ */
+ trig_level = sspi->fifo_depth / 4 * 3;
+ } else {
+ /*
+ * Setup FIFO DMA request trigger level
+ * We choose 1/2 of the full FIFO depth; that value will
+ * also be used as the DMA burst length.
+ */
+ trig_level = sspi->fifo_depth / 2;
+
+ if (tfr->tx_buf)
+ reg |= SUN6I_FIFO_CTL_TF_DRQ_EN;
+ if (tfr->rx_buf)
+ reg |= SUN6I_FIFO_CTL_RF_DRQ_EN;
+ }
+
+ reg |= (trig_level << SUN6I_FIFO_CTL_RF_RDY_TRIG_LEVEL_BITS) |
+ (trig_level << SUN6I_FIFO_CTL_TF_ERQ_TRIG_LEVEL_BITS);
+
+ sun6i_spi_write(sspi, SUN6I_FIFO_CTL_REG, reg);
+
+ /*
+ * Setup the transfer control register: Chip Select,
+ * polarities, etc.
+ */
+ reg = sun6i_spi_read(sspi, SUN6I_TFR_CTL_REG);
+
+ if (spi->mode & SPI_CPOL)
+ reg |= SUN6I_TFR_CTL_CPOL;
+ else
+ reg &= ~SUN6I_TFR_CTL_CPOL;
+
+ if (spi->mode & SPI_CPHA)
+ reg |= SUN6I_TFR_CTL_CPHA;
+ else
+ reg &= ~SUN6I_TFR_CTL_CPHA;
+
+ if (spi->mode & SPI_LSB_FIRST)
+ reg |= SUN6I_TFR_CTL_FBS;
+ else
+ reg &= ~SUN6I_TFR_CTL_FBS;
+
+ /*
+ * If it's a TX only transfer, we don't want to fill the RX
+ * FIFO with bogus data
+ */
+ if (sspi->rx_buf) {
+ reg &= ~SUN6I_TFR_CTL_DHB;
+ rx_len = tfr->len;
+ } else {
+ reg |= SUN6I_TFR_CTL_DHB;
+ }
+
+ /* We want to control the chip select manually */
+ reg |= SUN6I_TFR_CTL_CS_MANUAL;
+
+ sun6i_spi_write(sspi, SUN6I_TFR_CTL_REG, reg);
+
+ /* Ensure that we have a parent clock fast enough */
+ mclk_rate = clk_get_rate(sspi->mclk);
+ if (mclk_rate < (2 * tfr->speed_hz)) {
+ clk_set_rate(sspi->mclk, 2 * tfr->speed_hz);
+ mclk_rate = clk_get_rate(sspi->mclk);
+ }
+
+ /*
+ * Setup clock divider.
+ *
+ * We have two choices here. Either we can use CDR1, which is
+ * calculated with this formula:
+ * SPI_CLK = MOD_CLK / (2 ^ cdr)
+ * Or we can use CDR2, which is calculated with the formula:
+ * SPI_CLK = MOD_CLK / (2 * (cdr + 1))
+ * Whether we use the former or the latter is set through the
+ * DRS bit.
+ *
+ * First try CDR2, and if we can't reach the expected
+ * frequency, fall back to CDR1.
+ */
+ div_cdr1 = DIV_ROUND_UP(mclk_rate, tfr->speed_hz);
+ div_cdr2 = DIV_ROUND_UP(div_cdr1, 2);
+ if (div_cdr2 <= (SUN6I_CLK_CTL_CDR2_MASK + 1)) {
+ reg = SUN6I_CLK_CTL_CDR2(div_cdr2 - 1) | SUN6I_CLK_CTL_DRS;
+ tfr->effective_speed_hz = mclk_rate / (2 * div_cdr2);
+ } else {
+ div = min(SUN6I_CLK_CTL_CDR1_MASK, order_base_2(div_cdr1));
+ reg = SUN6I_CLK_CTL_CDR1(div);
+ tfr->effective_speed_hz = mclk_rate / (1 << div);
+ }
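+
+ /*
+ * Worked example (rates assumed for illustration): with mclk_rate at
+ * 24 MHz and speed_hz at 10 MHz, div_cdr1 = 3 and div_cdr2 = 2, so
+ * CDR2 is used and the effective rate is 24 MHz / (2 * 2) = 6 MHz.
+ */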
+
+ sun6i_spi_write(sspi, SUN6I_CLK_CTL_REG, reg);
+ /* Finally enable the bus - doing so before might raise SCK to HIGH */
+ reg = sun6i_spi_read(sspi, SUN6I_GBL_CTL_REG);
+ reg |= SUN6I_GBL_CTL_BUS_ENABLE;
+ sun6i_spi_write(sspi, SUN6I_GBL_CTL_REG, reg);
+
+ /* Setup the transfer now... */
+ if (sspi->tx_buf)
+ tx_len = tfr->len;
+
+ /* Setup the counters */
+ sun6i_spi_write(sspi, SUN6I_BURST_CNT_REG, tfr->len);
+ sun6i_spi_write(sspi, SUN6I_XMIT_CNT_REG, tx_len);
+ sun6i_spi_write(sspi, SUN6I_BURST_CTL_CNT_REG, tx_len);
+
+ if (!use_dma) {
+ /* Fill the TX FIFO */
+ sun6i_spi_fill_fifo(sspi);
+ } else {
+ ret = sun6i_spi_prepare_dma(sspi, tfr);
+ if (ret) {
+ dev_warn(&master->dev,
+ "%s: prepare DMA failed, ret=%d",
+ dev_name(&spi->dev), ret);
+ return ret;
+ }
+ }
+
+ /* Enable the interrupts */
+ reg = SUN6I_INT_CTL_TC;
+
+ if (!use_dma) {
+ if (rx_len > sspi->fifo_depth)
+ reg |= SUN6I_INT_CTL_RF_RDY;
+ if (tx_len > sspi->fifo_depth)
+ reg |= SUN6I_INT_CTL_TF_ERQ;
+ }
+
+ sun6i_spi_write(sspi, SUN6I_INT_CTL_REG, reg);
+
+ /* Start the transfer */
+ reg = sun6i_spi_read(sspi, SUN6I_TFR_CTL_REG);
+ sun6i_spi_write(sspi, SUN6I_TFR_CTL_REG, reg | SUN6I_TFR_CTL_XCH);
+
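+ /* Rough worst-case estimate: 2x the transfer's bit time in ms, min. 100 ms */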
+ tx_time = max(tfr->len * 8 * 2 / (tfr->speed_hz / 1000), 100U);
+ start = jiffies;
+ timeout = wait_for_completion_timeout(&sspi->done,
+ msecs_to_jiffies(tx_time));
+
+ if (!use_dma) {
+ sun6i_spi_drain_fifo(sspi);
+ } else {
+ if (timeout && rx_len) {
+ /*
+ * Even though RX on the peripheral side has finished,
+ * the RX DMA might still be in flight.
+ */
+ timeout = wait_for_completion_timeout(&sspi->dma_rx_done,
+ timeout);
+ if (!timeout)
+ dev_warn(&master->dev, "RX DMA timeout\n");
+ }
+ }
+
+ end = jiffies;
+ if (!timeout) {
+ dev_warn(&master->dev,
+ "%s: timeout transferring %u bytes@%iHz for %i(%i)ms",
+ dev_name(&spi->dev), tfr->len, tfr->speed_hz,
+ jiffies_to_msecs(end - start), tx_time);
+ ret = -ETIMEDOUT;
+ }
+
+ sun6i_spi_write(sspi, SUN6I_INT_CTL_REG, 0);
+
+ if (ret && use_dma) {
+ dmaengine_terminate_sync(master->dma_rx);
+ dmaengine_terminate_sync(master->dma_tx);
+ }
+
+ return ret;
+}
+
+static irqreturn_t sun6i_spi_handler(int irq, void *dev_id)
+{
+ struct sun6i_spi *sspi = dev_id;
+ u32 status = sun6i_spi_read(sspi, SUN6I_INT_STA_REG);
+
+ /* Transfer complete */
+ if (status & SUN6I_INT_CTL_TC) {
+ sun6i_spi_write(sspi, SUN6I_INT_STA_REG, SUN6I_INT_CTL_TC);
+ complete(&sspi->done);
+ return IRQ_HANDLED;
+ }
+
+ /* Receive FIFO 3/4 full */
+ if (status & SUN6I_INT_CTL_RF_RDY) {
+ sun6i_spi_drain_fifo(sspi);
+ /* Only clear the interrupt _after_ draining the FIFO */
+ sun6i_spi_write(sspi, SUN6I_INT_STA_REG, SUN6I_INT_CTL_RF_RDY);
+ return IRQ_HANDLED;
+ }
+
+ /* Transmit FIFO 3/4 empty */
+ if (status & SUN6I_INT_CTL_TF_ERQ) {
+ sun6i_spi_fill_fifo(sspi);
+
+ if (!sspi->len)
+ /* nothing left to transmit */
+ sun6i_spi_disable_interrupt(sspi, SUN6I_INT_CTL_TF_ERQ);
+
+ /* Only clear the interrupt _after_ re-seeding the FIFO */
+ sun6i_spi_write(sspi, SUN6I_INT_STA_REG, SUN6I_INT_CTL_TF_ERQ);
+
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int sun6i_spi_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct sun6i_spi *sspi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_prepare_enable(sspi->hclk);
+ if (ret) {
+ dev_err(dev, "Couldn't enable AHB clock\n");
+ goto out;
+ }
+
+ ret = clk_prepare_enable(sspi->mclk);
+ if (ret) {
+ dev_err(dev, "Couldn't enable module clock\n");
+ goto err;
+ }
+
+ ret = reset_control_deassert(sspi->rstc);
+ if (ret) {
+ dev_err(dev, "Couldn't deassert the device from reset\n");
+ goto err2;
+ }
+
+ sun6i_spi_write(sspi, SUN6I_GBL_CTL_REG,
+ SUN6I_GBL_CTL_MASTER | SUN6I_GBL_CTL_TP);
+
+ return 0;
+
+err2:
+ clk_disable_unprepare(sspi->mclk);
+err:
+ clk_disable_unprepare(sspi->hclk);
+out:
+ return ret;
+}
+
+static int sun6i_spi_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct sun6i_spi *sspi = spi_master_get_devdata(master);
+
+ reset_control_assert(sspi->rstc);
+ clk_disable_unprepare(sspi->mclk);
+ clk_disable_unprepare(sspi->hclk);
+
+ return 0;
+}
+
+static bool sun6i_spi_can_dma(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct sun6i_spi *sspi = spi_master_get_devdata(master);
+
+ /*
+ * If the number of SPI words to transfer is less than or equal to
+ * the FIFO depth, we can just fill the FIFO and wait for a single
+ * IRQ, so don't bother setting up DMA
+ */
+ return xfer->len > sspi->fifo_depth;
+}
+
+static int sun6i_spi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct sun6i_spi *sspi;
+ struct resource *mem;
+ int ret = 0, irq;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct sun6i_spi));
+ if (!master) {
+ dev_err(&pdev->dev, "Unable to allocate SPI Master\n");
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, master);
+ sspi = spi_master_get_devdata(master);
+
+ sspi->base_addr = devm_platform_get_and_ioremap_resource(pdev, 0, &mem);
+ if (IS_ERR(sspi->base_addr)) {
+ ret = PTR_ERR(sspi->base_addr);
+ goto err_free_master;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = -ENXIO;
+ goto err_free_master;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, sun6i_spi_handler,
+ 0, "sun6i-spi", sspi);
+ if (ret) {
+ dev_err(&pdev->dev, "Cannot request IRQ\n");
+ goto err_free_master;
+ }
+
+ sspi->master = master;
+ sspi->fifo_depth = (unsigned long)of_device_get_match_data(&pdev->dev);
+
+ master->max_speed_hz = 100 * 1000 * 1000;
+ master->min_speed_hz = 3 * 1000;
+ master->use_gpio_descriptors = true;
+ master->set_cs = sun6i_spi_set_cs;
+ master->transfer_one = sun6i_spi_transfer_one;
+ master->num_chipselect = 4;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->dev.of_node = pdev->dev.of_node;
+ master->auto_runtime_pm = true;
+ master->max_transfer_size = sun6i_spi_max_transfer_size;
+
+ sspi->hclk = devm_clk_get(&pdev->dev, "ahb");
+ if (IS_ERR(sspi->hclk)) {
+ dev_err(&pdev->dev, "Unable to acquire AHB clock\n");
+ ret = PTR_ERR(sspi->hclk);
+ goto err_free_master;
+ }
+
+ sspi->mclk = devm_clk_get(&pdev->dev, "mod");
+ if (IS_ERR(sspi->mclk)) {
+ dev_err(&pdev->dev, "Unable to acquire module clock\n");
+ ret = PTR_ERR(sspi->mclk);
+ goto err_free_master;
+ }
+
+ init_completion(&sspi->done);
+ init_completion(&sspi->dma_rx_done);
+
+ sspi->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(sspi->rstc)) {
+ dev_err(&pdev->dev, "Couldn't get reset controller\n");
+ ret = PTR_ERR(sspi->rstc);
+ goto err_free_master;
+ }
+
+ master->dma_tx = dma_request_chan(&pdev->dev, "tx");
+ if (IS_ERR(master->dma_tx)) {
+ /* Check tx to see if we need to defer probing the driver */
+ if (PTR_ERR(master->dma_tx) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto err_free_master;
+ }
+ dev_warn(&pdev->dev, "Failed to request TX DMA channel\n");
+ master->dma_tx = NULL;
+ }
+
+ master->dma_rx = dma_request_chan(&pdev->dev, "rx");
+ if (IS_ERR(master->dma_rx)) {
+ if (PTR_ERR(master->dma_rx) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto err_free_dma_tx;
+ }
+ dev_warn(&pdev->dev, "Failed to request RX DMA channel\n");
+ master->dma_rx = NULL;
+ }
+
+ if (master->dma_tx && master->dma_rx) {
+ sspi->dma_addr_tx = mem->start + SUN6I_TXDATA_REG;
+ sspi->dma_addr_rx = mem->start + SUN6I_RXDATA_REG;
+ master->can_dma = sun6i_spi_can_dma;
+ }
+
+ /*
+ * This wake-up/shutdown pattern is used so that the device can be
+ * woken up even if runtime PM is disabled.
+ */
+ ret = sun6i_spi_runtime_resume(&pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Couldn't resume the device\n");
+ goto err_free_dma_rx;
+ }
+
+ pm_runtime_set_autosuspend_delay(&pdev->dev, SUN6I_AUTOSUSPEND_TIMEOUT);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret) {
+ dev_err(&pdev->dev, "cannot register SPI master\n");
+ goto err_pm_disable;
+ }
+
+ return 0;
+
+err_pm_disable:
+ pm_runtime_disable(&pdev->dev);
+ sun6i_spi_runtime_suspend(&pdev->dev);
+err_free_dma_rx:
+ if (master->dma_rx)
+ dma_release_channel(master->dma_rx);
+err_free_dma_tx:
+ if (master->dma_tx)
+ dma_release_channel(master->dma_tx);
+err_free_master:
+ spi_master_put(master);
+ return ret;
+}
+
+static int sun6i_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+
+ pm_runtime_force_suspend(&pdev->dev);
+
+ if (master->dma_tx)
+ dma_release_channel(master->dma_tx);
+ if (master->dma_rx)
+ dma_release_channel(master->dma_rx);
+ return 0;
+}
+
+static const struct of_device_id sun6i_spi_match[] = {
+ { .compatible = "allwinner,sun6i-a31-spi", .data = (void *)SUN6I_FIFO_DEPTH },
+ { .compatible = "allwinner,sun8i-h3-spi", .data = (void *)SUN8I_FIFO_DEPTH },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sun6i_spi_match);
+
+static const struct dev_pm_ops sun6i_spi_pm_ops = {
+ .runtime_resume = sun6i_spi_runtime_resume,
+ .runtime_suspend = sun6i_spi_runtime_suspend,
+};
+
+static struct platform_driver sun6i_spi_driver = {
+ .probe = sun6i_spi_probe,
+ .remove = sun6i_spi_remove,
+ .driver = {
+ .name = "sun6i-spi",
+ .of_match_table = sun6i_spi_match,
+ .pm = &sun6i_spi_pm_ops,
+ },
+};
+module_platform_driver(sun6i_spi_driver);
+
+MODULE_AUTHOR("Pan Nan <pannan@allwinnertech.com>");
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_DESCRIPTION("Allwinner A31 SPI controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-sunplus-sp7021.c b/drivers/spi/spi-sunplus-sp7021.c
new file mode 100644
index 000000000..f1fa88777
--- /dev/null
+++ b/drivers/spi/spi-sunplus-sp7021.c
@@ -0,0 +1,578 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (c) 2021 Sunplus Inc.
+// Author: Li-hao Kuo <lhjeff911@gmail.com>
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/spi/spi.h>
+
+#define SP7021_DATA_RDY_REG 0x0044
+#define SP7021_SLAVE_DMA_CTRL_REG 0x0048
+#define SP7021_SLAVE_DMA_LENGTH_REG 0x004c
+#define SP7021_SLAVE_DMA_ADDR_REG 0x004c
+
+#define SP7021_SLAVE_DATA_RDY BIT(0)
+#define SP7021_SLAVE_SW_RST BIT(1)
+#define SP7021_SLA_DMA_W_INT BIT(8)
+#define SP7021_SLAVE_CLR_INT BIT(8)
+#define SP7021_SLAVE_DMA_EN BIT(0)
+#define SP7021_SLAVE_DMA_RW BIT(6)
+#define SP7021_SLAVE_DMA_CMD GENMASK(3, 2)
+
+#define SP7021_FIFO_REG 0x0034
+#define SP7021_SPI_STATUS_REG 0x0038
+#define SP7021_SPI_CONFIG_REG 0x003c
+#define SP7021_INT_BUSY_REG 0x004c
+#define SP7021_DMA_CTRL_REG 0x0050
+
+#define SP7021_SPI_START_FD BIT(0)
+#define SP7021_FD_SW_RST BIT(1)
+#define SP7021_TX_EMP_FLAG BIT(2)
+#define SP7021_RX_EMP_FLAG BIT(4)
+#define SP7021_RX_FULL_FLAG BIT(5)
+#define SP7021_FINISH_FLAG BIT(6)
+
+#define SP7021_TX_CNT_MASK GENMASK(11, 8)
+#define SP7021_RX_CNT_MASK GENMASK(15, 12)
+#define SP7021_TX_LEN_MASK GENMASK(23, 16)
+#define SP7021_GET_LEN_MASK GENMASK(31, 24)
+#define SP7021_SET_TX_LEN GENMASK(23, 16)
+#define SP7021_SET_XFER_LEN GENMASK(31, 24)
+
+#define SP7021_CPOL_FD BIT(0)
+#define SP7021_CPHA_R BIT(1)
+#define SP7021_CPHA_W BIT(2)
+#define SP7021_LSB_SEL BIT(4)
+#define SP7021_CS_POR BIT(5)
+#define SP7021_FD_SEL BIT(6)
+
+#define SP7021_RX_UNIT GENMASK(8, 7)
+#define SP7021_TX_UNIT GENMASK(10, 9)
+#define SP7021_TX_EMP_FLAG_MASK BIT(11)
+#define SP7021_RX_FULL_FLAG_MASK BIT(14)
+#define SP7021_FINISH_FLAG_MASK BIT(15)
+#define SP7021_CLEAN_RW_BYTE GENMASK(10, 7)
+#define SP7021_CLEAN_FLUG_MASK GENMASK(15, 11)
+#define SP7021_CLK_MASK GENMASK(31, 16)
+
+#define SP7021_INT_BYPASS BIT(3)
+#define SP7021_CLR_MASTER_INT BIT(6)
+
+#define SP7021_SPI_DATA_SIZE (255)
+#define SP7021_FIFO_DATA_LEN (16)
+
+enum {
+ SP7021_MASTER_MODE = 0,
+ SP7021_SLAVE_MODE = 1,
+};
+
+struct sp7021_spi_ctlr {
+ struct device *dev;
+ struct spi_controller *ctlr;
+ void __iomem *m_base;
+ void __iomem *s_base;
+ u32 xfer_conf;
+ int mode;
+ int m_irq;
+ int s_irq;
+ struct clk *spi_clk;
+ struct reset_control *rstc;
+ // data xfer lock
+ struct mutex buf_lock;
+ struct completion isr_done;
+ struct completion slave_isr;
+ unsigned int rx_cur_len;
+ unsigned int tx_cur_len;
+ unsigned int data_unit;
+ const u8 *tx_buf;
+ u8 *rx_buf;
+};
+
+static irqreturn_t sp7021_spi_slave_irq(int irq, void *dev)
+{
+ struct sp7021_spi_ctlr *pspim = dev;
+ unsigned int data_status;
+
+ data_status = readl(pspim->s_base + SP7021_DATA_RDY_REG);
+ data_status |= SP7021_SLAVE_CLR_INT;
+ writel(data_status, pspim->s_base + SP7021_DATA_RDY_REG);
+ complete(&pspim->slave_isr);
+ return IRQ_HANDLED;
+}
+
+static int sp7021_spi_slave_abort(struct spi_controller *ctlr)
+{
+ struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr);
+
+ complete(&pspim->slave_isr);
+ complete(&pspim->isr_done);
+ return 0;
+}
+
+static int sp7021_spi_slave_tx(struct spi_device *spi, struct spi_transfer *xfer)
+{
+ struct sp7021_spi_ctlr *pspim = spi_controller_get_devdata(spi->controller);
+ u32 value;
+
+ reinit_completion(&pspim->slave_isr);
+ value = SP7021_SLAVE_DMA_EN | SP7021_SLAVE_DMA_RW | FIELD_PREP(SP7021_SLAVE_DMA_CMD, 3);
+ writel(value, pspim->s_base + SP7021_SLAVE_DMA_CTRL_REG);
+ writel(xfer->len, pspim->s_base + SP7021_SLAVE_DMA_LENGTH_REG);
+ writel(xfer->tx_dma, pspim->s_base + SP7021_SLAVE_DMA_ADDR_REG);
+ value = readl(pspim->s_base + SP7021_DATA_RDY_REG);
+ value |= SP7021_SLAVE_DATA_RDY;
+ writel(value, pspim->s_base + SP7021_DATA_RDY_REG);
+ if (wait_for_completion_interruptible(&pspim->isr_done)) {
+ dev_err(&spi->dev, "%s() wait_for_completion err\n", __func__);
+ return -EINTR;
+ }
+ return 0;
+}
+
+static int sp7021_spi_slave_rx(struct spi_device *spi, struct spi_transfer *xfer)
+{
+ struct sp7021_spi_ctlr *pspim = spi_controller_get_devdata(spi->controller);
+ u32 value;
+
+ reinit_completion(&pspim->isr_done);
+ value = SP7021_SLAVE_DMA_EN | FIELD_PREP(SP7021_SLAVE_DMA_CMD, 3);
+ writel(value, pspim->s_base + SP7021_SLAVE_DMA_CTRL_REG);
+ writel(xfer->len, pspim->s_base + SP7021_SLAVE_DMA_LENGTH_REG);
+ writel(xfer->rx_dma, pspim->s_base + SP7021_SLAVE_DMA_ADDR_REG);
+ if (wait_for_completion_interruptible(&pspim->isr_done)) {
+ dev_err(&spi->dev, "%s() wait_for_completion err\n", __func__);
+ return -EINTR;
+ }
+ writel(SP7021_SLAVE_SW_RST, pspim->s_base + SP7021_SLAVE_DMA_CTRL_REG);
+ return 0;
+}
+
+static void sp7021_spi_master_rb(struct sp7021_spi_ctlr *pspim, unsigned int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++) {
+ pspim->rx_buf[pspim->rx_cur_len] =
+ readl(pspim->m_base + SP7021_FIFO_REG);
+ pspim->rx_cur_len++;
+ }
+}
+
+static void sp7021_spi_master_wb(struct sp7021_spi_ctlr *pspim, unsigned int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++) {
+ writel(pspim->tx_buf[pspim->tx_cur_len],
+ pspim->m_base + SP7021_FIFO_REG);
+ pspim->tx_cur_len++;
+ }
+}
+
+static irqreturn_t sp7021_spi_master_irq(int irq, void *dev)
+{
+ struct sp7021_spi_ctlr *pspim = dev;
+ unsigned int tx_cnt, total_len;
+ unsigned int tx_len, rx_cnt;
+ unsigned int fd_status;
+ bool isrdone = false;
+ u32 value;
+
+ fd_status = readl(pspim->m_base + SP7021_SPI_STATUS_REG);
+ tx_cnt = FIELD_GET(SP7021_TX_CNT_MASK, fd_status);
+ tx_len = FIELD_GET(SP7021_TX_LEN_MASK, fd_status);
+ total_len = FIELD_GET(SP7021_GET_LEN_MASK, fd_status);
+
+ if ((fd_status & SP7021_TX_EMP_FLAG) && (fd_status & SP7021_RX_EMP_FLAG) && total_len == 0)
+ return IRQ_NONE;
+
+ if (tx_len == 0 && total_len == 0)
+ return IRQ_NONE;
+
+ rx_cnt = FIELD_GET(SP7021_RX_CNT_MASK, fd_status);
+ if (fd_status & SP7021_RX_FULL_FLAG)
+ rx_cnt = pspim->data_unit;
+
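+ /* Write at most the bytes still pending, bounded by the free FIFO space */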
+ tx_cnt = min(tx_len - pspim->tx_cur_len, pspim->data_unit - tx_cnt);
+ dev_dbg(pspim->dev, "fd_st=0x%x rx_c:%d tx_c:%d tx_l:%d",
+ fd_status, rx_cnt, tx_cnt, tx_len);
+
+ if (rx_cnt > 0)
+ sp7021_spi_master_rb(pspim, rx_cnt);
+ if (tx_cnt > 0)
+ sp7021_spi_master_wb(pspim, tx_cnt);
+
+ fd_status = readl(pspim->m_base + SP7021_SPI_STATUS_REG);
+ tx_len = FIELD_GET(SP7021_TX_LEN_MASK, fd_status);
+ total_len = FIELD_GET(SP7021_GET_LEN_MASK, fd_status);
+
+ if (fd_status & SP7021_FINISH_FLAG || tx_len == pspim->tx_cur_len) {
+ while (total_len != pspim->rx_cur_len) {
+ fd_status = readl(pspim->m_base + SP7021_SPI_STATUS_REG);
+ total_len = FIELD_GET(SP7021_GET_LEN_MASK, fd_status);
+ if (fd_status & SP7021_RX_FULL_FLAG)
+ rx_cnt = pspim->data_unit;
+ else
+ rx_cnt = FIELD_GET(SP7021_RX_CNT_MASK, fd_status);
+
+ if (rx_cnt > 0)
+ sp7021_spi_master_rb(pspim, rx_cnt);
+ }
+ value = readl(pspim->m_base + SP7021_INT_BUSY_REG);
+ value |= SP7021_CLR_MASTER_INT;
+ writel(value, pspim->m_base + SP7021_INT_BUSY_REG);
+ writel(SP7021_FINISH_FLAG, pspim->m_base + SP7021_SPI_STATUS_REG);
+ isrdone = true;
+ }
+
+ if (isrdone)
+ complete(&pspim->isr_done);
+ return IRQ_HANDLED;
+}
+
+static void sp7021_prep_transfer(struct spi_controller *ctlr, struct spi_device *spi)
+{
+ struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr);
+
+ pspim->tx_cur_len = 0;
+ pspim->rx_cur_len = 0;
+ pspim->data_unit = SP7021_FIFO_DATA_LEN;
+}
+
+// Preliminary setup of CS, CPOL, CPHA and LSB
+static int sp7021_spi_controller_prepare_message(struct spi_controller *ctlr,
+ struct spi_message *msg)
+{
+ struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr);
+ struct spi_device *s = msg->spi;
+ u32 value, rs = 0;
+
+ value = readl(pspim->m_base + SP7021_SPI_STATUS_REG);
+ value |= SP7021_FD_SW_RST;
+ writel(value, pspim->m_base + SP7021_SPI_STATUS_REG);
+ rs |= SP7021_FD_SEL;
+ if (s->mode & SPI_CPOL)
+ rs |= SP7021_CPOL_FD;
+
+ if (s->mode & SPI_LSB_FIRST)
+ rs |= SP7021_LSB_SEL;
+
+ if (s->mode & SPI_CS_HIGH)
+ rs |= SP7021_CS_POR;
+
+ if (s->mode & SPI_CPHA)
+ rs |= SP7021_CPHA_R;
+ else
+ rs |= SP7021_CPHA_W;
+
+ rs |= FIELD_PREP(SP7021_TX_UNIT, 0) | FIELD_PREP(SP7021_RX_UNIT, 0);
+ pspim->xfer_conf = rs;
+ if (pspim->xfer_conf & SP7021_CPOL_FD)
+ writel(pspim->xfer_conf, pspim->m_base + SP7021_SPI_CONFIG_REG);
+
+ return 0;
+}
+
+static void sp7021_spi_setup_clk(struct spi_controller *ctlr, struct spi_transfer *xfer)
+{
+ struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr);
+ u32 clk_rate, clk_sel, div;
+
+ clk_rate = clk_get_rate(pspim->spi_clk);
+ div = max(2U, clk_rate / xfer->speed_hz);
+
+ clk_sel = (div / 2) - 1;
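+ /*
+ * Illustrative only (spi_clk rate assumed): with a 200 MHz spi_clk and
+ * a 25 MHz transfer, div = 8 and clk_sel = 3, i.e. the clock field
+ * encodes div / 2 - 1.
+ */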
+ pspim->xfer_conf &= ~SP7021_CLK_MASK;
+ pspim->xfer_conf |= FIELD_PREP(SP7021_CLK_MASK, clk_sel);
+ writel(pspim->xfer_conf, pspim->m_base + SP7021_SPI_CONFIG_REG);
+}
+
+static int sp7021_spi_master_transfer_one(struct spi_controller *ctlr, struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr);
+ unsigned long timeout = msecs_to_jiffies(1000);
+ unsigned int xfer_cnt, xfer_len, last_len;
+ unsigned int i, len_temp;
+ u32 reg_temp;
+
+ xfer_cnt = xfer->len / SP7021_SPI_DATA_SIZE;
+ last_len = xfer->len % SP7021_SPI_DATA_SIZE;
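+ /*
+ * The transfer is split into SP7021_SPI_DATA_SIZE (255) byte chunks;
+ * e.g. a 600 byte transfer is handled as 255 + 255 + 90 bytes
+ * (xfer_cnt = 2, last_len = 90).
+ */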
+
+ for (i = 0; i <= xfer_cnt; i++) {
+ mutex_lock(&pspim->buf_lock);
+ sp7021_prep_transfer(ctlr, spi);
+ sp7021_spi_setup_clk(ctlr, xfer);
+ reinit_completion(&pspim->isr_done);
+
+ if (i == xfer_cnt)
+ xfer_len = last_len;
+ else
+ xfer_len = SP7021_SPI_DATA_SIZE;
+
+ pspim->tx_buf = xfer->tx_buf + i * SP7021_SPI_DATA_SIZE;
+ pspim->rx_buf = xfer->rx_buf + i * SP7021_SPI_DATA_SIZE;
+
+ if (pspim->tx_cur_len < xfer_len) {
+ len_temp = min(pspim->data_unit, xfer_len);
+ sp7021_spi_master_wb(pspim, len_temp);
+ }
+ reg_temp = readl(pspim->m_base + SP7021_SPI_CONFIG_REG);
+ reg_temp &= ~SP7021_CLEAN_RW_BYTE;
+ reg_temp &= ~SP7021_CLEAN_FLUG_MASK;
+ reg_temp |= SP7021_FD_SEL | SP7021_FINISH_FLAG_MASK |
+ SP7021_TX_EMP_FLAG_MASK | SP7021_RX_FULL_FLAG_MASK |
+ FIELD_PREP(SP7021_TX_UNIT, 0) | FIELD_PREP(SP7021_RX_UNIT, 0);
+ writel(reg_temp, pspim->m_base + SP7021_SPI_CONFIG_REG);
+
+ reg_temp = FIELD_PREP(SP7021_SET_TX_LEN, xfer_len) |
+ FIELD_PREP(SP7021_SET_XFER_LEN, xfer_len) |
+ SP7021_SPI_START_FD;
+ writel(reg_temp, pspim->m_base + SP7021_SPI_STATUS_REG);
+
+ if (!wait_for_completion_interruptible_timeout(&pspim->isr_done, timeout)) {
+ dev_err(&spi->dev, "wait_for_completion err\n");
+ mutex_unlock(&pspim->buf_lock);
+ return -ETIMEDOUT;
+ }
+
+ reg_temp = readl(pspim->m_base + SP7021_SPI_STATUS_REG);
+ if (reg_temp & SP7021_FINISH_FLAG) {
+ writel(SP7021_FINISH_FLAG, pspim->m_base + SP7021_SPI_STATUS_REG);
+ writel(readl(pspim->m_base + SP7021_SPI_CONFIG_REG) &
+ SP7021_CLEAN_FLUG_MASK, pspim->m_base + SP7021_SPI_CONFIG_REG);
+ }
+
+ if (pspim->xfer_conf & SP7021_CPOL_FD)
+ writel(pspim->xfer_conf, pspim->m_base + SP7021_SPI_CONFIG_REG);
+
+ mutex_unlock(&pspim->buf_lock);
+ }
+ return 0;
+}
+
+static int sp7021_spi_slave_transfer_one(struct spi_controller *ctlr, struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr);
+ struct device *dev = pspim->dev;
+ int ret;
+
+ if (xfer->tx_buf && !xfer->rx_buf) {
+ xfer->tx_dma = dma_map_single(dev, (void *)xfer->tx_buf,
+ xfer->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, xfer->tx_dma))
+ return -ENOMEM;
+ ret = sp7021_spi_slave_tx(spi, xfer);
+ dma_unmap_single(dev, xfer->tx_dma, xfer->len, DMA_TO_DEVICE);
+ } else if (xfer->rx_buf && !xfer->tx_buf) {
+ xfer->rx_dma = dma_map_single(dev, xfer->rx_buf, xfer->len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, xfer->rx_dma))
+ return -ENOMEM;
+ ret = sp7021_spi_slave_rx(spi, xfer);
+ dma_unmap_single(dev, xfer->rx_dma, xfer->len, DMA_FROM_DEVICE);
+ } else {
+ dev_dbg(&ctlr->dev, "%s() wrong command\n", __func__);
+ return -EINVAL;
+ }
+
+ spi_finalize_current_transfer(ctlr);
+ return ret;
+}
+
+static void sp7021_spi_disable_unprepare(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
+static void sp7021_spi_reset_control_assert(void *data)
+{
+ reset_control_assert(data);
+}
+
+static int sp7021_spi_controller_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sp7021_spi_ctlr *pspim;
+ struct spi_controller *ctlr;
+ int mode, ret;
+
+ pdev->id = of_alias_get_id(pdev->dev.of_node, "sp_spi");
+
+ if (device_property_read_bool(dev, "spi-slave"))
+ mode = SP7021_SLAVE_MODE;
+ else
+ mode = SP7021_MASTER_MODE;
+
+ if (mode == SP7021_SLAVE_MODE)
+ ctlr = devm_spi_alloc_slave(dev, sizeof(*pspim));
+ else
+ ctlr = devm_spi_alloc_master(dev, sizeof(*pspim));
+ if (!ctlr)
+ return -ENOMEM;
+ device_set_node(&ctlr->dev, dev_fwnode(dev));
+ ctlr->bus_num = pdev->id;
+ ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST;
+ ctlr->auto_runtime_pm = true;
+ ctlr->prepare_message = sp7021_spi_controller_prepare_message;
+ if (mode == SP7021_SLAVE_MODE) {
+ ctlr->transfer_one = sp7021_spi_slave_transfer_one;
+ ctlr->slave_abort = sp7021_spi_slave_abort;
+ ctlr->flags = SPI_CONTROLLER_HALF_DUPLEX;
+ } else {
+ ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
+ ctlr->min_speed_hz = 40000;
+ ctlr->max_speed_hz = 25000000;
+ ctlr->use_gpio_descriptors = true;
+ ctlr->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
+ ctlr->transfer_one = sp7021_spi_master_transfer_one;
+ }
+ platform_set_drvdata(pdev, ctlr);
+ pspim = spi_controller_get_devdata(ctlr);
+ pspim->mode = mode;
+ pspim->ctlr = ctlr;
+ pspim->dev = dev;
+ mutex_init(&pspim->buf_lock);
+ init_completion(&pspim->isr_done);
+ init_completion(&pspim->slave_isr);
+
+ pspim->m_base = devm_platform_ioremap_resource_byname(pdev, "master");
+ if (IS_ERR(pspim->m_base))
+ return dev_err_probe(dev, PTR_ERR(pspim->m_base), "m_base get fail\n");
+
+ pspim->s_base = devm_platform_ioremap_resource_byname(pdev, "slave");
+ if (IS_ERR(pspim->s_base))
+ return dev_err_probe(dev, PTR_ERR(pspim->s_base), "s_base get fail\n");
+
+ pspim->m_irq = platform_get_irq_byname(pdev, "master_risc");
+ if (pspim->m_irq < 0)
+ return pspim->m_irq;
+
+ pspim->s_irq = platform_get_irq_byname(pdev, "slave_risc");
+ if (pspim->s_irq < 0)
+ return pspim->s_irq;
+
+ pspim->spi_clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(pspim->spi_clk))
+ return dev_err_probe(dev, PTR_ERR(pspim->spi_clk), "clk get fail\n");
+
+ pspim->rstc = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(pspim->rstc))
+ return dev_err_probe(dev, PTR_ERR(pspim->rstc), "rst get fail\n");
+
+ ret = clk_prepare_enable(pspim->spi_clk);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to enable clk\n");
+
+ ret = devm_add_action_or_reset(dev, sp7021_spi_disable_unprepare, pspim->spi_clk);
+ if (ret)
+ return ret;
+
+ ret = reset_control_deassert(pspim->rstc);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to deassert reset\n");
+
+ ret = devm_add_action_or_reset(dev, sp7021_spi_reset_control_assert, pspim->rstc);
+ if (ret)
+ return ret;
+
+ ret = devm_request_irq(dev, pspim->m_irq, sp7021_spi_master_irq,
+ IRQF_TRIGGER_RISING, pdev->name, pspim);
+ if (ret)
+ return ret;
+
+ ret = devm_request_irq(dev, pspim->s_irq, sp7021_spi_slave_irq,
+ IRQF_TRIGGER_RISING, pdev->name, pspim);
+ if (ret)
+ return ret;
+
+ pm_runtime_enable(dev);
+ ret = spi_register_controller(ctlr);
+ if (ret) {
+ pm_runtime_disable(dev);
+ return dev_err_probe(dev, ret, "spi_register_master fail\n");
+ }
+ return 0;
+}
+
+static int sp7021_spi_controller_remove(struct platform_device *pdev)
+{
+ struct spi_controller *ctlr = dev_get_drvdata(&pdev->dev);
+
+ spi_unregister_controller(ctlr);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ return 0;
+}
+
+static int __maybe_unused sp7021_spi_controller_suspend(struct device *dev)
+{
+ struct spi_controller *ctlr = dev_get_drvdata(dev);
+ struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr);
+
+ return reset_control_assert(pspim->rstc);
+}
+
+static int __maybe_unused sp7021_spi_controller_resume(struct device *dev)
+{
+ struct spi_controller *ctlr = dev_get_drvdata(dev);
+ struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr);
+
+ reset_control_deassert(pspim->rstc);
+ return clk_prepare_enable(pspim->spi_clk);
+}
+
+#ifdef CONFIG_PM
+static int sp7021_spi_runtime_suspend(struct device *dev)
+{
+ struct spi_controller *ctlr = dev_get_drvdata(dev);
+ struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr);
+
+ return reset_control_assert(pspim->rstc);
+}
+
+static int sp7021_spi_runtime_resume(struct device *dev)
+{
+ struct spi_controller *ctlr = dev_get_drvdata(dev);
+ struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr);
+
+ return reset_control_deassert(pspim->rstc);
+}
+#endif
+
+static const struct dev_pm_ops sp7021_spi_pm_ops = {
+ SET_RUNTIME_PM_OPS(sp7021_spi_runtime_suspend,
+ sp7021_spi_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(sp7021_spi_controller_suspend,
+ sp7021_spi_controller_resume)
+};
+
+static const struct of_device_id sp7021_spi_controller_ids[] = {
+ { .compatible = "sunplus,sp7021-spi" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sp7021_spi_controller_ids);
+
+static struct platform_driver sp7021_spi_controller_driver = {
+ .probe = sp7021_spi_controller_probe,
+ .remove = sp7021_spi_controller_remove,
+ .driver = {
+ .name = "sunplus,sp7021-spi-controller",
+ .of_match_table = sp7021_spi_controller_ids,
+ .pm = &sp7021_spi_pm_ops,
+ },
+};
+module_platform_driver(sp7021_spi_controller_driver);
+
+MODULE_AUTHOR("Li-hao Kuo <lhjeff911@gmail.com>");
+MODULE_DESCRIPTION("Sunplus SPI controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-synquacer.c b/drivers/spi/spi-synquacer.c
new file mode 100644
index 000000000..dc188f920
--- /dev/null
+++ b/drivers/spi/spi-synquacer.c
@@ -0,0 +1,830 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Synquacer HSSPI controller driver
+//
+// Copyright (c) 2015-2018 Socionext Inc.
+// Copyright (c) 2018-2019 Linaro Ltd.
+//
+
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/spinlock.h>
+#include <linux/clk.h>
+
+/* HSSPI register address definitions */
+#define SYNQUACER_HSSPI_REG_MCTRL 0x00
+#define SYNQUACER_HSSPI_REG_PCC0 0x04
+#define SYNQUACER_HSSPI_REG_PCC(n) (SYNQUACER_HSSPI_REG_PCC0 + (n) * 4)
+#define SYNQUACER_HSSPI_REG_TXF 0x14
+#define SYNQUACER_HSSPI_REG_TXE 0x18
+#define SYNQUACER_HSSPI_REG_TXC 0x1C
+#define SYNQUACER_HSSPI_REG_RXF 0x20
+#define SYNQUACER_HSSPI_REG_RXE 0x24
+#define SYNQUACER_HSSPI_REG_RXC 0x28
+#define SYNQUACER_HSSPI_REG_FAULTF 0x2C
+#define SYNQUACER_HSSPI_REG_FAULTC 0x30
+#define SYNQUACER_HSSPI_REG_DMCFG 0x34
+#define SYNQUACER_HSSPI_REG_DMSTART 0x38
+#define SYNQUACER_HSSPI_REG_DMBCC 0x3C
+#define SYNQUACER_HSSPI_REG_DMSTATUS 0x40
+#define SYNQUACER_HSSPI_REG_FIFOCFG 0x4C
+#define SYNQUACER_HSSPI_REG_TX_FIFO 0x50
+#define SYNQUACER_HSSPI_REG_RX_FIFO 0x90
+#define SYNQUACER_HSSPI_REG_MID 0xFC
+
+/* HSSPI register bit definitions */
+#define SYNQUACER_HSSPI_MCTRL_MEN BIT(0)
+#define SYNQUACER_HSSPI_MCTRL_COMMAND_SEQUENCE_EN BIT(1)
+#define SYNQUACER_HSSPI_MCTRL_CDSS BIT(3)
+#define SYNQUACER_HSSPI_MCTRL_MES BIT(4)
+#define SYNQUACER_HSSPI_MCTRL_SYNCON BIT(5)
+
+#define SYNQUACER_HSSPI_PCC_CPHA BIT(0)
+#define SYNQUACER_HSSPI_PCC_CPOL BIT(1)
+#define SYNQUACER_HSSPI_PCC_ACES BIT(2)
+#define SYNQUACER_HSSPI_PCC_RTM BIT(3)
+#define SYNQUACER_HSSPI_PCC_SSPOL BIT(4)
+#define SYNQUACER_HSSPI_PCC_SDIR BIT(7)
+#define SYNQUACER_HSSPI_PCC_SENDIAN BIT(8)
+#define SYNQUACER_HSSPI_PCC_SAFESYNC BIT(16)
+#define SYNQUACER_HSSPI_PCC_SS2CD_SHIFT 5U
+#define SYNQUACER_HSSPI_PCC_CDRS_MASK 0x7f
+#define SYNQUACER_HSSPI_PCC_CDRS_SHIFT 9U
+
+#define SYNQUACER_HSSPI_TXF_FIFO_FULL BIT(0)
+#define SYNQUACER_HSSPI_TXF_FIFO_EMPTY BIT(1)
+#define SYNQUACER_HSSPI_TXF_SLAVE_RELEASED BIT(6)
+
+#define SYNQUACER_HSSPI_TXE_FIFO_FULL BIT(0)
+#define SYNQUACER_HSSPI_TXE_FIFO_EMPTY BIT(1)
+#define SYNQUACER_HSSPI_TXE_SLAVE_RELEASED BIT(6)
+
+#define SYNQUACER_HSSPI_RXF_FIFO_MORE_THAN_THRESHOLD BIT(5)
+#define SYNQUACER_HSSPI_RXF_SLAVE_RELEASED BIT(6)
+
+#define SYNQUACER_HSSPI_RXE_FIFO_MORE_THAN_THRESHOLD BIT(5)
+#define SYNQUACER_HSSPI_RXE_SLAVE_RELEASED BIT(6)
+
+#define SYNQUACER_HSSPI_DMCFG_SSDC BIT(1)
+#define SYNQUACER_HSSPI_DMCFG_MSTARTEN BIT(2)
+
+#define SYNQUACER_HSSPI_DMSTART_START BIT(0)
+#define SYNQUACER_HSSPI_DMSTOP_STOP BIT(8)
+#define SYNQUACER_HSSPI_DMPSEL_CS_MASK 0x3
+#define SYNQUACER_HSSPI_DMPSEL_CS_SHIFT 16U
+#define SYNQUACER_HSSPI_DMTRP_BUS_WIDTH_SHIFT 24U
+#define SYNQUACER_HSSPI_DMTRP_DATA_MASK 0x3
+#define SYNQUACER_HSSPI_DMTRP_DATA_SHIFT 26U
+#define SYNQUACER_HSSPI_DMTRP_DATA_TXRX 0
+#define SYNQUACER_HSSPI_DMTRP_DATA_RX 1
+#define SYNQUACER_HSSPI_DMTRP_DATA_TX 2
+
+#define SYNQUACER_HSSPI_DMSTATUS_RX_DATA_MASK 0x1f
+#define SYNQUACER_HSSPI_DMSTATUS_RX_DATA_SHIFT 8U
+#define SYNQUACER_HSSPI_DMSTATUS_TX_DATA_MASK 0x1f
+#define SYNQUACER_HSSPI_DMSTATUS_TX_DATA_SHIFT 16U
+
+#define SYNQUACER_HSSPI_FIFOCFG_RX_THRESHOLD_MASK 0xf
+#define SYNQUACER_HSSPI_FIFOCFG_RX_THRESHOLD_SHIFT 0U
+#define SYNQUACER_HSSPI_FIFOCFG_TX_THRESHOLD_MASK 0xf
+#define SYNQUACER_HSSPI_FIFOCFG_TX_THRESHOLD_SHIFT 4U
+#define SYNQUACER_HSSPI_FIFOCFG_FIFO_WIDTH_MASK 0x3
+#define SYNQUACER_HSSPI_FIFOCFG_FIFO_WIDTH_SHIFT 8U
+#define SYNQUACER_HSSPI_FIFOCFG_RX_FLUSH BIT(11)
+#define SYNQUACER_HSSPI_FIFOCFG_TX_FLUSH BIT(12)
+
+#define SYNQUACER_HSSPI_FIFO_DEPTH 16U
+#define SYNQUACER_HSSPI_FIFO_TX_THRESHOLD 4U
+#define SYNQUACER_HSSPI_FIFO_RX_THRESHOLD \
+ (SYNQUACER_HSSPI_FIFO_DEPTH - SYNQUACER_HSSPI_FIFO_TX_THRESHOLD)
+
+#define SYNQUACER_HSSPI_TRANSFER_MODE_TX BIT(1)
+#define SYNQUACER_HSSPI_TRANSFER_MODE_RX BIT(2)
+#define SYNQUACER_HSSPI_TRANSFER_TMOUT_MSEC 2000U
+#define SYNQUACER_HSSPI_ENABLE_TMOUT_MSEC 1000U
+
+#define SYNQUACER_HSSPI_CLOCK_SRC_IHCLK 0
+#define SYNQUACER_HSSPI_CLOCK_SRC_IPCLK 1
+
+#define SYNQUACER_HSSPI_NUM_CHIP_SELECT 4U
+#define SYNQUACER_HSSPI_IRQ_NAME_MAX 32U
+
+struct synquacer_spi {
+ struct device *dev;
+ struct completion transfer_done;
+ unsigned int cs;
+ unsigned int bpw;
+ unsigned int mode;
+ unsigned int speed;
+ bool aces, rtm;
+ void *rx_buf;
+ const void *tx_buf;
+ struct clk *clk;
+ int clk_src_type;
+ void __iomem *regs;
+ u32 tx_words, rx_words;
+ unsigned int bus_width;
+ unsigned int transfer_mode;
+ char rx_irq_name[SYNQUACER_HSSPI_IRQ_NAME_MAX];
+ char tx_irq_name[SYNQUACER_HSSPI_IRQ_NAME_MAX];
+};
+
+static int read_fifo(struct synquacer_spi *sspi)
+{
+ u32 len = readl(sspi->regs + SYNQUACER_HSSPI_REG_DMSTATUS);
+
+ len = (len >> SYNQUACER_HSSPI_DMSTATUS_RX_DATA_SHIFT) &
+ SYNQUACER_HSSPI_DMSTATUS_RX_DATA_MASK;
+ len = min(len, sspi->rx_words);
+
+ switch (sspi->bpw) {
+ case 8: {
+ u8 *buf = sspi->rx_buf;
+
+ ioread8_rep(sspi->regs + SYNQUACER_HSSPI_REG_RX_FIFO,
+ buf, len);
+ sspi->rx_buf = buf + len;
+ break;
+ }
+ case 16: {
+ u16 *buf = sspi->rx_buf;
+
+ ioread16_rep(sspi->regs + SYNQUACER_HSSPI_REG_RX_FIFO,
+ buf, len);
+ sspi->rx_buf = buf + len;
+ break;
+ }
+ case 24:
+ /* fallthrough, should use 32-bit access */
+ case 32: {
+ u32 *buf = sspi->rx_buf;
+
+ ioread32_rep(sspi->regs + SYNQUACER_HSSPI_REG_RX_FIFO,
+ buf, len);
+ sspi->rx_buf = buf + len;
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+
+ sspi->rx_words -= len;
+ return 0;
+}
+
+static int write_fifo(struct synquacer_spi *sspi)
+{
+ u32 len = readl(sspi->regs + SYNQUACER_HSSPI_REG_DMSTATUS);
+
+ len = (len >> SYNQUACER_HSSPI_DMSTATUS_TX_DATA_SHIFT) &
+ SYNQUACER_HSSPI_DMSTATUS_TX_DATA_MASK;
+ len = min(SYNQUACER_HSSPI_FIFO_DEPTH - len,
+ sspi->tx_words);
+
+ switch (sspi->bpw) {
+ case 8: {
+ const u8 *buf = sspi->tx_buf;
+
+ iowrite8_rep(sspi->regs + SYNQUACER_HSSPI_REG_TX_FIFO,
+ buf, len);
+ sspi->tx_buf = buf + len;
+ break;
+ }
+ case 16: {
+ const u16 *buf = sspi->tx_buf;
+
+ iowrite16_rep(sspi->regs + SYNQUACER_HSSPI_REG_TX_FIFO,
+ buf, len);
+ sspi->tx_buf = buf + len;
+ break;
+ }
+ case 24:
+ /* fallthrough, should use 32-bit access */
+ case 32: {
+ const u32 *buf = sspi->tx_buf;
+
+ iowrite32_rep(sspi->regs + SYNQUACER_HSSPI_REG_TX_FIFO,
+ buf, len);
+ sspi->tx_buf = buf + len;
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+
+ sspi->tx_words -= len;
+ return 0;
+}
+
+static int synquacer_spi_config(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct synquacer_spi *sspi = spi_master_get_devdata(master);
+ unsigned int speed, mode, bpw, cs, bus_width, transfer_mode;
+ u32 rate, val, div;
+
+ /* Full Duplex only on 1-bit wide bus */
+ if (xfer->rx_buf && xfer->tx_buf &&
+ (xfer->rx_nbits != 1 || xfer->tx_nbits != 1)) {
+ dev_err(sspi->dev,
+ "RX and TX bus widths must be 1-bit for Full-Duplex!\n");
+ return -EINVAL;
+ }
+
+ if (xfer->tx_buf) {
+ bus_width = xfer->tx_nbits;
+ transfer_mode = SYNQUACER_HSSPI_TRANSFER_MODE_TX;
+ } else {
+ bus_width = xfer->rx_nbits;
+ transfer_mode = SYNQUACER_HSSPI_TRANSFER_MODE_RX;
+ }
+
+ mode = spi->mode;
+ cs = spi->chip_select;
+ speed = xfer->speed_hz;
+ bpw = xfer->bits_per_word;
+
+ /* return if nothing to change */
+ if (speed == sspi->speed &&
+ bus_width == sspi->bus_width && bpw == sspi->bpw &&
+ mode == sspi->mode && cs == sspi->cs &&
+ transfer_mode == sspi->transfer_mode) {
+ return 0;
+ }
+
+ sspi->transfer_mode = transfer_mode;
+ rate = master->max_speed_hz;
+
+ div = DIV_ROUND_UP(rate, speed);
+ if (div > 254) {
+ dev_err(sspi->dev, "Requested rate too low (%u)\n",
+ sspi->speed);
+ return -EINVAL;
+ }
+
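+ /*
+ * Illustrative only (rates assumed): with a 100 MHz reference and a
+ * 25 MHz transfer, div = 4; the PCC CDRS field below is programmed
+ * with div / 2.
+ */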
+ val = readl(sspi->regs + SYNQUACER_HSSPI_REG_PCC(cs));
+ val &= ~SYNQUACER_HSSPI_PCC_SAFESYNC;
+ if (bpw == 8 && (mode & (SPI_TX_DUAL | SPI_RX_DUAL)) && div < 3)
+ val |= SYNQUACER_HSSPI_PCC_SAFESYNC;
+ if (bpw == 8 && (mode & (SPI_TX_QUAD | SPI_RX_QUAD)) && div < 6)
+ val |= SYNQUACER_HSSPI_PCC_SAFESYNC;
+ if (bpw == 16 && (mode & (SPI_TX_QUAD | SPI_RX_QUAD)) && div < 3)
+ val |= SYNQUACER_HSSPI_PCC_SAFESYNC;
+
+ if (mode & SPI_CPHA)
+ val |= SYNQUACER_HSSPI_PCC_CPHA;
+ else
+ val &= ~SYNQUACER_HSSPI_PCC_CPHA;
+
+ if (mode & SPI_CPOL)
+ val |= SYNQUACER_HSSPI_PCC_CPOL;
+ else
+ val &= ~SYNQUACER_HSSPI_PCC_CPOL;
+
+ if (mode & SPI_CS_HIGH)
+ val |= SYNQUACER_HSSPI_PCC_SSPOL;
+ else
+ val &= ~SYNQUACER_HSSPI_PCC_SSPOL;
+
+ if (mode & SPI_LSB_FIRST)
+ val |= SYNQUACER_HSSPI_PCC_SDIR;
+ else
+ val &= ~SYNQUACER_HSSPI_PCC_SDIR;
+
+ if (sspi->aces)
+ val |= SYNQUACER_HSSPI_PCC_ACES;
+ else
+ val &= ~SYNQUACER_HSSPI_PCC_ACES;
+
+ if (sspi->rtm)
+ val |= SYNQUACER_HSSPI_PCC_RTM;
+ else
+ val &= ~SYNQUACER_HSSPI_PCC_RTM;
+
+ val |= (3 << SYNQUACER_HSSPI_PCC_SS2CD_SHIFT);
+ val |= SYNQUACER_HSSPI_PCC_SENDIAN;
+
+ val &= ~(SYNQUACER_HSSPI_PCC_CDRS_MASK <<
+ SYNQUACER_HSSPI_PCC_CDRS_SHIFT);
+ val |= ((div >> 1) << SYNQUACER_HSSPI_PCC_CDRS_SHIFT);
+
+ writel(val, sspi->regs + SYNQUACER_HSSPI_REG_PCC(cs));
+
+ val = readl(sspi->regs + SYNQUACER_HSSPI_REG_FIFOCFG);
+ val &= ~(SYNQUACER_HSSPI_FIFOCFG_FIFO_WIDTH_MASK <<
+ SYNQUACER_HSSPI_FIFOCFG_FIFO_WIDTH_SHIFT);
+ val |= ((bpw / 8 - 1) << SYNQUACER_HSSPI_FIFOCFG_FIFO_WIDTH_SHIFT);
+ writel(val, sspi->regs + SYNQUACER_HSSPI_REG_FIFOCFG);
+
+ val = readl(sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
+ val &= ~(SYNQUACER_HSSPI_DMTRP_DATA_MASK <<
+ SYNQUACER_HSSPI_DMTRP_DATA_SHIFT);
+
+ if (xfer->rx_buf)
+ val |= (SYNQUACER_HSSPI_DMTRP_DATA_RX <<
+ SYNQUACER_HSSPI_DMTRP_DATA_SHIFT);
+ else
+ val |= (SYNQUACER_HSSPI_DMTRP_DATA_TX <<
+ SYNQUACER_HSSPI_DMTRP_DATA_SHIFT);
+
+ val &= ~(3 << SYNQUACER_HSSPI_DMTRP_BUS_WIDTH_SHIFT);
+ val |= ((bus_width >> 1) << SYNQUACER_HSSPI_DMTRP_BUS_WIDTH_SHIFT);
+ writel(val, sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
+
+ sspi->bpw = bpw;
+ sspi->mode = mode;
+ sspi->speed = speed;
+ sspi->cs = spi->chip_select;
+ sspi->bus_width = bus_width;
+
+ return 0;
+}
+
+static int synquacer_spi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct synquacer_spi *sspi = spi_master_get_devdata(master);
+ int ret;
+ int status = 0;
+ u32 words;
+ u8 bpw;
+ u32 val;
+
+ val = readl(sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
+ val &= ~SYNQUACER_HSSPI_DMSTOP_STOP;
+ writel(val, sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
+
+ val = readl(sspi->regs + SYNQUACER_HSSPI_REG_FIFOCFG);
+ val |= SYNQUACER_HSSPI_FIFOCFG_RX_FLUSH;
+ val |= SYNQUACER_HSSPI_FIFOCFG_TX_FLUSH;
+ writel(val, sspi->regs + SYNQUACER_HSSPI_REG_FIFOCFG);
+
+ /*
+ * See if we can transfer 4 bytes as one word
+ * to maximize the FIFO buffer efficiency.
+ */
+ bpw = xfer->bits_per_word;
+ if (bpw == 8 && !(xfer->len % 4) && !(spi->mode & SPI_LSB_FIRST))
+ xfer->bits_per_word = 32;
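+ /* e.g. a 16 byte, 8 bpw transfer is then clocked out as four 32-bit FIFO words */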
+
+ ret = synquacer_spi_config(master, spi, xfer);
+
+ /* restore */
+ xfer->bits_per_word = bpw;
+
+ if (ret)
+ return ret;
+
+ reinit_completion(&sspi->transfer_done);
+
+ sspi->tx_buf = xfer->tx_buf;
+ sspi->rx_buf = xfer->rx_buf;
+
+ switch (sspi->bpw) {
+ case 8:
+ words = xfer->len;
+ break;
+ case 16:
+ words = xfer->len / 2;
+ break;
+ case 24:
+ /* fallthrough, should use 32-bit access */
+ case 32:
+ words = xfer->len / 4;
+ break;
+ default:
+ dev_err(sspi->dev, "unsupported bpw: %d\n", sspi->bpw);
+ return -EINVAL;
+ }
+
+ if (xfer->tx_buf)
+ sspi->tx_words = words;
+ else
+ sspi->tx_words = 0;
+
+ if (xfer->rx_buf)
+ sspi->rx_words = words;
+ else
+ sspi->rx_words = 0;
+
+ if (xfer->tx_buf) {
+ status = write_fifo(sspi);
+ if (status < 0) {
+ dev_err(sspi->dev, "failed write_fifo. status: 0x%x\n",
+ status);
+ return status;
+ }
+ }
+
+ if (xfer->rx_buf) {
+ val = readl(sspi->regs + SYNQUACER_HSSPI_REG_FIFOCFG);
+ val &= ~(SYNQUACER_HSSPI_FIFOCFG_RX_THRESHOLD_MASK <<
+ SYNQUACER_HSSPI_FIFOCFG_RX_THRESHOLD_SHIFT);
+ val |= ((sspi->rx_words > SYNQUACER_HSSPI_FIFO_DEPTH ?
+ SYNQUACER_HSSPI_FIFO_RX_THRESHOLD : sspi->rx_words) <<
+ SYNQUACER_HSSPI_FIFOCFG_RX_THRESHOLD_SHIFT);
+ writel(val, sspi->regs + SYNQUACER_HSSPI_REG_FIFOCFG);
+ }
+
+ writel(~0, sspi->regs + SYNQUACER_HSSPI_REG_TXC);
+ writel(~0, sspi->regs + SYNQUACER_HSSPI_REG_RXC);
+
+ /* Trigger */
+ val = readl(sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
+ val |= SYNQUACER_HSSPI_DMSTART_START;
+ writel(val, sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
+
+ if (xfer->tx_buf) {
+ val = SYNQUACER_HSSPI_TXE_FIFO_EMPTY;
+ writel(val, sspi->regs + SYNQUACER_HSSPI_REG_TXE);
+ status = wait_for_completion_timeout(&sspi->transfer_done,
+ msecs_to_jiffies(SYNQUACER_HSSPI_TRANSFER_TMOUT_MSEC));
+ writel(0, sspi->regs + SYNQUACER_HSSPI_REG_TXE);
+ }
+
+ if (xfer->rx_buf) {
+ u32 buf[SYNQUACER_HSSPI_FIFO_DEPTH];
+
+ val = SYNQUACER_HSSPI_RXE_FIFO_MORE_THAN_THRESHOLD |
+ SYNQUACER_HSSPI_RXE_SLAVE_RELEASED;
+ writel(val, sspi->regs + SYNQUACER_HSSPI_REG_RXE);
+ status = wait_for_completion_timeout(&sspi->transfer_done,
+ msecs_to_jiffies(SYNQUACER_HSSPI_TRANSFER_TMOUT_MSEC));
+ writel(0, sspi->regs + SYNQUACER_HSSPI_REG_RXE);
+
+ /* stop RX and clean RXFIFO */
+ val = readl(sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
+ val |= SYNQUACER_HSSPI_DMSTOP_STOP;
+ writel(val, sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
+ sspi->rx_buf = buf;
+ sspi->rx_words = SYNQUACER_HSSPI_FIFO_DEPTH;
+ read_fifo(sspi);
+ }
+
+ if (status == 0) {
+ dev_err(sspi->dev, "failed to transfer. Timeout.\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void synquacer_spi_set_cs(struct spi_device *spi, bool enable)
+{
+ struct synquacer_spi *sspi = spi_master_get_devdata(spi->master);
+ u32 val;
+
+ val = readl(sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
+ val &= ~(SYNQUACER_HSSPI_DMPSEL_CS_MASK <<
+ SYNQUACER_HSSPI_DMPSEL_CS_SHIFT);
+ val |= spi->chip_select << SYNQUACER_HSSPI_DMPSEL_CS_SHIFT;
+
+ if (!enable)
+ val |= SYNQUACER_HSSPI_DMSTOP_STOP;
+
+ writel(val, sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
+}
+
+static int synquacer_spi_wait_status_update(struct synquacer_spi *sspi,
+ bool enable)
+{
+ u32 val;
+ unsigned long timeout = jiffies +
+ msecs_to_jiffies(SYNQUACER_HSSPI_ENABLE_TMOUT_MSEC);
+
+ /* wait until MES (Module Enable Status) is updated */
+ do {
+ val = readl(sspi->regs + SYNQUACER_HSSPI_REG_MCTRL) &
+ SYNQUACER_HSSPI_MCTRL_MES;
+ if (enable && val)
+ return 0;
+ if (!enable && !val)
+ return 0;
+ } while (time_before(jiffies, timeout));
+
+	dev_err(sspi->dev, "timed out waiting for Module Enable Status update\n");
+ return -EBUSY;
+}
+
+static int synquacer_spi_enable(struct spi_master *master)
+{
+ u32 val;
+ int status;
+ struct synquacer_spi *sspi = spi_master_get_devdata(master);
+
+ /* Disable module */
+ writel(0, sspi->regs + SYNQUACER_HSSPI_REG_MCTRL);
+ status = synquacer_spi_wait_status_update(sspi, false);
+ if (status < 0)
+ return status;
+
+ writel(0, sspi->regs + SYNQUACER_HSSPI_REG_TXE);
+ writel(0, sspi->regs + SYNQUACER_HSSPI_REG_RXE);
+ writel(~0, sspi->regs + SYNQUACER_HSSPI_REG_TXC);
+ writel(~0, sspi->regs + SYNQUACER_HSSPI_REG_RXC);
+ writel(~0, sspi->regs + SYNQUACER_HSSPI_REG_FAULTC);
+
+ val = readl(sspi->regs + SYNQUACER_HSSPI_REG_DMCFG);
+ val &= ~SYNQUACER_HSSPI_DMCFG_SSDC;
+ val &= ~SYNQUACER_HSSPI_DMCFG_MSTARTEN;
+ writel(val, sspi->regs + SYNQUACER_HSSPI_REG_DMCFG);
+
+ val = readl(sspi->regs + SYNQUACER_HSSPI_REG_MCTRL);
+ if (sspi->clk_src_type == SYNQUACER_HSSPI_CLOCK_SRC_IPCLK)
+ val |= SYNQUACER_HSSPI_MCTRL_CDSS;
+ else
+ val &= ~SYNQUACER_HSSPI_MCTRL_CDSS;
+
+ val &= ~SYNQUACER_HSSPI_MCTRL_COMMAND_SEQUENCE_EN;
+ val |= SYNQUACER_HSSPI_MCTRL_MEN;
+ val |= SYNQUACER_HSSPI_MCTRL_SYNCON;
+
+ /* Enable module */
+ writel(val, sspi->regs + SYNQUACER_HSSPI_REG_MCTRL);
+ status = synquacer_spi_wait_status_update(sspi, true);
+ if (status < 0)
+ return status;
+
+ return 0;
+}
+
+static irqreturn_t sq_spi_rx_handler(int irq, void *priv)
+{
+ uint32_t val;
+ struct synquacer_spi *sspi = priv;
+
+ val = readl(sspi->regs + SYNQUACER_HSSPI_REG_RXF);
+ if ((val & SYNQUACER_HSSPI_RXF_SLAVE_RELEASED) ||
+ (val & SYNQUACER_HSSPI_RXF_FIFO_MORE_THAN_THRESHOLD)) {
+ read_fifo(sspi);
+
+ if (sspi->rx_words == 0) {
+ writel(0, sspi->regs + SYNQUACER_HSSPI_REG_RXE);
+ complete(&sspi->transfer_done);
+ }
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static irqreturn_t sq_spi_tx_handler(int irq, void *priv)
+{
+ uint32_t val;
+ struct synquacer_spi *sspi = priv;
+
+ val = readl(sspi->regs + SYNQUACER_HSSPI_REG_TXF);
+ if (val & SYNQUACER_HSSPI_TXF_FIFO_EMPTY) {
+ if (sspi->tx_words == 0) {
+ writel(0, sspi->regs + SYNQUACER_HSSPI_REG_TXE);
+ complete(&sspi->transfer_done);
+ } else {
+ write_fifo(sspi);
+ }
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int synquacer_spi_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct spi_master *master;
+ struct synquacer_spi *sspi;
+ int ret;
+ int rx_irq, tx_irq;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*sspi));
+ if (!master)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, master);
+
+ sspi = spi_master_get_devdata(master);
+ sspi->dev = &pdev->dev;
+
+ init_completion(&sspi->transfer_done);
+
+ sspi->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(sspi->regs)) {
+ ret = PTR_ERR(sspi->regs);
+ goto put_spi;
+ }
+
+ sspi->clk_src_type = SYNQUACER_HSSPI_CLOCK_SRC_IHCLK; /* Default */
+ device_property_read_u32(&pdev->dev, "socionext,ihclk-rate",
+ &master->max_speed_hz); /* for ACPI */
+
+ if (dev_of_node(&pdev->dev)) {
+ if (device_property_match_string(&pdev->dev,
+ "clock-names", "iHCLK") >= 0) {
+ sspi->clk_src_type = SYNQUACER_HSSPI_CLOCK_SRC_IHCLK;
+ sspi->clk = devm_clk_get(sspi->dev, "iHCLK");
+ } else if (device_property_match_string(&pdev->dev,
+ "clock-names", "iPCLK") >= 0) {
+ sspi->clk_src_type = SYNQUACER_HSSPI_CLOCK_SRC_IPCLK;
+ sspi->clk = devm_clk_get(sspi->dev, "iPCLK");
+ } else {
+			dev_err(&pdev->dev, "wrong clock source specified\n");
+ ret = -EINVAL;
+ goto put_spi;
+ }
+
+ if (IS_ERR(sspi->clk)) {
+ ret = dev_err_probe(&pdev->dev, PTR_ERR(sspi->clk),
+ "clock not found\n");
+ goto put_spi;
+ }
+
+ ret = clk_prepare_enable(sspi->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable clock (%d)\n",
+ ret);
+ goto put_spi;
+ }
+
+ master->max_speed_hz = clk_get_rate(sspi->clk);
+ }
+
+ if (!master->max_speed_hz) {
+ dev_err(&pdev->dev, "missing clock source\n");
+ ret = -EINVAL;
+ goto disable_clk;
+ }
+ master->min_speed_hz = master->max_speed_hz / 254;
+
+ sspi->aces = device_property_read_bool(&pdev->dev,
+ "socionext,set-aces");
+ sspi->rtm = device_property_read_bool(&pdev->dev, "socionext,use-rtm");
+
+ master->num_chipselect = SYNQUACER_HSSPI_NUM_CHIP_SELECT;
+
+ rx_irq = platform_get_irq(pdev, 0);
+ if (rx_irq <= 0) {
+ ret = rx_irq;
+ goto disable_clk;
+ }
+ snprintf(sspi->rx_irq_name, SYNQUACER_HSSPI_IRQ_NAME_MAX, "%s-rx",
+ dev_name(&pdev->dev));
+ ret = devm_request_irq(&pdev->dev, rx_irq, sq_spi_rx_handler,
+ 0, sspi->rx_irq_name, sspi);
+ if (ret) {
+ dev_err(&pdev->dev, "request rx_irq failed (%d)\n", ret);
+ goto disable_clk;
+ }
+
+ tx_irq = platform_get_irq(pdev, 1);
+ if (tx_irq <= 0) {
+ ret = tx_irq;
+ goto disable_clk;
+ }
+ snprintf(sspi->tx_irq_name, SYNQUACER_HSSPI_IRQ_NAME_MAX, "%s-tx",
+ dev_name(&pdev->dev));
+ ret = devm_request_irq(&pdev->dev, tx_irq, sq_spi_tx_handler,
+ 0, sspi->tx_irq_name, sspi);
+ if (ret) {
+ dev_err(&pdev->dev, "request tx_irq failed (%d)\n", ret);
+ goto disable_clk;
+ }
+
+ master->dev.of_node = np;
+ master->dev.fwnode = pdev->dev.fwnode;
+ master->auto_runtime_pm = true;
+ master->bus_num = pdev->id;
+
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_TX_DUAL | SPI_RX_DUAL |
+ SPI_TX_QUAD | SPI_RX_QUAD;
+ master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(24) |
+ SPI_BPW_MASK(16) | SPI_BPW_MASK(8);
+
+ master->set_cs = synquacer_spi_set_cs;
+ master->transfer_one = synquacer_spi_transfer_one;
+
+ ret = synquacer_spi_enable(master);
+ if (ret)
+ goto disable_clk;
+
+ pm_runtime_set_active(sspi->dev);
+ pm_runtime_enable(sspi->dev);
+
+ ret = devm_spi_register_master(sspi->dev, master);
+ if (ret)
+ goto disable_pm;
+
+ return 0;
+
+disable_pm:
+ pm_runtime_disable(sspi->dev);
+disable_clk:
+ clk_disable_unprepare(sspi->clk);
+put_spi:
+ spi_master_put(master);
+
+ return ret;
+}
+
+static int synquacer_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct synquacer_spi *sspi = spi_master_get_devdata(master);
+
+ pm_runtime_disable(sspi->dev);
+
+ clk_disable_unprepare(sspi->clk);
+
+ return 0;
+}
+
+static int __maybe_unused synquacer_spi_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct synquacer_spi *sspi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = spi_master_suspend(master);
+ if (ret)
+ return ret;
+
+ if (!pm_runtime_suspended(dev))
+ clk_disable_unprepare(sspi->clk);
+
+ return ret;
+}
+
+static int __maybe_unused synquacer_spi_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct synquacer_spi *sspi = spi_master_get_devdata(master);
+ int ret;
+
+ if (!pm_runtime_suspended(dev)) {
+ /* Ensure reconfigure during next xfer */
+ sspi->speed = 0;
+
+ ret = clk_prepare_enable(sspi->clk);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable clk (%d)\n",
+ ret);
+ return ret;
+ }
+
+ ret = synquacer_spi_enable(master);
+ if (ret) {
+ clk_disable_unprepare(sspi->clk);
+ dev_err(dev, "failed to enable spi (%d)\n", ret);
+ return ret;
+ }
+ }
+
+ ret = spi_master_resume(master);
+ if (ret < 0)
+ clk_disable_unprepare(sspi->clk);
+
+ return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(synquacer_spi_pm_ops, synquacer_spi_suspend,
+ synquacer_spi_resume);
+
+static const struct of_device_id synquacer_spi_of_match[] = {
+ {.compatible = "socionext,synquacer-spi"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, synquacer_spi_of_match);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id synquacer_hsspi_acpi_ids[] = {
+ { "SCX0004" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(acpi, synquacer_hsspi_acpi_ids);
+#endif
+
+static struct platform_driver synquacer_spi_driver = {
+ .driver = {
+ .name = "synquacer-spi",
+ .pm = &synquacer_spi_pm_ops,
+ .of_match_table = synquacer_spi_of_match,
+ .acpi_match_table = ACPI_PTR(synquacer_hsspi_acpi_ids),
+ },
+ .probe = synquacer_spi_probe,
+ .remove = synquacer_spi_remove,
+};
+module_platform_driver(synquacer_spi_driver);
+
+MODULE_DESCRIPTION("Socionext Synquacer HS-SPI controller driver");
+MODULE_AUTHOR("Masahisa Kojima <masahisa.kojima@linaro.org>");
+MODULE_AUTHOR("Jassi Brar <jaswinder.singh@linaro.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
new file mode 100644
index 000000000..d9be80e3e
--- /dev/null
+++ b/drivers/spi/spi-tegra114.c
@@ -0,0 +1,1536 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * SPI driver for NVIDIA's Tegra114 SPI Controller.
+ *
+ * Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/reset.h>
+#include <linux/spi/spi.h>
+
+#define SPI_COMMAND1 0x000
+#define SPI_BIT_LENGTH(x) (((x) & 0x1f) << 0)
+#define SPI_PACKED (1 << 5)
+#define SPI_TX_EN (1 << 11)
+#define SPI_RX_EN (1 << 12)
+#define SPI_BOTH_EN_BYTE (1 << 13)
+#define SPI_BOTH_EN_BIT (1 << 14)
+#define SPI_LSBYTE_FE (1 << 15)
+#define SPI_LSBIT_FE (1 << 16)
+#define SPI_BIDIROE (1 << 17)
+#define SPI_IDLE_SDA_DRIVE_LOW (0 << 18)
+#define SPI_IDLE_SDA_DRIVE_HIGH (1 << 18)
+#define SPI_IDLE_SDA_PULL_LOW (2 << 18)
+#define SPI_IDLE_SDA_PULL_HIGH (3 << 18)
+#define SPI_IDLE_SDA_MASK (3 << 18)
+#define SPI_CS_SW_VAL (1 << 20)
+#define SPI_CS_SW_HW (1 << 21)
+/* SPI_CS_POL_INACTIVE bits are default high */
+ /* n from 0 to 3 */
+#define SPI_CS_POL_INACTIVE(n) (1 << (22 + (n)))
+#define SPI_CS_POL_INACTIVE_MASK (0xF << 22)
+
+#define SPI_CS_SEL_0 (0 << 26)
+#define SPI_CS_SEL_1 (1 << 26)
+#define SPI_CS_SEL_2 (2 << 26)
+#define SPI_CS_SEL_3 (3 << 26)
+#define SPI_CS_SEL_MASK (3 << 26)
+#define SPI_CS_SEL(x) (((x) & 0x3) << 26)
+#define SPI_CONTROL_MODE_0 (0 << 28)
+#define SPI_CONTROL_MODE_1 (1 << 28)
+#define SPI_CONTROL_MODE_2 (2 << 28)
+#define SPI_CONTROL_MODE_3 (3 << 28)
+#define SPI_CONTROL_MODE_MASK (3 << 28)
+#define SPI_MODE_SEL(x) (((x) & 0x3) << 28)
+#define SPI_M_S (1 << 30)
+#define SPI_PIO (1 << 31)
+
+#define SPI_COMMAND2 0x004
+#define SPI_TX_TAP_DELAY(x) (((x) & 0x3F) << 6)
+#define SPI_RX_TAP_DELAY(x) (((x) & 0x3F) << 0)
+
+#define SPI_CS_TIMING1 0x008
+#define SPI_SETUP_HOLD(setup, hold) (((setup) << 4) | (hold))
+#define SPI_CS_SETUP_HOLD(reg, cs, val) \
+ ((((val) & 0xFFu) << ((cs) * 8)) | \
+ ((reg) & ~(0xFFu << ((cs) * 8))))
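+/*
+ * For example, SPI_SETUP_HOLD(3, 5) packs to 0x35, and
+ * SPI_CS_SETUP_HOLD(0, 1, 0x35) places that byte in bits 15:8 of the
+ * CS_TIMING1 image (returning 0x3500) while leaving the fields of the
+ * other chip selects untouched.
+ */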
+
+#define SPI_CS_TIMING2 0x00C
+#define CYCLES_BETWEEN_PACKETS_0(x) (((x) & 0x1F) << 0)
+#define CS_ACTIVE_BETWEEN_PACKETS_0 (1 << 5)
+#define CYCLES_BETWEEN_PACKETS_1(x) (((x) & 0x1F) << 8)
+#define CS_ACTIVE_BETWEEN_PACKETS_1 (1 << 13)
+#define CYCLES_BETWEEN_PACKETS_2(x) (((x) & 0x1F) << 16)
+#define CS_ACTIVE_BETWEEN_PACKETS_2 (1 << 21)
+#define CYCLES_BETWEEN_PACKETS_3(x) (((x) & 0x1F) << 24)
+#define CS_ACTIVE_BETWEEN_PACKETS_3 (1 << 29)
+#define SPI_SET_CS_ACTIVE_BETWEEN_PACKETS(reg, cs, val) \
+ (reg = (((val) & 0x1) << ((cs) * 8 + 5)) | \
+ ((reg) & ~(1 << ((cs) * 8 + 5))))
+#define SPI_SET_CYCLES_BETWEEN_PACKETS(reg, cs, val) \
+ (reg = (((val) & 0x1F) << ((cs) * 8)) | \
+ ((reg) & ~(0x1F << ((cs) * 8))))
+#define MAX_SETUP_HOLD_CYCLES 16
+#define MAX_INACTIVE_CYCLES 32
+
+#define SPI_TRANS_STATUS 0x010
+#define SPI_BLK_CNT(val) (((val) >> 0) & 0xFFFF)
+#define SPI_SLV_IDLE_COUNT(val) (((val) >> 16) & 0xFF)
+#define SPI_RDY (1 << 30)
+
+#define SPI_FIFO_STATUS 0x014
+#define SPI_RX_FIFO_EMPTY (1 << 0)
+#define SPI_RX_FIFO_FULL (1 << 1)
+#define SPI_TX_FIFO_EMPTY (1 << 2)
+#define SPI_TX_FIFO_FULL (1 << 3)
+#define SPI_RX_FIFO_UNF (1 << 4)
+#define SPI_RX_FIFO_OVF (1 << 5)
+#define SPI_TX_FIFO_UNF (1 << 6)
+#define SPI_TX_FIFO_OVF (1 << 7)
+#define SPI_ERR (1 << 8)
+#define SPI_TX_FIFO_FLUSH (1 << 14)
+#define SPI_RX_FIFO_FLUSH (1 << 15)
+#define SPI_TX_FIFO_EMPTY_COUNT(val) (((val) >> 16) & 0x7F)
+#define SPI_RX_FIFO_FULL_COUNT(val) (((val) >> 23) & 0x7F)
+#define SPI_FRAME_END (1 << 30)
+#define SPI_CS_INACTIVE (1 << 31)
+
+#define SPI_FIFO_ERROR (SPI_RX_FIFO_UNF | \
+ SPI_RX_FIFO_OVF | SPI_TX_FIFO_UNF | SPI_TX_FIFO_OVF)
+#define SPI_FIFO_EMPTY (SPI_RX_FIFO_EMPTY | SPI_TX_FIFO_EMPTY)
+
+#define SPI_TX_DATA 0x018
+#define SPI_RX_DATA 0x01C
+
+#define SPI_DMA_CTL 0x020
+#define SPI_TX_TRIG_1 (0 << 15)
+#define SPI_TX_TRIG_4 (1 << 15)
+#define SPI_TX_TRIG_8 (2 << 15)
+#define SPI_TX_TRIG_16 (3 << 15)
+#define SPI_TX_TRIG_MASK (3 << 15)
+#define SPI_RX_TRIG_1 (0 << 19)
+#define SPI_RX_TRIG_4 (1 << 19)
+#define SPI_RX_TRIG_8 (2 << 19)
+#define SPI_RX_TRIG_16 (3 << 19)
+#define SPI_RX_TRIG_MASK (3 << 19)
+#define SPI_IE_TX (1 << 28)
+#define SPI_IE_RX (1 << 29)
+#define SPI_CONT (1 << 30)
+#define SPI_DMA (1 << 31)
+#define SPI_DMA_EN SPI_DMA
+
+#define SPI_DMA_BLK 0x024
+#define SPI_DMA_BLK_SET(x) (((x) & 0xFFFF) << 0)
+
+#define SPI_TX_FIFO 0x108
+#define SPI_RX_FIFO 0x188
+#define SPI_INTR_MASK 0x18c
+#define SPI_INTR_ALL_MASK (0x1fUL << 25)
+#define MAX_CHIP_SELECT 4
+#define SPI_FIFO_DEPTH 64
+#define DATA_DIR_TX (1 << 0)
+#define DATA_DIR_RX (1 << 1)
+
+#define SPI_DMA_TIMEOUT (msecs_to_jiffies(1000))
+#define DEFAULT_SPI_DMA_BUF_LEN (16*1024)
+#define TX_FIFO_EMPTY_COUNT_MAX SPI_TX_FIFO_EMPTY_COUNT(0x40)
+#define RX_FIFO_FULL_COUNT_ZERO SPI_RX_FIFO_FULL_COUNT(0)
+#define MAX_HOLD_CYCLES 16
+#define SPI_DEFAULT_SPEED 25000000
+
+struct tegra_spi_soc_data {
+ bool has_intr_mask_reg;
+};
+
+struct tegra_spi_client_data {
+ int tx_clk_tap_delay;
+ int rx_clk_tap_delay;
+};
+
+struct tegra_spi_data {
+ struct device *dev;
+ struct spi_master *master;
+ spinlock_t lock;
+
+ struct clk *clk;
+ struct reset_control *rst;
+ void __iomem *base;
+ phys_addr_t phys;
+ unsigned irq;
+ u32 cur_speed;
+
+ struct spi_device *cur_spi;
+ struct spi_device *cs_control;
+ unsigned cur_pos;
+ unsigned words_per_32bit;
+ unsigned bytes_per_word;
+ unsigned curr_dma_words;
+ unsigned cur_direction;
+
+ unsigned cur_rx_pos;
+ unsigned cur_tx_pos;
+
+ unsigned dma_buf_size;
+ unsigned max_buf_size;
+ bool is_curr_dma_xfer;
+ bool use_hw_based_cs;
+
+ struct completion rx_dma_complete;
+ struct completion tx_dma_complete;
+
+ u32 tx_status;
+ u32 rx_status;
+ u32 status_reg;
+ bool is_packed;
+
+ u32 command1_reg;
+ u32 dma_control_reg;
+ u32 def_command1_reg;
+ u32 def_command2_reg;
+ u32 spi_cs_timing1;
+ u32 spi_cs_timing2;
+ u8 last_used_cs;
+
+ struct completion xfer_completion;
+ struct spi_transfer *curr_xfer;
+ struct dma_chan *rx_dma_chan;
+ u32 *rx_dma_buf;
+ dma_addr_t rx_dma_phys;
+ struct dma_async_tx_descriptor *rx_dma_desc;
+
+ struct dma_chan *tx_dma_chan;
+ u32 *tx_dma_buf;
+ dma_addr_t tx_dma_phys;
+ struct dma_async_tx_descriptor *tx_dma_desc;
+ const struct tegra_spi_soc_data *soc_data;
+};
+
+static int tegra_spi_runtime_suspend(struct device *dev);
+static int tegra_spi_runtime_resume(struct device *dev);
+
+static inline u32 tegra_spi_readl(struct tegra_spi_data *tspi,
+ unsigned long reg)
+{
+ return readl(tspi->base + reg);
+}
+
+static inline void tegra_spi_writel(struct tegra_spi_data *tspi,
+ u32 val, unsigned long reg)
+{
+ writel(val, tspi->base + reg);
+
+ /* Read back register to make sure that register writes completed */
+ if (reg != SPI_TX_FIFO)
+ readl(tspi->base + SPI_COMMAND1);
+}
+
+static void tegra_spi_clear_status(struct tegra_spi_data *tspi)
+{
+ u32 val;
+
+ /* Write 1 to clear status register */
+ val = tegra_spi_readl(tspi, SPI_TRANS_STATUS);
+ tegra_spi_writel(tspi, val, SPI_TRANS_STATUS);
+
+ /* Clear fifo status error if any */
+ val = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
+ if (val & SPI_ERR)
+ tegra_spi_writel(tspi, SPI_ERR | SPI_FIFO_ERROR,
+ SPI_FIFO_STATUS);
+}
+
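+/*
+ * Pick packed mode (8/16/32-bit words packed into 32-bit FIFO entries) or
+ * unpacked mode (one word per FIFO entry) and compute how many words fit in
+ * this round. For example, a 10-byte transfer at 8 bits per word is packed:
+ * bytes_per_word = 1, words_per_32bit = 4, curr_dma_words = 10, and the
+ * function returns (10 + 3) / 4 = 3 FIFO words.
+ */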
+static unsigned tegra_spi_calculate_curr_xfer_param(
+ struct spi_device *spi, struct tegra_spi_data *tspi,
+ struct spi_transfer *t)
+{
+ unsigned remain_len = t->len - tspi->cur_pos;
+ unsigned max_word;
+ unsigned bits_per_word = t->bits_per_word;
+ unsigned max_len;
+ unsigned total_fifo_words;
+
+ tspi->bytes_per_word = DIV_ROUND_UP(bits_per_word, 8);
+
+ if ((bits_per_word == 8 || bits_per_word == 16 ||
+ bits_per_word == 32) && t->len > 3) {
+ tspi->is_packed = true;
+ tspi->words_per_32bit = 32/bits_per_word;
+ } else {
+ tspi->is_packed = false;
+ tspi->words_per_32bit = 1;
+ }
+
+ if (tspi->is_packed) {
+ max_len = min(remain_len, tspi->max_buf_size);
+ tspi->curr_dma_words = max_len/tspi->bytes_per_word;
+ total_fifo_words = (max_len + 3) / 4;
+ } else {
+ max_word = (remain_len - 1) / tspi->bytes_per_word + 1;
+ max_word = min(max_word, tspi->max_buf_size/4);
+ tspi->curr_dma_words = max_word;
+ total_fifo_words = max_word;
+ }
+ return total_fifo_words;
+}
+
+static unsigned tegra_spi_fill_tx_fifo_from_client_txbuf(
+ struct tegra_spi_data *tspi, struct spi_transfer *t)
+{
+ unsigned nbytes;
+ unsigned tx_empty_count;
+ u32 fifo_status;
+ unsigned max_n_32bit;
+ unsigned i, count;
+ unsigned int written_words;
+ unsigned fifo_words_left;
+ u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
+
+ fifo_status = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
+ tx_empty_count = SPI_TX_FIFO_EMPTY_COUNT(fifo_status);
+
+ if (tspi->is_packed) {
+ fifo_words_left = tx_empty_count * tspi->words_per_32bit;
+ written_words = min(fifo_words_left, tspi->curr_dma_words);
+ nbytes = written_words * tspi->bytes_per_word;
+ max_n_32bit = DIV_ROUND_UP(nbytes, 4);
+ for (count = 0; count < max_n_32bit; count++) {
+ u32 x = 0;
+
+ for (i = 0; (i < 4) && nbytes; i++, nbytes--)
+ x |= (u32)(*tx_buf++) << (i * 8);
+ tegra_spi_writel(tspi, x, SPI_TX_FIFO);
+ }
+
+ tspi->cur_tx_pos += written_words * tspi->bytes_per_word;
+ } else {
+ unsigned int write_bytes;
+ max_n_32bit = min(tspi->curr_dma_words, tx_empty_count);
+ written_words = max_n_32bit;
+ nbytes = written_words * tspi->bytes_per_word;
+ if (nbytes > t->len - tspi->cur_pos)
+ nbytes = t->len - tspi->cur_pos;
+ write_bytes = nbytes;
+ for (count = 0; count < max_n_32bit; count++) {
+ u32 x = 0;
+
+ for (i = 0; nbytes && (i < tspi->bytes_per_word);
+ i++, nbytes--)
+ x |= (u32)(*tx_buf++) << (i * 8);
+ tegra_spi_writel(tspi, x, SPI_TX_FIFO);
+ }
+
+ tspi->cur_tx_pos += write_bytes;
+ }
+
+ return written_words;
+}
+
+static unsigned int tegra_spi_read_rx_fifo_to_client_rxbuf(
+ struct tegra_spi_data *tspi, struct spi_transfer *t)
+{
+ unsigned rx_full_count;
+ u32 fifo_status;
+ unsigned i, count;
+ unsigned int read_words = 0;
+ unsigned len;
+ u8 *rx_buf = (u8 *)t->rx_buf + tspi->cur_rx_pos;
+
+ fifo_status = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
+ rx_full_count = SPI_RX_FIFO_FULL_COUNT(fifo_status);
+ if (tspi->is_packed) {
+ len = tspi->curr_dma_words * tspi->bytes_per_word;
+ for (count = 0; count < rx_full_count; count++) {
+ u32 x = tegra_spi_readl(tspi, SPI_RX_FIFO);
+
+ for (i = 0; len && (i < 4); i++, len--)
+ *rx_buf++ = (x >> i*8) & 0xFF;
+ }
+ read_words += tspi->curr_dma_words;
+ tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
+ } else {
+ u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
+ u8 bytes_per_word = tspi->bytes_per_word;
+ unsigned int read_bytes;
+
+ len = rx_full_count * bytes_per_word;
+ if (len > t->len - tspi->cur_pos)
+ len = t->len - tspi->cur_pos;
+ read_bytes = len;
+ for (count = 0; count < rx_full_count; count++) {
+ u32 x = tegra_spi_readl(tspi, SPI_RX_FIFO) & rx_mask;
+
+ for (i = 0; len && (i < bytes_per_word); i++, len--)
+ *rx_buf++ = (x >> (i*8)) & 0xFF;
+ }
+ read_words += rx_full_count;
+ tspi->cur_rx_pos += read_bytes;
+ }
+
+ return read_words;
+}
+
+static void tegra_spi_copy_client_txbuf_to_spi_txbuf(
+ struct tegra_spi_data *tspi, struct spi_transfer *t)
+{
+	/* Make the DMA buffer visible to the CPU */
+ dma_sync_single_for_cpu(tspi->dev, tspi->tx_dma_phys,
+ tspi->dma_buf_size, DMA_TO_DEVICE);
+
+ if (tspi->is_packed) {
+ unsigned len = tspi->curr_dma_words * tspi->bytes_per_word;
+
+ memcpy(tspi->tx_dma_buf, t->tx_buf + tspi->cur_pos, len);
+ tspi->cur_tx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
+ } else {
+ unsigned int i;
+ unsigned int count;
+ u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
+ unsigned consume = tspi->curr_dma_words * tspi->bytes_per_word;
+ unsigned int write_bytes;
+
+ if (consume > t->len - tspi->cur_pos)
+ consume = t->len - tspi->cur_pos;
+ write_bytes = consume;
+ for (count = 0; count < tspi->curr_dma_words; count++) {
+ u32 x = 0;
+
+ for (i = 0; consume && (i < tspi->bytes_per_word);
+ i++, consume--)
+ x |= (u32)(*tx_buf++) << (i * 8);
+ tspi->tx_dma_buf[count] = x;
+ }
+
+ tspi->cur_tx_pos += write_bytes;
+ }
+
+	/* Make the DMA buffer visible to the device */
+ dma_sync_single_for_device(tspi->dev, tspi->tx_dma_phys,
+ tspi->dma_buf_size, DMA_TO_DEVICE);
+}
+
+static void tegra_spi_copy_spi_rxbuf_to_client_rxbuf(
+ struct tegra_spi_data *tspi, struct spi_transfer *t)
+{
+	/* Make the DMA buffer visible to the CPU */
+ dma_sync_single_for_cpu(tspi->dev, tspi->rx_dma_phys,
+ tspi->dma_buf_size, DMA_FROM_DEVICE);
+
+ if (tspi->is_packed) {
+ unsigned len = tspi->curr_dma_words * tspi->bytes_per_word;
+
+ memcpy(t->rx_buf + tspi->cur_rx_pos, tspi->rx_dma_buf, len);
+ tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
+ } else {
+ unsigned int i;
+ unsigned int count;
+ unsigned char *rx_buf = t->rx_buf + tspi->cur_rx_pos;
+ u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
+ unsigned consume = tspi->curr_dma_words * tspi->bytes_per_word;
+ unsigned int read_bytes;
+
+ if (consume > t->len - tspi->cur_pos)
+ consume = t->len - tspi->cur_pos;
+ read_bytes = consume;
+ for (count = 0; count < tspi->curr_dma_words; count++) {
+ u32 x = tspi->rx_dma_buf[count] & rx_mask;
+
+ for (i = 0; consume && (i < tspi->bytes_per_word);
+ i++, consume--)
+ *rx_buf++ = (x >> (i*8)) & 0xFF;
+ }
+
+ tspi->cur_rx_pos += read_bytes;
+ }
+
+	/* Make the DMA buffer visible to the device */
+ dma_sync_single_for_device(tspi->dev, tspi->rx_dma_phys,
+ tspi->dma_buf_size, DMA_FROM_DEVICE);
+}
+
+static void tegra_spi_dma_complete(void *args)
+{
+ struct completion *dma_complete = args;
+
+ complete(dma_complete);
+}
+
+static int tegra_spi_start_tx_dma(struct tegra_spi_data *tspi, int len)
+{
+ reinit_completion(&tspi->tx_dma_complete);
+ tspi->tx_dma_desc = dmaengine_prep_slave_single(tspi->tx_dma_chan,
+ tspi->tx_dma_phys, len, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!tspi->tx_dma_desc) {
+		dev_err(tspi->dev, "failed to get DMA descriptor for Tx\n");
+ return -EIO;
+ }
+
+ tspi->tx_dma_desc->callback = tegra_spi_dma_complete;
+ tspi->tx_dma_desc->callback_param = &tspi->tx_dma_complete;
+
+ dmaengine_submit(tspi->tx_dma_desc);
+ dma_async_issue_pending(tspi->tx_dma_chan);
+ return 0;
+}
+
+static int tegra_spi_start_rx_dma(struct tegra_spi_data *tspi, int len)
+{
+ reinit_completion(&tspi->rx_dma_complete);
+ tspi->rx_dma_desc = dmaengine_prep_slave_single(tspi->rx_dma_chan,
+ tspi->rx_dma_phys, len, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!tspi->rx_dma_desc) {
+		dev_err(tspi->dev, "failed to get DMA descriptor for Rx\n");
+ return -EIO;
+ }
+
+ tspi->rx_dma_desc->callback = tegra_spi_dma_complete;
+ tspi->rx_dma_desc->callback_param = &tspi->rx_dma_complete;
+
+ dmaengine_submit(tspi->rx_dma_desc);
+ dma_async_issue_pending(tspi->rx_dma_chan);
+ return 0;
+}
+
+static int tegra_spi_flush_fifos(struct tegra_spi_data *tspi)
+{
+ unsigned long timeout = jiffies + HZ;
+ u32 status;
+
+ status = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
+ if ((status & SPI_FIFO_EMPTY) != SPI_FIFO_EMPTY) {
+ status |= SPI_RX_FIFO_FLUSH | SPI_TX_FIFO_FLUSH;
+ tegra_spi_writel(tspi, status, SPI_FIFO_STATUS);
+ while ((status & SPI_FIFO_EMPTY) != SPI_FIFO_EMPTY) {
+ status = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
+ if (time_after(jiffies, timeout)) {
+ dev_err(tspi->dev,
+ "timeout waiting for fifo flush\n");
+ return -EIO;
+ }
+
+ udelay(1);
+ }
+ }
+
+ return 0;
+}
+
+static int tegra_spi_start_dma_based_transfer(
+ struct tegra_spi_data *tspi, struct spi_transfer *t)
+{
+ u32 val;
+ unsigned int len;
+ int ret = 0;
+ u8 dma_burst;
+ struct dma_slave_config dma_sconfig = {0};
+
+ val = SPI_DMA_BLK_SET(tspi->curr_dma_words - 1);
+ tegra_spi_writel(tspi, val, SPI_DMA_BLK);
+
+ if (tspi->is_packed)
+ len = DIV_ROUND_UP(tspi->curr_dma_words * tspi->bytes_per_word,
+ 4) * 4;
+ else
+ len = tspi->curr_dma_words * 4;
+
+ /* Set attention level based on length of transfer */
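+	/*
+	 * For example, a 24-byte transfer is not 16-byte aligned and uses the
+	 * 1-word trigger, 48 bytes selects the 4-word trigger, and 64 bytes
+	 * (or any other multiple of 32) selects the 8-word trigger.
+	 */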
+ if (len & 0xF) {
+ val |= SPI_TX_TRIG_1 | SPI_RX_TRIG_1;
+ dma_burst = 1;
+ } else if (((len) >> 4) & 0x1) {
+ val |= SPI_TX_TRIG_4 | SPI_RX_TRIG_4;
+ dma_burst = 4;
+ } else {
+ val |= SPI_TX_TRIG_8 | SPI_RX_TRIG_8;
+ dma_burst = 8;
+ }
+
+ if (!tspi->soc_data->has_intr_mask_reg) {
+ if (tspi->cur_direction & DATA_DIR_TX)
+ val |= SPI_IE_TX;
+
+ if (tspi->cur_direction & DATA_DIR_RX)
+ val |= SPI_IE_RX;
+ }
+
+ tegra_spi_writel(tspi, val, SPI_DMA_CTL);
+ tspi->dma_control_reg = val;
+
+ dma_sconfig.device_fc = true;
+ if (tspi->cur_direction & DATA_DIR_TX) {
+ dma_sconfig.dst_addr = tspi->phys + SPI_TX_FIFO;
+ dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dma_sconfig.dst_maxburst = dma_burst;
+ ret = dmaengine_slave_config(tspi->tx_dma_chan, &dma_sconfig);
+ if (ret < 0) {
+ dev_err(tspi->dev,
+ "DMA slave config failed: %d\n", ret);
+ return ret;
+ }
+
+ tegra_spi_copy_client_txbuf_to_spi_txbuf(tspi, t);
+ ret = tegra_spi_start_tx_dma(tspi, len);
+ if (ret < 0) {
+ dev_err(tspi->dev,
+ "Starting tx dma failed, err %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (tspi->cur_direction & DATA_DIR_RX) {
+ dma_sconfig.src_addr = tspi->phys + SPI_RX_FIFO;
+ dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dma_sconfig.src_maxburst = dma_burst;
+ ret = dmaengine_slave_config(tspi->rx_dma_chan, &dma_sconfig);
+ if (ret < 0) {
+ dev_err(tspi->dev,
+ "DMA slave config failed: %d\n", ret);
+ return ret;
+ }
+
+		/* Make the DMA buffer visible to the device */
+ dma_sync_single_for_device(tspi->dev, tspi->rx_dma_phys,
+ tspi->dma_buf_size, DMA_FROM_DEVICE);
+
+ ret = tegra_spi_start_rx_dma(tspi, len);
+ if (ret < 0) {
+ dev_err(tspi->dev,
+ "Starting rx dma failed, err %d\n", ret);
+ if (tspi->cur_direction & DATA_DIR_TX)
+ dmaengine_terminate_all(tspi->tx_dma_chan);
+ return ret;
+ }
+ }
+ tspi->is_curr_dma_xfer = true;
+ tspi->dma_control_reg = val;
+
+ val |= SPI_DMA_EN;
+ tegra_spi_writel(tspi, val, SPI_DMA_CTL);
+ return ret;
+}
+
+static int tegra_spi_start_cpu_based_transfer(
+ struct tegra_spi_data *tspi, struct spi_transfer *t)
+{
+ u32 val;
+ unsigned cur_words;
+
+ if (tspi->cur_direction & DATA_DIR_TX)
+ cur_words = tegra_spi_fill_tx_fifo_from_client_txbuf(tspi, t);
+ else
+ cur_words = tspi->curr_dma_words;
+
+ val = SPI_DMA_BLK_SET(cur_words - 1);
+ tegra_spi_writel(tspi, val, SPI_DMA_BLK);
+
+ val = 0;
+ if (tspi->cur_direction & DATA_DIR_TX)
+ val |= SPI_IE_TX;
+
+ if (tspi->cur_direction & DATA_DIR_RX)
+ val |= SPI_IE_RX;
+
+ tegra_spi_writel(tspi, val, SPI_DMA_CTL);
+ tspi->dma_control_reg = val;
+
+ tspi->is_curr_dma_xfer = false;
+
+ val = tspi->command1_reg;
+ val |= SPI_PIO;
+ tegra_spi_writel(tspi, val, SPI_COMMAND1);
+ return 0;
+}
+
+static int tegra_spi_init_dma_param(struct tegra_spi_data *tspi,
+ bool dma_to_memory)
+{
+ struct dma_chan *dma_chan;
+ u32 *dma_buf;
+ dma_addr_t dma_phys;
+
+ dma_chan = dma_request_chan(tspi->dev, dma_to_memory ? "rx" : "tx");
+ if (IS_ERR(dma_chan))
+ return dev_err_probe(tspi->dev, PTR_ERR(dma_chan),
+ "Dma channel is not available\n");
+
+ dma_buf = dma_alloc_coherent(tspi->dev, tspi->dma_buf_size,
+ &dma_phys, GFP_KERNEL);
+ if (!dma_buf) {
+		dev_err(tspi->dev, "failed to allocate DMA buffer\n");
+ dma_release_channel(dma_chan);
+ return -ENOMEM;
+ }
+
+ if (dma_to_memory) {
+ tspi->rx_dma_chan = dma_chan;
+ tspi->rx_dma_buf = dma_buf;
+ tspi->rx_dma_phys = dma_phys;
+ } else {
+ tspi->tx_dma_chan = dma_chan;
+ tspi->tx_dma_buf = dma_buf;
+ tspi->tx_dma_phys = dma_phys;
+ }
+ return 0;
+}
+
+static void tegra_spi_deinit_dma_param(struct tegra_spi_data *tspi,
+ bool dma_to_memory)
+{
+ u32 *dma_buf;
+ dma_addr_t dma_phys;
+ struct dma_chan *dma_chan;
+
+ if (dma_to_memory) {
+ dma_buf = tspi->rx_dma_buf;
+ dma_chan = tspi->rx_dma_chan;
+ dma_phys = tspi->rx_dma_phys;
+ tspi->rx_dma_chan = NULL;
+ tspi->rx_dma_buf = NULL;
+ } else {
+ dma_buf = tspi->tx_dma_buf;
+ dma_chan = tspi->tx_dma_chan;
+ dma_phys = tspi->tx_dma_phys;
+ tspi->tx_dma_buf = NULL;
+ tspi->tx_dma_chan = NULL;
+ }
+ if (!dma_chan)
+ return;
+
+ dma_free_coherent(tspi->dev, tspi->dma_buf_size, dma_buf, dma_phys);
+ dma_release_channel(dma_chan);
+}
+
+static int tegra_spi_set_hw_cs_timing(struct spi_device *spi)
+{
+ struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
+ struct spi_delay *setup = &spi->cs_setup;
+ struct spi_delay *hold = &spi->cs_hold;
+ struct spi_delay *inactive = &spi->cs_inactive;
+ u8 setup_dly, hold_dly, inactive_dly;
+ u32 setup_hold;
+ u32 spi_cs_timing;
+ u32 inactive_cycles;
+ u8 cs_state;
+
+ if ((setup && setup->unit != SPI_DELAY_UNIT_SCK) ||
+ (hold && hold->unit != SPI_DELAY_UNIT_SCK) ||
+ (inactive && inactive->unit != SPI_DELAY_UNIT_SCK)) {
+ dev_err(&spi->dev,
+ "Invalid delay unit %d, should be SPI_DELAY_UNIT_SCK\n",
+ SPI_DELAY_UNIT_SCK);
+ return -EINVAL;
+ }
+
+ setup_dly = setup ? setup->value : 0;
+ hold_dly = hold ? hold->value : 0;
+ inactive_dly = inactive ? inactive->value : 0;
+
+ setup_dly = min_t(u8, setup_dly, MAX_SETUP_HOLD_CYCLES);
+ hold_dly = min_t(u8, hold_dly, MAX_SETUP_HOLD_CYCLES);
+ if (setup_dly && hold_dly) {
+ setup_hold = SPI_SETUP_HOLD(setup_dly - 1, hold_dly - 1);
+ spi_cs_timing = SPI_CS_SETUP_HOLD(tspi->spi_cs_timing1,
+ spi->chip_select,
+ setup_hold);
+ if (tspi->spi_cs_timing1 != spi_cs_timing) {
+ tspi->spi_cs_timing1 = spi_cs_timing;
+ tegra_spi_writel(tspi, spi_cs_timing, SPI_CS_TIMING1);
+ }
+ }
+
+ inactive_cycles = min_t(u8, inactive_dly, MAX_INACTIVE_CYCLES);
+ if (inactive_cycles)
+ inactive_cycles--;
+ cs_state = inactive_cycles ? 0 : 1;
+ spi_cs_timing = tspi->spi_cs_timing2;
+ SPI_SET_CS_ACTIVE_BETWEEN_PACKETS(spi_cs_timing, spi->chip_select,
+ cs_state);
+ SPI_SET_CYCLES_BETWEEN_PACKETS(spi_cs_timing, spi->chip_select,
+ inactive_cycles);
+ if (tspi->spi_cs_timing2 != spi_cs_timing) {
+ tspi->spi_cs_timing2 = spi_cs_timing;
+ tegra_spi_writel(tspi, spi_cs_timing, SPI_CS_TIMING2);
+ }
+
+ return 0;
+}
+
+static u32 tegra_spi_setup_transfer_one(struct spi_device *spi,
+ struct spi_transfer *t,
+ bool is_first_of_msg,
+ bool is_single_xfer)
+{
+ struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
+ struct tegra_spi_client_data *cdata = spi->controller_data;
+ u32 speed = t->speed_hz;
+ u8 bits_per_word = t->bits_per_word;
+ u32 command1, command2;
+ int req_mode;
+ u32 tx_tap = 0, rx_tap = 0;
+
+ if (speed != tspi->cur_speed) {
+ clk_set_rate(tspi->clk, speed);
+ tspi->cur_speed = speed;
+ }
+
+ tspi->cur_spi = spi;
+ tspi->cur_pos = 0;
+ tspi->cur_rx_pos = 0;
+ tspi->cur_tx_pos = 0;
+ tspi->curr_xfer = t;
+
+ if (is_first_of_msg) {
+ tegra_spi_clear_status(tspi);
+
+ command1 = tspi->def_command1_reg;
+ command1 |= SPI_BIT_LENGTH(bits_per_word - 1);
+
+ command1 &= ~SPI_CONTROL_MODE_MASK;
+ req_mode = spi->mode & 0x3;
+ if (req_mode == SPI_MODE_0)
+ command1 |= SPI_CONTROL_MODE_0;
+ else if (req_mode == SPI_MODE_1)
+ command1 |= SPI_CONTROL_MODE_1;
+ else if (req_mode == SPI_MODE_2)
+ command1 |= SPI_CONTROL_MODE_2;
+ else if (req_mode == SPI_MODE_3)
+ command1 |= SPI_CONTROL_MODE_3;
+
+ if (spi->mode & SPI_LSB_FIRST)
+ command1 |= SPI_LSBIT_FE;
+ else
+ command1 &= ~SPI_LSBIT_FE;
+
+ if (spi->mode & SPI_3WIRE)
+ command1 |= SPI_BIDIROE;
+ else
+ command1 &= ~SPI_BIDIROE;
+
+ if (tspi->cs_control) {
+ if (tspi->cs_control != spi)
+ tegra_spi_writel(tspi, command1, SPI_COMMAND1);
+ tspi->cs_control = NULL;
+ } else
+ tegra_spi_writel(tspi, command1, SPI_COMMAND1);
+
+ /* GPIO based chip select control */
+ if (spi->cs_gpiod)
+ gpiod_set_value(spi->cs_gpiod, 1);
+
+ if (is_single_xfer && !(t->cs_change)) {
+ tspi->use_hw_based_cs = true;
+ command1 &= ~(SPI_CS_SW_HW | SPI_CS_SW_VAL);
+ } else {
+ tspi->use_hw_based_cs = false;
+ command1 |= SPI_CS_SW_HW;
+ if (spi->mode & SPI_CS_HIGH)
+ command1 |= SPI_CS_SW_VAL;
+ else
+ command1 &= ~SPI_CS_SW_VAL;
+ }
+
+ if (tspi->last_used_cs != spi->chip_select) {
+ if (cdata && cdata->tx_clk_tap_delay)
+ tx_tap = cdata->tx_clk_tap_delay;
+ if (cdata && cdata->rx_clk_tap_delay)
+ rx_tap = cdata->rx_clk_tap_delay;
+ command2 = SPI_TX_TAP_DELAY(tx_tap) |
+ SPI_RX_TAP_DELAY(rx_tap);
+ if (command2 != tspi->def_command2_reg)
+ tegra_spi_writel(tspi, command2, SPI_COMMAND2);
+ tspi->last_used_cs = spi->chip_select;
+ }
+
+ } else {
+ command1 = tspi->command1_reg;
+ command1 &= ~SPI_BIT_LENGTH(~0);
+ command1 |= SPI_BIT_LENGTH(bits_per_word - 1);
+ }
+
+ return command1;
+}
+
+static int tegra_spi_start_transfer_one(struct spi_device *spi,
+ struct spi_transfer *t, u32 command1)
+{
+ struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
+ unsigned total_fifo_words;
+ int ret;
+
+ total_fifo_words = tegra_spi_calculate_curr_xfer_param(spi, tspi, t);
+
+ if (t->rx_nbits == SPI_NBITS_DUAL || t->tx_nbits == SPI_NBITS_DUAL)
+ command1 |= SPI_BOTH_EN_BIT;
+ else
+ command1 &= ~SPI_BOTH_EN_BIT;
+
+ if (tspi->is_packed)
+ command1 |= SPI_PACKED;
+ else
+ command1 &= ~SPI_PACKED;
+
+ command1 &= ~(SPI_CS_SEL_MASK | SPI_TX_EN | SPI_RX_EN);
+ tspi->cur_direction = 0;
+ if (t->rx_buf) {
+ command1 |= SPI_RX_EN;
+ tspi->cur_direction |= DATA_DIR_RX;
+ }
+ if (t->tx_buf) {
+ command1 |= SPI_TX_EN;
+ tspi->cur_direction |= DATA_DIR_TX;
+ }
+ command1 |= SPI_CS_SEL(spi->chip_select);
+ tegra_spi_writel(tspi, command1, SPI_COMMAND1);
+ tspi->command1_reg = command1;
+
+	dev_dbg(tspi->dev, "default command1 0x%x, written 0x%x\n",
+ tspi->def_command1_reg, (unsigned)command1);
+
+ ret = tegra_spi_flush_fifos(tspi);
+ if (ret < 0)
+ return ret;
+ if (total_fifo_words > SPI_FIFO_DEPTH)
+ ret = tegra_spi_start_dma_based_transfer(tspi, t);
+ else
+ ret = tegra_spi_start_cpu_based_transfer(tspi, t);
+ return ret;
+}
+
+static struct tegra_spi_client_data
+ *tegra_spi_parse_cdata_dt(struct spi_device *spi)
+{
+ struct tegra_spi_client_data *cdata;
+ struct device_node *slave_np;
+
+ slave_np = spi->dev.of_node;
+ if (!slave_np) {
+ dev_dbg(&spi->dev, "device node not found\n");
+ return NULL;
+ }
+
+ cdata = kzalloc(sizeof(*cdata), GFP_KERNEL);
+ if (!cdata)
+ return NULL;
+
+ of_property_read_u32(slave_np, "nvidia,tx-clk-tap-delay",
+ &cdata->tx_clk_tap_delay);
+ of_property_read_u32(slave_np, "nvidia,rx-clk-tap-delay",
+ &cdata->rx_clk_tap_delay);
+ return cdata;
+}
+
+static void tegra_spi_cleanup(struct spi_device *spi)
+{
+ struct tegra_spi_client_data *cdata = spi->controller_data;
+
+ spi->controller_data = NULL;
+ if (spi->dev.of_node)
+ kfree(cdata);
+}
+
+static int tegra_spi_setup(struct spi_device *spi)
+{
+ struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
+ struct tegra_spi_client_data *cdata = spi->controller_data;
+ u32 val;
+ unsigned long flags;
+ int ret;
+
+ dev_dbg(&spi->dev, "setup %d bpw, %scpol, %scpha, %dHz\n",
+ spi->bits_per_word,
+ spi->mode & SPI_CPOL ? "" : "~",
+ spi->mode & SPI_CPHA ? "" : "~",
+ spi->max_speed_hz);
+
+ if (!cdata) {
+ cdata = tegra_spi_parse_cdata_dt(spi);
+ spi->controller_data = cdata;
+ }
+
+ ret = pm_runtime_resume_and_get(tspi->dev);
+ if (ret < 0) {
+ dev_err(tspi->dev, "pm runtime failed, e = %d\n", ret);
+ if (cdata)
+ tegra_spi_cleanup(spi);
+ return ret;
+ }
+
+ if (tspi->soc_data->has_intr_mask_reg) {
+ val = tegra_spi_readl(tspi, SPI_INTR_MASK);
+ val &= ~SPI_INTR_ALL_MASK;
+ tegra_spi_writel(tspi, val, SPI_INTR_MASK);
+ }
+
+ spin_lock_irqsave(&tspi->lock, flags);
+ /* GPIO based chip select control */
+ if (spi->cs_gpiod)
+ gpiod_set_value(spi->cs_gpiod, 0);
+
+ val = tspi->def_command1_reg;
+ if (spi->mode & SPI_CS_HIGH)
+ val &= ~SPI_CS_POL_INACTIVE(spi->chip_select);
+ else
+ val |= SPI_CS_POL_INACTIVE(spi->chip_select);
+ tspi->def_command1_reg = val;
+ tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1);
+ spin_unlock_irqrestore(&tspi->lock, flags);
+
+ pm_runtime_put(tspi->dev);
+ return 0;
+}
+
+static void tegra_spi_transfer_end(struct spi_device *spi)
+{
+ struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
+ int cs_val = (spi->mode & SPI_CS_HIGH) ? 0 : 1;
+
+ /* GPIO based chip select control */
+ if (spi->cs_gpiod)
+ gpiod_set_value(spi->cs_gpiod, 0);
+
+ if (!tspi->use_hw_based_cs) {
+ if (cs_val)
+ tspi->command1_reg |= SPI_CS_SW_VAL;
+ else
+ tspi->command1_reg &= ~SPI_CS_SW_VAL;
+ tegra_spi_writel(tspi, tspi->command1_reg, SPI_COMMAND1);
+ }
+
+ tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1);
+}
+
+static void tegra_spi_dump_regs(struct tegra_spi_data *tspi)
+{
+ dev_dbg(tspi->dev, "============ SPI REGISTER DUMP ============\n");
+ dev_dbg(tspi->dev, "Command1: 0x%08x | Command2: 0x%08x\n",
+ tegra_spi_readl(tspi, SPI_COMMAND1),
+ tegra_spi_readl(tspi, SPI_COMMAND2));
+ dev_dbg(tspi->dev, "DMA_CTL: 0x%08x | DMA_BLK: 0x%08x\n",
+ tegra_spi_readl(tspi, SPI_DMA_CTL),
+ tegra_spi_readl(tspi, SPI_DMA_BLK));
+ dev_dbg(tspi->dev, "TRANS_STAT: 0x%08x | FIFO_STATUS: 0x%08x\n",
+ tegra_spi_readl(tspi, SPI_TRANS_STATUS),
+ tegra_spi_readl(tspi, SPI_FIFO_STATUS));
+}
+
+static int tegra_spi_transfer_one_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ bool is_first_msg = true;
+ struct tegra_spi_data *tspi = spi_master_get_devdata(master);
+ struct spi_transfer *xfer;
+ struct spi_device *spi = msg->spi;
+ int ret;
+ bool skip = false;
+ int single_xfer;
+
+ msg->status = 0;
+ msg->actual_length = 0;
+
+ single_xfer = list_is_singular(&msg->transfers);
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ u32 cmd1;
+
+ reinit_completion(&tspi->xfer_completion);
+
+ cmd1 = tegra_spi_setup_transfer_one(spi, xfer, is_first_msg,
+ single_xfer);
+
+ if (!xfer->len) {
+ ret = 0;
+ skip = true;
+ goto complete_xfer;
+ }
+
+ ret = tegra_spi_start_transfer_one(spi, xfer, cmd1);
+ if (ret < 0) {
+ dev_err(tspi->dev,
+ "spi can not start transfer, err %d\n", ret);
+ goto complete_xfer;
+ }
+
+ is_first_msg = false;
+ ret = wait_for_completion_timeout(&tspi->xfer_completion,
+ SPI_DMA_TIMEOUT);
+ if (WARN_ON(ret == 0)) {
+ dev_err(tspi->dev, "spi transfer timeout\n");
+ if (tspi->is_curr_dma_xfer &&
+ (tspi->cur_direction & DATA_DIR_TX))
+ dmaengine_terminate_all(tspi->tx_dma_chan);
+ if (tspi->is_curr_dma_xfer &&
+ (tspi->cur_direction & DATA_DIR_RX))
+ dmaengine_terminate_all(tspi->rx_dma_chan);
+ ret = -EIO;
+ tegra_spi_dump_regs(tspi);
+ tegra_spi_flush_fifos(tspi);
+ reset_control_assert(tspi->rst);
+ udelay(2);
+ reset_control_deassert(tspi->rst);
+ tspi->last_used_cs = master->num_chipselect + 1;
+ goto complete_xfer;
+ }
+
+ if (tspi->tx_status || tspi->rx_status) {
+ dev_err(tspi->dev, "Error in Transfer\n");
+ ret = -EIO;
+ tegra_spi_dump_regs(tspi);
+ goto complete_xfer;
+ }
+ msg->actual_length += xfer->len;
+
+complete_xfer:
+ if (ret < 0 || skip) {
+ tegra_spi_transfer_end(spi);
+ spi_transfer_delay_exec(xfer);
+ goto exit;
+ } else if (list_is_last(&xfer->transfer_list,
+ &msg->transfers)) {
+ if (xfer->cs_change)
+ tspi->cs_control = spi;
+ else {
+ tegra_spi_transfer_end(spi);
+ spi_transfer_delay_exec(xfer);
+ }
+ } else if (xfer->cs_change) {
+ tegra_spi_transfer_end(spi);
+ spi_transfer_delay_exec(xfer);
+ }
+
+ }
+ ret = 0;
+exit:
+ msg->status = ret;
+ spi_finalize_current_message(master);
+ return ret;
+}
+
+static irqreturn_t handle_cpu_based_xfer(struct tegra_spi_data *tspi)
+{
+ struct spi_transfer *t = tspi->curr_xfer;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tspi->lock, flags);
+ if (tspi->tx_status || tspi->rx_status) {
+ dev_err(tspi->dev, "CpuXfer ERROR bit set 0x%x\n",
+ tspi->status_reg);
+ dev_err(tspi->dev, "CpuXfer 0x%08x:0x%08x\n",
+ tspi->command1_reg, tspi->dma_control_reg);
+ tegra_spi_dump_regs(tspi);
+ tegra_spi_flush_fifos(tspi);
+ complete(&tspi->xfer_completion);
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ reset_control_assert(tspi->rst);
+ udelay(2);
+ reset_control_deassert(tspi->rst);
+ return IRQ_HANDLED;
+ }
+
+ if (tspi->cur_direction & DATA_DIR_RX)
+ tegra_spi_read_rx_fifo_to_client_rxbuf(tspi, t);
+
+ if (tspi->cur_direction & DATA_DIR_TX)
+ tspi->cur_pos = tspi->cur_tx_pos;
+ else
+ tspi->cur_pos = tspi->cur_rx_pos;
+
+ if (tspi->cur_pos == t->len) {
+ complete(&tspi->xfer_completion);
+ goto exit;
+ }
+
+ tegra_spi_calculate_curr_xfer_param(tspi->cur_spi, tspi, t);
+ tegra_spi_start_cpu_based_transfer(tspi, t);
+exit:
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t handle_dma_based_xfer(struct tegra_spi_data *tspi)
+{
+ struct spi_transfer *t = tspi->curr_xfer;
+ long wait_status;
+ int err = 0;
+ unsigned total_fifo_words;
+ unsigned long flags;
+
+	/* Abort DMAs on any error */
+ if (tspi->cur_direction & DATA_DIR_TX) {
+ if (tspi->tx_status) {
+ dmaengine_terminate_all(tspi->tx_dma_chan);
+ err += 1;
+ } else {
+ wait_status = wait_for_completion_interruptible_timeout(
+ &tspi->tx_dma_complete, SPI_DMA_TIMEOUT);
+ if (wait_status <= 0) {
+ dmaengine_terminate_all(tspi->tx_dma_chan);
+ dev_err(tspi->dev, "TxDma Xfer failed\n");
+ err += 1;
+ }
+ }
+ }
+
+ if (tspi->cur_direction & DATA_DIR_RX) {
+ if (tspi->rx_status) {
+ dmaengine_terminate_all(tspi->rx_dma_chan);
+ err += 2;
+ } else {
+ wait_status = wait_for_completion_interruptible_timeout(
+ &tspi->rx_dma_complete, SPI_DMA_TIMEOUT);
+ if (wait_status <= 0) {
+ dmaengine_terminate_all(tspi->rx_dma_chan);
+ dev_err(tspi->dev, "RxDma Xfer failed\n");
+ err += 2;
+ }
+ }
+ }
+
+ spin_lock_irqsave(&tspi->lock, flags);
+ if (err) {
+ dev_err(tspi->dev, "DmaXfer: ERROR bit set 0x%x\n",
+ tspi->status_reg);
+ dev_err(tspi->dev, "DmaXfer 0x%08x:0x%08x\n",
+ tspi->command1_reg, tspi->dma_control_reg);
+ tegra_spi_dump_regs(tspi);
+ tegra_spi_flush_fifos(tspi);
+ complete(&tspi->xfer_completion);
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ reset_control_assert(tspi->rst);
+ udelay(2);
+ reset_control_deassert(tspi->rst);
+ return IRQ_HANDLED;
+ }
+
+ if (tspi->cur_direction & DATA_DIR_RX)
+ tegra_spi_copy_spi_rxbuf_to_client_rxbuf(tspi, t);
+
+ if (tspi->cur_direction & DATA_DIR_TX)
+ tspi->cur_pos = tspi->cur_tx_pos;
+ else
+ tspi->cur_pos = tspi->cur_rx_pos;
+
+ if (tspi->cur_pos == t->len) {
+ complete(&tspi->xfer_completion);
+ goto exit;
+ }
+
+ /* Continue transfer in current message */
+ total_fifo_words = tegra_spi_calculate_curr_xfer_param(tspi->cur_spi,
+ tspi, t);
+ if (total_fifo_words > SPI_FIFO_DEPTH)
+ err = tegra_spi_start_dma_based_transfer(tspi, t);
+ else
+ err = tegra_spi_start_cpu_based_transfer(tspi, t);
+
+exit:
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t tegra_spi_isr_thread(int irq, void *context_data)
+{
+ struct tegra_spi_data *tspi = context_data;
+
+ if (!tspi->is_curr_dma_xfer)
+ return handle_cpu_based_xfer(tspi);
+ return handle_dma_based_xfer(tspi);
+}
+
+static irqreturn_t tegra_spi_isr(int irq, void *context_data)
+{
+ struct tegra_spi_data *tspi = context_data;
+
+ tspi->status_reg = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
+ if (tspi->cur_direction & DATA_DIR_TX)
+ tspi->tx_status = tspi->status_reg &
+ (SPI_TX_FIFO_UNF | SPI_TX_FIFO_OVF);
+
+ if (tspi->cur_direction & DATA_DIR_RX)
+ tspi->rx_status = tspi->status_reg &
+ (SPI_RX_FIFO_OVF | SPI_RX_FIFO_UNF);
+ tegra_spi_clear_status(tspi);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static struct tegra_spi_soc_data tegra114_spi_soc_data = {
+ .has_intr_mask_reg = false,
+};
+
+static struct tegra_spi_soc_data tegra124_spi_soc_data = {
+ .has_intr_mask_reg = false,
+};
+
+static struct tegra_spi_soc_data tegra210_spi_soc_data = {
+ .has_intr_mask_reg = true,
+};
+
+static const struct of_device_id tegra_spi_of_match[] = {
+ {
+ .compatible = "nvidia,tegra114-spi",
+ .data = &tegra114_spi_soc_data,
+ }, {
+ .compatible = "nvidia,tegra124-spi",
+ .data = &tegra124_spi_soc_data,
+ }, {
+ .compatible = "nvidia,tegra210-spi",
+ .data = &tegra210_spi_soc_data,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, tegra_spi_of_match);
+
+static int tegra_spi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct tegra_spi_data *tspi;
+ struct resource *r;
+ int ret, spi_irq;
+ int bus_num;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*tspi));
+ if (!master) {
+ dev_err(&pdev->dev, "master allocation failed\n");
+ return -ENOMEM;
+ }
+ platform_set_drvdata(pdev, master);
+ tspi = spi_master_get_devdata(master);
+
+ if (of_property_read_u32(pdev->dev.of_node, "spi-max-frequency",
+ &master->max_speed_hz))
+ master->max_speed_hz = 25000000; /* 25MHz */
+
+ /* the spi->mode bits understood by this driver: */
+ master->use_gpio_descriptors = true;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST |
+ SPI_TX_DUAL | SPI_RX_DUAL | SPI_3WIRE;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
+ master->setup = tegra_spi_setup;
+ master->cleanup = tegra_spi_cleanup;
+ master->transfer_one_message = tegra_spi_transfer_one_message;
+ master->set_cs_timing = tegra_spi_set_hw_cs_timing;
+ master->num_chipselect = MAX_CHIP_SELECT;
+ master->auto_runtime_pm = true;
+ bus_num = of_alias_get_id(pdev->dev.of_node, "spi");
+ if (bus_num >= 0)
+ master->bus_num = bus_num;
+
+ tspi->master = master;
+ tspi->dev = &pdev->dev;
+ spin_lock_init(&tspi->lock);
+
+ tspi->soc_data = of_device_get_match_data(&pdev->dev);
+ if (!tspi->soc_data) {
+		dev_err(&pdev->dev, "unsupported Tegra SoC\n");
+ ret = -ENODEV;
+ goto exit_free_master;
+ }
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ tspi->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(tspi->base)) {
+ ret = PTR_ERR(tspi->base);
+ goto exit_free_master;
+ }
+ tspi->phys = r->start;
+
+ spi_irq = platform_get_irq(pdev, 0);
+ if (spi_irq < 0) {
+ ret = spi_irq;
+ goto exit_free_master;
+ }
+ tspi->irq = spi_irq;
+
+ tspi->clk = devm_clk_get(&pdev->dev, "spi");
+ if (IS_ERR(tspi->clk)) {
+ dev_err(&pdev->dev, "can not get clock\n");
+ ret = PTR_ERR(tspi->clk);
+ goto exit_free_master;
+ }
+
+ tspi->rst = devm_reset_control_get_exclusive(&pdev->dev, "spi");
+ if (IS_ERR(tspi->rst)) {
+ dev_err(&pdev->dev, "can not get reset\n");
+ ret = PTR_ERR(tspi->rst);
+ goto exit_free_master;
+ }
+
+ tspi->max_buf_size = SPI_FIFO_DEPTH << 2;
+ tspi->dma_buf_size = DEFAULT_SPI_DMA_BUF_LEN;
+
+ ret = tegra_spi_init_dma_param(tspi, true);
+ if (ret < 0)
+ goto exit_free_master;
+ ret = tegra_spi_init_dma_param(tspi, false);
+ if (ret < 0)
+ goto exit_rx_dma_free;
+ tspi->max_buf_size = tspi->dma_buf_size;
+ init_completion(&tspi->tx_dma_complete);
+ init_completion(&tspi->rx_dma_complete);
+
+ init_completion(&tspi->xfer_completion);
+
+ pm_runtime_enable(&pdev->dev);
+ if (!pm_runtime_enabled(&pdev->dev)) {
+ ret = tegra_spi_runtime_resume(&pdev->dev);
+ if (ret)
+ goto exit_pm_disable;
+ }
+
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "pm runtime get failed, e = %d\n", ret);
+ goto exit_pm_disable;
+ }
+
+ reset_control_assert(tspi->rst);
+ udelay(2);
+ reset_control_deassert(tspi->rst);
+ tspi->def_command1_reg = SPI_M_S;
+ tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1);
+ tspi->spi_cs_timing1 = tegra_spi_readl(tspi, SPI_CS_TIMING1);
+ tspi->spi_cs_timing2 = tegra_spi_readl(tspi, SPI_CS_TIMING2);
+ tspi->def_command2_reg = tegra_spi_readl(tspi, SPI_COMMAND2);
+ tspi->last_used_cs = master->num_chipselect + 1;
+ pm_runtime_put(&pdev->dev);
+ ret = request_threaded_irq(tspi->irq, tegra_spi_isr,
+ tegra_spi_isr_thread, IRQF_ONESHOT,
+ dev_name(&pdev->dev), tspi);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
+ tspi->irq);
+ goto exit_pm_disable;
+ }
+
+ master->dev.of_node = pdev->dev.of_node;
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret < 0) {
+		dev_err(&pdev->dev, "cannot register master, err %d\n", ret);
+ goto exit_free_irq;
+ }
+ return ret;
+
+exit_free_irq:
+ free_irq(spi_irq, tspi);
+exit_pm_disable:
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ tegra_spi_runtime_suspend(&pdev->dev);
+ tegra_spi_deinit_dma_param(tspi, false);
+exit_rx_dma_free:
+ tegra_spi_deinit_dma_param(tspi, true);
+exit_free_master:
+ spi_master_put(master);
+ return ret;
+}
+
+static int tegra_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct tegra_spi_data *tspi = spi_master_get_devdata(master);
+
+ free_irq(tspi->irq, tspi);
+
+ if (tspi->tx_dma_chan)
+ tegra_spi_deinit_dma_param(tspi, false);
+
+ if (tspi->rx_dma_chan)
+ tegra_spi_deinit_dma_param(tspi, true);
+
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ tegra_spi_runtime_suspend(&pdev->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int tegra_spi_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+
+ return spi_master_suspend(master);
+}
+
+static int tegra_spi_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct tegra_spi_data *tspi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0) {
+ dev_err(dev, "pm runtime failed, e = %d\n", ret);
+ return ret;
+ }
+ tegra_spi_writel(tspi, tspi->command1_reg, SPI_COMMAND1);
+ tegra_spi_writel(tspi, tspi->def_command2_reg, SPI_COMMAND2);
+ tspi->last_used_cs = master->num_chipselect + 1;
+ pm_runtime_put(dev);
+
+ return spi_master_resume(master);
+}
+#endif
+
+static int tegra_spi_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct tegra_spi_data *tspi = spi_master_get_devdata(master);
+
+	/* Flush all writes still in the PPSB queue by reading back */
+ tegra_spi_readl(tspi, SPI_COMMAND1);
+
+ clk_disable_unprepare(tspi->clk);
+ return 0;
+}
+
+static int tegra_spi_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct tegra_spi_data *tspi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_prepare_enable(tspi->clk);
+ if (ret < 0) {
+ dev_err(tspi->dev, "clk_prepare failed: %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static const struct dev_pm_ops tegra_spi_pm_ops = {
+ SET_RUNTIME_PM_OPS(tegra_spi_runtime_suspend,
+ tegra_spi_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(tegra_spi_suspend, tegra_spi_resume)
+};
+static struct platform_driver tegra_spi_driver = {
+ .driver = {
+ .name = "spi-tegra114",
+ .pm = &tegra_spi_pm_ops,
+ .of_match_table = tegra_spi_of_match,
+ },
+ .probe = tegra_spi_probe,
+ .remove = tegra_spi_remove,
+};
+module_platform_driver(tegra_spi_driver);
+
+MODULE_ALIAS("platform:spi-tegra114");
+MODULE_DESCRIPTION("NVIDIA Tegra114 SPI Controller Driver");
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-tegra20-sflash.c b/drivers/spi/spi-tegra20-sflash.c
new file mode 100644
index 000000000..d4bebb431
--- /dev/null
+++ b/drivers/spi/spi-tegra20-sflash.c
@@ -0,0 +1,612 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * SPI driver for NVIDIA's Tegra20 Serial Flash Controller.
+ *
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/reset.h>
+#include <linux/spi/spi.h>
+
+#define SPI_COMMAND 0x000
+#define SPI_GO BIT(30)
+#define SPI_M_S BIT(28)
+#define SPI_ACTIVE_SCLK_MASK (0x3 << 26)
+#define SPI_ACTIVE_SCLK_DRIVE_LOW (0 << 26)
+#define SPI_ACTIVE_SCLK_DRIVE_HIGH (1 << 26)
+#define SPI_ACTIVE_SCLK_PULL_LOW (2 << 26)
+#define SPI_ACTIVE_SCLK_PULL_HIGH (3 << 26)
+
+#define SPI_CK_SDA_FALLING (1 << 21)
+#define SPI_CK_SDA_RISING (0 << 21)
+#define SPI_CK_SDA_MASK (1 << 21)
+#define SPI_ACTIVE_SDA (0x3 << 18)
+#define SPI_ACTIVE_SDA_DRIVE_LOW (0 << 18)
+#define SPI_ACTIVE_SDA_DRIVE_HIGH (1 << 18)
+#define SPI_ACTIVE_SDA_PULL_LOW (2 << 18)
+#define SPI_ACTIVE_SDA_PULL_HIGH (3 << 18)
+
+#define SPI_CS_POL_INVERT BIT(16)
+#define SPI_TX_EN BIT(15)
+#define SPI_RX_EN BIT(14)
+#define SPI_CS_VAL_HIGH BIT(13)
+#define SPI_CS_VAL_LOW 0x0
+#define SPI_CS_SW BIT(12)
+#define SPI_CS_HW 0x0
+#define SPI_CS_DELAY_MASK (7 << 9)
+#define SPI_CS3_EN BIT(8)
+#define SPI_CS2_EN BIT(7)
+#define SPI_CS1_EN BIT(6)
+#define SPI_CS0_EN BIT(5)
+
+#define SPI_CS_MASK (SPI_CS3_EN | SPI_CS2_EN | \
+ SPI_CS1_EN | SPI_CS0_EN)
+#define SPI_BIT_LENGTH(x) (((x) & 0x1f) << 0)
+
+#define SPI_MODES (SPI_ACTIVE_SCLK_MASK | SPI_CK_SDA_MASK)
+
+#define SPI_STATUS 0x004
+#define SPI_BSY BIT(31)
+#define SPI_RDY BIT(30)
+#define SPI_TXF_FLUSH BIT(29)
+#define SPI_RXF_FLUSH BIT(28)
+#define SPI_RX_UNF BIT(27)
+#define SPI_TX_OVF BIT(26)
+#define SPI_RXF_EMPTY BIT(25)
+#define SPI_RXF_FULL BIT(24)
+#define SPI_TXF_EMPTY BIT(23)
+#define SPI_TXF_FULL BIT(22)
+#define SPI_BLK_CNT(count) (((count) & 0xffff) + 1)
+
+#define SPI_FIFO_ERROR (SPI_RX_UNF | SPI_TX_OVF)
+#define SPI_FIFO_EMPTY			(SPI_TXF_EMPTY | SPI_RXF_EMPTY)
+
+#define SPI_RX_CMP 0x8
+#define SPI_DMA_CTL 0x0C
+#define SPI_DMA_EN BIT(31)
+#define SPI_IE_RXC BIT(27)
+#define SPI_IE_TXC BIT(26)
+#define SPI_PACKED BIT(20)
+#define SPI_RX_TRIG_MASK (0x3 << 18)
+#define SPI_RX_TRIG_1W (0x0 << 18)
+#define SPI_RX_TRIG_4W (0x1 << 18)
+#define SPI_TX_TRIG_MASK (0x3 << 16)
+#define SPI_TX_TRIG_1W (0x0 << 16)
+#define SPI_TX_TRIG_4W (0x1 << 16)
+#define SPI_DMA_BLK_COUNT(count) (((count) - 1) & 0xFFFF)
+
+#define SPI_TX_FIFO 0x10
+#define SPI_RX_FIFO 0x20
+
+#define DATA_DIR_TX (1 << 0)
+#define DATA_DIR_RX (1 << 1)
+
+#define MAX_CHIP_SELECT 4
+#define SPI_FIFO_DEPTH 4
+#define SPI_DMA_TIMEOUT (msecs_to_jiffies(1000))
+
+struct tegra_sflash_data {
+ struct device *dev;
+ struct spi_master *master;
+ spinlock_t lock;
+
+ struct clk *clk;
+ struct reset_control *rst;
+ void __iomem *base;
+ unsigned irq;
+ u32 cur_speed;
+
+ struct spi_device *cur_spi;
+ unsigned cur_pos;
+ unsigned cur_len;
+ unsigned bytes_per_word;
+ unsigned cur_direction;
+ unsigned curr_xfer_words;
+
+ unsigned cur_rx_pos;
+ unsigned cur_tx_pos;
+
+ u32 tx_status;
+ u32 rx_status;
+ u32 status_reg;
+
+ u32 def_command_reg;
+ u32 command_reg;
+ u32 dma_control_reg;
+
+ struct completion xfer_completion;
+ struct spi_transfer *curr_xfer;
+};
+
+static int tegra_sflash_runtime_suspend(struct device *dev);
+static int tegra_sflash_runtime_resume(struct device *dev);
+
+static inline u32 tegra_sflash_readl(struct tegra_sflash_data *tsd,
+ unsigned long reg)
+{
+ return readl(tsd->base + reg);
+}
+
+static inline void tegra_sflash_writel(struct tegra_sflash_data *tsd,
+ u32 val, unsigned long reg)
+{
+ writel(val, tsd->base + reg);
+}
+
+static void tegra_sflash_clear_status(struct tegra_sflash_data *tsd)
+{
+ /* Write 1 to clear status register */
+ tegra_sflash_writel(tsd, SPI_RDY | SPI_FIFO_ERROR, SPI_STATUS);
+}
+
+static unsigned tegra_sflash_calculate_curr_xfer_param(
+ struct spi_device *spi, struct tegra_sflash_data *tsd,
+ struct spi_transfer *t)
+{
+ unsigned remain_len = t->len - tsd->cur_pos;
+ unsigned max_word;
+
+ tsd->bytes_per_word = DIV_ROUND_UP(t->bits_per_word, 8);
+ max_word = remain_len / tsd->bytes_per_word;
+ if (max_word > SPI_FIFO_DEPTH)
+ max_word = SPI_FIFO_DEPTH;
+ tsd->curr_xfer_words = max_word;
+ return max_word;
+}
+
+static unsigned tegra_sflash_fill_tx_fifo_from_client_txbuf(
+ struct tegra_sflash_data *tsd, struct spi_transfer *t)
+{
+ unsigned nbytes;
+ u32 status;
+ unsigned max_n_32bit = tsd->curr_xfer_words;
+ u8 *tx_buf = (u8 *)t->tx_buf + tsd->cur_tx_pos;
+
+ if (max_n_32bit > SPI_FIFO_DEPTH)
+ max_n_32bit = SPI_FIFO_DEPTH;
+ nbytes = max_n_32bit * tsd->bytes_per_word;
+
+ status = tegra_sflash_readl(tsd, SPI_STATUS);
+ while (!(status & SPI_TXF_FULL)) {
+ int i;
+ u32 x = 0;
+
+ for (i = 0; nbytes && (i < tsd->bytes_per_word);
+ i++, nbytes--)
+ x |= (u32)(*tx_buf++) << (i * 8);
+ tegra_sflash_writel(tsd, x, SPI_TX_FIFO);
+ if (!nbytes)
+ break;
+
+ status = tegra_sflash_readl(tsd, SPI_STATUS);
+ }
+ tsd->cur_tx_pos += max_n_32bit * tsd->bytes_per_word;
+ return max_n_32bit;
+}
+
+static int tegra_sflash_read_rx_fifo_to_client_rxbuf(
+ struct tegra_sflash_data *tsd, struct spi_transfer *t)
+{
+ u32 status;
+ unsigned int read_words = 0;
+ u8 *rx_buf = (u8 *)t->rx_buf + tsd->cur_rx_pos;
+
+ status = tegra_sflash_readl(tsd, SPI_STATUS);
+ while (!(status & SPI_RXF_EMPTY)) {
+ int i;
+ u32 x = tegra_sflash_readl(tsd, SPI_RX_FIFO);
+
+ for (i = 0; (i < tsd->bytes_per_word); i++)
+ *rx_buf++ = (x >> (i*8)) & 0xFF;
+ read_words++;
+ status = tegra_sflash_readl(tsd, SPI_STATUS);
+ }
+ tsd->cur_rx_pos += read_words * tsd->bytes_per_word;
+ return 0;
+}
+
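+/*
+ * Enable the transfer-complete interrupts, preload the TX FIFO when
+ * transmitting, program the block count and finally set SPI_DMA_EN to
+ * kick off the transfer (this driver only does PIO).
+ */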
+static int tegra_sflash_start_cpu_based_transfer(
+ struct tegra_sflash_data *tsd, struct spi_transfer *t)
+{
+ u32 val = 0;
+ unsigned cur_words;
+
+ if (tsd->cur_direction & DATA_DIR_TX)
+ val |= SPI_IE_TXC;
+
+ if (tsd->cur_direction & DATA_DIR_RX)
+ val |= SPI_IE_RXC;
+
+ tegra_sflash_writel(tsd, val, SPI_DMA_CTL);
+ tsd->dma_control_reg = val;
+
+ if (tsd->cur_direction & DATA_DIR_TX)
+ cur_words = tegra_sflash_fill_tx_fifo_from_client_txbuf(tsd, t);
+ else
+ cur_words = tsd->curr_xfer_words;
+ val |= SPI_DMA_BLK_COUNT(cur_words);
+ tegra_sflash_writel(tsd, val, SPI_DMA_CTL);
+ tsd->dma_control_reg = val;
+ val |= SPI_DMA_EN;
+ tegra_sflash_writel(tsd, val, SPI_DMA_CTL);
+ return 0;
+}
+
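+/*
+ * Set the bus clock for this transfer and build the COMMAND register
+ * (bit length, SPI mode, chip select, data direction) before starting
+ * the PIO transfer.
+ */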
+static int tegra_sflash_start_transfer_one(struct spi_device *spi,
+ struct spi_transfer *t, bool is_first_of_msg,
+ bool is_single_xfer)
+{
+ struct tegra_sflash_data *tsd = spi_master_get_devdata(spi->master);
+ u32 speed;
+ u32 command;
+
+ speed = t->speed_hz;
+ if (speed != tsd->cur_speed) {
+ clk_set_rate(tsd->clk, speed);
+ tsd->cur_speed = speed;
+ }
+
+ tsd->cur_spi = spi;
+ tsd->cur_pos = 0;
+ tsd->cur_rx_pos = 0;
+ tsd->cur_tx_pos = 0;
+ tsd->curr_xfer = t;
+ tegra_sflash_calculate_curr_xfer_param(spi, tsd, t);
+ if (is_first_of_msg) {
+ command = tsd->def_command_reg;
+ command |= SPI_BIT_LENGTH(t->bits_per_word - 1);
+ command |= SPI_CS_VAL_HIGH;
+
+ command &= ~SPI_MODES;
+ if (spi->mode & SPI_CPHA)
+ command |= SPI_CK_SDA_FALLING;
+
+ if (spi->mode & SPI_CPOL)
+ command |= SPI_ACTIVE_SCLK_DRIVE_HIGH;
+ else
+ command |= SPI_ACTIVE_SCLK_DRIVE_LOW;
+ command |= SPI_CS0_EN << spi->chip_select;
+ } else {
+ command = tsd->command_reg;
+ command &= ~SPI_BIT_LENGTH(~0);
+ command |= SPI_BIT_LENGTH(t->bits_per_word - 1);
+ command &= ~(SPI_RX_EN | SPI_TX_EN);
+ }
+
+ tsd->cur_direction = 0;
+ if (t->rx_buf) {
+ command |= SPI_RX_EN;
+ tsd->cur_direction |= DATA_DIR_RX;
+ }
+ if (t->tx_buf) {
+ command |= SPI_TX_EN;
+ tsd->cur_direction |= DATA_DIR_TX;
+ }
+ tegra_sflash_writel(tsd, command, SPI_COMMAND);
+ tsd->command_reg = command;
+
+ return tegra_sflash_start_cpu_based_transfer(tsd, t);
+}
+
+static int tegra_sflash_transfer_one_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ bool is_first_msg = true;
+ int single_xfer;
+ struct tegra_sflash_data *tsd = spi_master_get_devdata(master);
+ struct spi_transfer *xfer;
+ struct spi_device *spi = msg->spi;
+ int ret;
+
+ msg->status = 0;
+ msg->actual_length = 0;
+ single_xfer = list_is_singular(&msg->transfers);
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ reinit_completion(&tsd->xfer_completion);
+ ret = tegra_sflash_start_transfer_one(spi, xfer,
+ is_first_msg, single_xfer);
+ if (ret < 0) {
+ dev_err(tsd->dev,
+ "spi can not start transfer, err %d\n", ret);
+ goto exit;
+ }
+ is_first_msg = false;
+ ret = wait_for_completion_timeout(&tsd->xfer_completion,
+ SPI_DMA_TIMEOUT);
+ if (WARN_ON(ret == 0)) {
+ dev_err(tsd->dev,
+ "spi transfer timeout, err %d\n", ret);
+ ret = -EIO;
+ goto exit;
+ }
+
+ if (tsd->tx_status || tsd->rx_status) {
+ dev_err(tsd->dev, "Error in Transfer\n");
+ ret = -EIO;
+ goto exit;
+ }
+ msg->actual_length += xfer->len;
+ if (xfer->cs_change && xfer->delay.value) {
+ tegra_sflash_writel(tsd, tsd->def_command_reg,
+ SPI_COMMAND);
+ spi_transfer_delay_exec(xfer);
+ }
+ }
+ ret = 0;
+exit:
+ tegra_sflash_writel(tsd, tsd->def_command_reg, SPI_COMMAND);
+ msg->status = ret;
+ spi_finalize_current_message(master);
+ return ret;
+}
+
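+/*
+ * Interrupt handling for the PIO transfer: on a FIFO error (or if the
+ * controller is still busy) reset the controller and complete the
+ * transfer; otherwise drain the RX FIFO and either finish the transfer
+ * or program the next chunk.
+ */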
+static irqreturn_t handle_cpu_based_xfer(struct tegra_sflash_data *tsd)
+{
+ struct spi_transfer *t = tsd->curr_xfer;
+
+ spin_lock(&tsd->lock);
+ if (tsd->tx_status || tsd->rx_status || (tsd->status_reg & SPI_BSY)) {
+ dev_err(tsd->dev,
+ "CpuXfer ERROR bit set 0x%x\n", tsd->status_reg);
+ dev_err(tsd->dev,
+ "CpuXfer 0x%08x:0x%08x\n", tsd->command_reg,
+ tsd->dma_control_reg);
+ reset_control_assert(tsd->rst);
+ udelay(2);
+ reset_control_deassert(tsd->rst);
+ complete(&tsd->xfer_completion);
+ goto exit;
+ }
+
+ if (tsd->cur_direction & DATA_DIR_RX)
+ tegra_sflash_read_rx_fifo_to_client_rxbuf(tsd, t);
+
+ if (tsd->cur_direction & DATA_DIR_TX)
+ tsd->cur_pos = tsd->cur_tx_pos;
+ else
+ tsd->cur_pos = tsd->cur_rx_pos;
+
+ if (tsd->cur_pos == t->len) {
+ complete(&tsd->xfer_completion);
+ goto exit;
+ }
+
+ tegra_sflash_calculate_curr_xfer_param(tsd->cur_spi, tsd, t);
+ tegra_sflash_start_cpu_based_transfer(tsd, t);
+exit:
+ spin_unlock(&tsd->lock);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t tegra_sflash_isr(int irq, void *context_data)
+{
+ struct tegra_sflash_data *tsd = context_data;
+
+ tsd->status_reg = tegra_sflash_readl(tsd, SPI_STATUS);
+ if (tsd->cur_direction & DATA_DIR_TX)
+ tsd->tx_status = tsd->status_reg & SPI_TX_OVF;
+
+ if (tsd->cur_direction & DATA_DIR_RX)
+ tsd->rx_status = tsd->status_reg & SPI_RX_UNF;
+ tegra_sflash_clear_status(tsd);
+
+ return handle_cpu_based_xfer(tsd);
+}
+
+static const struct of_device_id tegra_sflash_of_match[] = {
+ { .compatible = "nvidia,tegra20-sflash", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, tegra_sflash_of_match);
+
+static int tegra_sflash_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct tegra_sflash_data *tsd;
+ int ret;
+ const struct of_device_id *match;
+
+ match = of_match_device(tegra_sflash_of_match, &pdev->dev);
+ if (!match) {
+ dev_err(&pdev->dev, "Error: No device match found\n");
+ return -ENODEV;
+ }
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*tsd));
+ if (!master) {
+ dev_err(&pdev->dev, "master allocation failed\n");
+ return -ENOMEM;
+ }
+
+ /* the spi->mode bits understood by this driver: */
+ master->mode_bits = SPI_CPOL | SPI_CPHA;
+ master->transfer_one_message = tegra_sflash_transfer_one_message;
+ master->auto_runtime_pm = true;
+ master->num_chipselect = MAX_CHIP_SELECT;
+
+ platform_set_drvdata(pdev, master);
+ tsd = spi_master_get_devdata(master);
+ tsd->master = master;
+ tsd->dev = &pdev->dev;
+ spin_lock_init(&tsd->lock);
+
+ if (of_property_read_u32(tsd->dev->of_node, "spi-max-frequency",
+ &master->max_speed_hz))
+ master->max_speed_hz = 25000000; /* 25MHz */
+
+ tsd->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(tsd->base)) {
+ ret = PTR_ERR(tsd->base);
+ goto exit_free_master;
+ }
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ goto exit_free_master;
+ tsd->irq = ret;
+
+ ret = request_irq(tsd->irq, tegra_sflash_isr, 0,
+ dev_name(&pdev->dev), tsd);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
+ tsd->irq);
+ goto exit_free_master;
+ }
+
+ tsd->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(tsd->clk)) {
+ dev_err(&pdev->dev, "can not get clock\n");
+ ret = PTR_ERR(tsd->clk);
+ goto exit_free_irq;
+ }
+
+ tsd->rst = devm_reset_control_get_exclusive(&pdev->dev, "spi");
+ if (IS_ERR(tsd->rst)) {
+ dev_err(&pdev->dev, "can not get reset\n");
+ ret = PTR_ERR(tsd->rst);
+ goto exit_free_irq;
+ }
+
+ init_completion(&tsd->xfer_completion);
+ pm_runtime_enable(&pdev->dev);
+ if (!pm_runtime_enabled(&pdev->dev)) {
+ ret = tegra_sflash_runtime_resume(&pdev->dev);
+ if (ret)
+ goto exit_pm_disable;
+ }
+
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "pm runtime get failed, e = %d\n", ret);
+ goto exit_pm_disable;
+ }
+
+ /* Reset controller */
+ reset_control_assert(tsd->rst);
+ udelay(2);
+ reset_control_deassert(tsd->rst);
+
+ tsd->def_command_reg = SPI_M_S | SPI_CS_SW;
+ tegra_sflash_writel(tsd, tsd->def_command_reg, SPI_COMMAND);
+ pm_runtime_put(&pdev->dev);
+
+ master->dev.of_node = pdev->dev.of_node;
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can not register to master err %d\n", ret);
+ goto exit_pm_disable;
+ }
+ return ret;
+
+exit_pm_disable:
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ tegra_sflash_runtime_suspend(&pdev->dev);
+exit_free_irq:
+ free_irq(tsd->irq, tsd);
+exit_free_master:
+ spi_master_put(master);
+ return ret;
+}
+
+static int tegra_sflash_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct tegra_sflash_data *tsd = spi_master_get_devdata(master);
+
+ free_irq(tsd->irq, tsd);
+
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ tegra_sflash_runtime_suspend(&pdev->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int tegra_sflash_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+
+ return spi_master_suspend(master);
+}
+
+static int tegra_sflash_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct tegra_sflash_data *tsd = spi_master_get_devdata(master);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0) {
+ dev_err(dev, "pm runtime failed, e = %d\n", ret);
+ return ret;
+ }
+ tegra_sflash_writel(tsd, tsd->command_reg, SPI_COMMAND);
+ pm_runtime_put(dev);
+
+ return spi_master_resume(master);
+}
+#endif
+
+static int tegra_sflash_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct tegra_sflash_data *tsd = spi_master_get_devdata(master);
+
+ /* Flush all writes still in the PPSB write queue by reading back */
+ tegra_sflash_readl(tsd, SPI_COMMAND);
+
+ clk_disable_unprepare(tsd->clk);
+ return 0;
+}
+
+static int tegra_sflash_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct tegra_sflash_data *tsd = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_prepare_enable(tsd->clk);
+ if (ret < 0) {
+ dev_err(tsd->dev, "clk_prepare failed: %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static const struct dev_pm_ops slink_pm_ops = {
+ SET_RUNTIME_PM_OPS(tegra_sflash_runtime_suspend,
+ tegra_sflash_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(tegra_sflash_suspend, tegra_sflash_resume)
+};
+static struct platform_driver tegra_sflash_driver = {
+ .driver = {
+ .name = "spi-tegra-sflash",
+ .pm = &slink_pm_ops,
+ .of_match_table = tegra_sflash_of_match,
+ },
+ .probe = tegra_sflash_probe,
+ .remove = tegra_sflash_remove,
+};
+module_platform_driver(tegra_sflash_driver);
+
+MODULE_ALIAS("platform:spi-tegra-sflash");
+MODULE_DESCRIPTION("NVIDIA Tegra20 Serial Flash Controller Driver");
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
new file mode 100644
index 000000000..24cab56ec
--- /dev/null
+++ b/drivers/spi/spi-tegra20-slink.c
@@ -0,0 +1,1232 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * SPI driver for Nvidia's Tegra20/Tegra30 SLINK Controller.
+ *
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/reset.h>
+#include <linux/spi/spi.h>
+
+#include <soc/tegra/common.h>
+
+#define SLINK_COMMAND 0x000
+#define SLINK_BIT_LENGTH(x) (((x) & 0x1f) << 0)
+#define SLINK_WORD_SIZE(x) (((x) & 0x1f) << 5)
+#define SLINK_BOTH_EN (1 << 10)
+#define SLINK_CS_SW (1 << 11)
+#define SLINK_CS_VALUE (1 << 12)
+#define SLINK_CS_POLARITY (1 << 13)
+#define SLINK_IDLE_SDA_DRIVE_LOW (0 << 16)
+#define SLINK_IDLE_SDA_DRIVE_HIGH (1 << 16)
+#define SLINK_IDLE_SDA_PULL_LOW (2 << 16)
+#define SLINK_IDLE_SDA_PULL_HIGH (3 << 16)
+#define SLINK_IDLE_SDA_MASK (3 << 16)
+#define SLINK_CS_POLARITY1 (1 << 20)
+#define SLINK_CK_SDA (1 << 21)
+#define SLINK_CS_POLARITY2 (1 << 22)
+#define SLINK_CS_POLARITY3 (1 << 23)
+#define SLINK_IDLE_SCLK_DRIVE_LOW (0 << 24)
+#define SLINK_IDLE_SCLK_DRIVE_HIGH (1 << 24)
+#define SLINK_IDLE_SCLK_PULL_LOW (2 << 24)
+#define SLINK_IDLE_SCLK_PULL_HIGH (3 << 24)
+#define SLINK_IDLE_SCLK_MASK (3 << 24)
+#define SLINK_M_S (1 << 28)
+#define SLINK_WAIT (1 << 29)
+#define SLINK_GO (1 << 30)
+#define SLINK_ENB (1 << 31)
+
+#define SLINK_MODES (SLINK_IDLE_SCLK_MASK | SLINK_CK_SDA)
+
+#define SLINK_COMMAND2 0x004
+#define SLINK_LSBFE (1 << 0)
+#define SLINK_SSOE (1 << 1)
+#define SLINK_SPIE (1 << 4)
+#define SLINK_BIDIROE (1 << 6)
+#define SLINK_MODFEN (1 << 7)
+#define SLINK_INT_SIZE(x) (((x) & 0x1f) << 8)
+#define SLINK_CS_ACTIVE_BETWEEN (1 << 17)
+#define SLINK_SS_EN_CS(x) (((x) & 0x3) << 18)
+#define SLINK_SS_SETUP(x) (((x) & 0x3) << 20)
+#define SLINK_FIFO_REFILLS_0 (0 << 22)
+#define SLINK_FIFO_REFILLS_1 (1 << 22)
+#define SLINK_FIFO_REFILLS_2 (2 << 22)
+#define SLINK_FIFO_REFILLS_3 (3 << 22)
+#define SLINK_FIFO_REFILLS_MASK (3 << 22)
+#define SLINK_WAIT_PACK_INT(x) (((x) & 0x7) << 26)
+#define SLINK_SPC0 (1 << 29)
+#define SLINK_TXEN (1 << 30)
+#define SLINK_RXEN (1 << 31)
+
+#define SLINK_STATUS 0x008
+#define SLINK_COUNT(val) (((val) >> 0) & 0x1f)
+#define SLINK_WORD(val) (((val) >> 5) & 0x1f)
+#define SLINK_BLK_CNT(val) (((val) >> 0) & 0xffff)
+#define SLINK_MODF (1 << 16)
+#define SLINK_RX_UNF (1 << 18)
+#define SLINK_TX_OVF (1 << 19)
+#define SLINK_TX_FULL (1 << 20)
+#define SLINK_TX_EMPTY (1 << 21)
+#define SLINK_RX_FULL (1 << 22)
+#define SLINK_RX_EMPTY (1 << 23)
+#define SLINK_TX_UNF (1 << 24)
+#define SLINK_RX_OVF (1 << 25)
+#define SLINK_TX_FLUSH (1 << 26)
+#define SLINK_RX_FLUSH (1 << 27)
+#define SLINK_SCLK (1 << 28)
+#define SLINK_ERR (1 << 29)
+#define SLINK_RDY (1 << 30)
+#define SLINK_BSY (1 << 31)
+#define SLINK_FIFO_ERROR (SLINK_TX_OVF | SLINK_RX_UNF | \
+ SLINK_TX_UNF | SLINK_RX_OVF)
+
+#define SLINK_FIFO_EMPTY (SLINK_TX_EMPTY | SLINK_RX_EMPTY)
+
+#define SLINK_MAS_DATA 0x010
+#define SLINK_SLAVE_DATA 0x014
+
+#define SLINK_DMA_CTL 0x018
+#define SLINK_DMA_BLOCK_SIZE(x) (((x) & 0xffff) << 0)
+#define SLINK_TX_TRIG_1 (0 << 16)
+#define SLINK_TX_TRIG_4 (1 << 16)
+#define SLINK_TX_TRIG_8 (2 << 16)
+#define SLINK_TX_TRIG_16 (3 << 16)
+#define SLINK_TX_TRIG_MASK (3 << 16)
+#define SLINK_RX_TRIG_1 (0 << 18)
+#define SLINK_RX_TRIG_4 (1 << 18)
+#define SLINK_RX_TRIG_8 (2 << 18)
+#define SLINK_RX_TRIG_16 (3 << 18)
+#define SLINK_RX_TRIG_MASK (3 << 18)
+#define SLINK_PACKED (1 << 20)
+#define SLINK_PACK_SIZE_4 (0 << 21)
+#define SLINK_PACK_SIZE_8 (1 << 21)
+#define SLINK_PACK_SIZE_16 (2 << 21)
+#define SLINK_PACK_SIZE_32 (3 << 21)
+#define SLINK_PACK_SIZE_MASK (3 << 21)
+#define SLINK_IE_TXC (1 << 26)
+#define SLINK_IE_RXC (1 << 27)
+#define SLINK_DMA_EN (1 << 31)
+
+#define SLINK_STATUS2 0x01c
+#define SLINK_TX_FIFO_EMPTY_COUNT(val) (((val) & 0x3f) >> 0)
+#define SLINK_RX_FIFO_FULL_COUNT(val) (((val) & 0x3f0000) >> 16)
+#define SLINK_SS_HOLD_TIME(val) (((val) & 0xF) << 6)
+
+#define SLINK_TX_FIFO 0x100
+#define SLINK_RX_FIFO 0x180
+
+#define DATA_DIR_TX (1 << 0)
+#define DATA_DIR_RX (1 << 1)
+
+#define SLINK_DMA_TIMEOUT (msecs_to_jiffies(1000))
+
+#define DEFAULT_SPI_DMA_BUF_LEN (16*1024)
+#define TX_FIFO_EMPTY_COUNT_MAX SLINK_TX_FIFO_EMPTY_COUNT(0x20)
+#define RX_FIFO_FULL_COUNT_ZERO SLINK_RX_FIFO_FULL_COUNT(0)
+
+#define SLINK_STATUS2_RESET \
+ (TX_FIFO_EMPTY_COUNT_MAX | RX_FIFO_FULL_COUNT_ZERO << 16)
+
+#define MAX_CHIP_SELECT 4
+#define SLINK_FIFO_DEPTH 32
+
+struct tegra_slink_chip_data {
+ bool cs_hold_time;
+};
+
+struct tegra_slink_data {
+ struct device *dev;
+ struct spi_master *master;
+ const struct tegra_slink_chip_data *chip_data;
+ spinlock_t lock;
+
+ struct clk *clk;
+ struct reset_control *rst;
+ void __iomem *base;
+ phys_addr_t phys;
+ unsigned irq;
+ u32 cur_speed;
+
+ struct spi_device *cur_spi;
+ unsigned cur_pos;
+ unsigned cur_len;
+ unsigned words_per_32bit;
+ unsigned bytes_per_word;
+ unsigned curr_dma_words;
+ unsigned cur_direction;
+
+ unsigned cur_rx_pos;
+ unsigned cur_tx_pos;
+
+ unsigned dma_buf_size;
+ unsigned max_buf_size;
+ bool is_curr_dma_xfer;
+
+ struct completion rx_dma_complete;
+ struct completion tx_dma_complete;
+
+ u32 tx_status;
+ u32 rx_status;
+ u32 status_reg;
+ bool is_packed;
+ u32 packed_size;
+
+ u32 command_reg;
+ u32 command2_reg;
+ u32 dma_control_reg;
+ u32 def_command_reg;
+ u32 def_command2_reg;
+
+ struct completion xfer_completion;
+ struct spi_transfer *curr_xfer;
+ struct dma_chan *rx_dma_chan;
+ u32 *rx_dma_buf;
+ dma_addr_t rx_dma_phys;
+ struct dma_async_tx_descriptor *rx_dma_desc;
+
+ struct dma_chan *tx_dma_chan;
+ u32 *tx_dma_buf;
+ dma_addr_t tx_dma_phys;
+ struct dma_async_tx_descriptor *tx_dma_desc;
+};
+
+static inline u32 tegra_slink_readl(struct tegra_slink_data *tspi,
+ unsigned long reg)
+{
+ return readl(tspi->base + reg);
+}
+
+static inline void tegra_slink_writel(struct tegra_slink_data *tspi,
+ u32 val, unsigned long reg)
+{
+ writel(val, tspi->base + reg);
+
+ /* Read back register to make sure that register writes completed */
+ if (reg != SLINK_TX_FIFO)
+ readl(tspi->base + SLINK_MAS_DATA);
+}
+
+static void tegra_slink_clear_status(struct tegra_slink_data *tspi)
+{
+ u32 val_write;
+
+ tegra_slink_readl(tspi, SLINK_STATUS);
+
+ /* Write 1 to clear status register */
+ val_write = SLINK_RDY | SLINK_FIFO_ERROR;
+ tegra_slink_writel(tspi, val_write, SLINK_STATUS);
+}
+
+static u32 tegra_slink_get_packed_size(struct tegra_slink_data *tspi,
+ struct spi_transfer *t)
+{
+ switch (tspi->bytes_per_word) {
+ case 0:
+ return SLINK_PACK_SIZE_4;
+ case 1:
+ return SLINK_PACK_SIZE_8;
+ case 2:
+ return SLINK_PACK_SIZE_16;
+ case 4:
+ return SLINK_PACK_SIZE_32;
+ default:
+ return 0;
+ }
+}
+
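+/*
+ * Decide between packed mode (8- or 16-bit words packed into 32-bit FIFO
+ * entries) and unpacked mode, and compute how many words of the remaining
+ * transfer the next chunk will move.
+ */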
+static unsigned tegra_slink_calculate_curr_xfer_param(
+ struct spi_device *spi, struct tegra_slink_data *tspi,
+ struct spi_transfer *t)
+{
+ unsigned remain_len = t->len - tspi->cur_pos;
+ unsigned max_word;
+ unsigned bits_per_word;
+ unsigned max_len;
+ unsigned total_fifo_words;
+
+ bits_per_word = t->bits_per_word;
+ tspi->bytes_per_word = DIV_ROUND_UP(bits_per_word, 8);
+
+ if (bits_per_word == 8 || bits_per_word == 16) {
+ tspi->is_packed = true;
+ tspi->words_per_32bit = 32/bits_per_word;
+ } else {
+ tspi->is_packed = false;
+ tspi->words_per_32bit = 1;
+ }
+ tspi->packed_size = tegra_slink_get_packed_size(tspi, t);
+
+ if (tspi->is_packed) {
+ max_len = min(remain_len, tspi->max_buf_size);
+ tspi->curr_dma_words = max_len/tspi->bytes_per_word;
+ total_fifo_words = max_len/4;
+ } else {
+ max_word = (remain_len - 1) / tspi->bytes_per_word + 1;
+ max_word = min(max_word, tspi->max_buf_size/4);
+ tspi->curr_dma_words = max_word;
+ total_fifo_words = max_word;
+ }
+ return total_fifo_words;
+}
+
+static unsigned tegra_slink_fill_tx_fifo_from_client_txbuf(
+ struct tegra_slink_data *tspi, struct spi_transfer *t)
+{
+ unsigned nbytes;
+ unsigned tx_empty_count;
+ u32 fifo_status;
+ unsigned max_n_32bit;
+ unsigned i, count;
+ unsigned int written_words;
+ unsigned fifo_words_left;
+ u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
+
+ fifo_status = tegra_slink_readl(tspi, SLINK_STATUS2);
+ tx_empty_count = SLINK_TX_FIFO_EMPTY_COUNT(fifo_status);
+
+ if (tspi->is_packed) {
+ fifo_words_left = tx_empty_count * tspi->words_per_32bit;
+ written_words = min(fifo_words_left, tspi->curr_dma_words);
+ nbytes = written_words * tspi->bytes_per_word;
+ max_n_32bit = DIV_ROUND_UP(nbytes, 4);
+ for (count = 0; count < max_n_32bit; count++) {
+ u32 x = 0;
+ for (i = 0; (i < 4) && nbytes; i++, nbytes--)
+ x |= (u32)(*tx_buf++) << (i * 8);
+ tegra_slink_writel(tspi, x, SLINK_TX_FIFO);
+ }
+ } else {
+ max_n_32bit = min(tspi->curr_dma_words, tx_empty_count);
+ written_words = max_n_32bit;
+ nbytes = written_words * tspi->bytes_per_word;
+ for (count = 0; count < max_n_32bit; count++) {
+ u32 x = 0;
+ for (i = 0; nbytes && (i < tspi->bytes_per_word);
+ i++, nbytes--)
+ x |= (u32)(*tx_buf++) << (i * 8);
+ tegra_slink_writel(tspi, x, SLINK_TX_FIFO);
+ }
+ }
+ tspi->cur_tx_pos += written_words * tspi->bytes_per_word;
+ return written_words;
+}
+
+static unsigned int tegra_slink_read_rx_fifo_to_client_rxbuf(
+ struct tegra_slink_data *tspi, struct spi_transfer *t)
+{
+ unsigned rx_full_count;
+ u32 fifo_status;
+ unsigned i, count;
+ unsigned int read_words = 0;
+ unsigned len;
+ u8 *rx_buf = (u8 *)t->rx_buf + tspi->cur_rx_pos;
+
+ fifo_status = tegra_slink_readl(tspi, SLINK_STATUS2);
+ rx_full_count = SLINK_RX_FIFO_FULL_COUNT(fifo_status);
+ if (tspi->is_packed) {
+ len = tspi->curr_dma_words * tspi->bytes_per_word;
+ for (count = 0; count < rx_full_count; count++) {
+ u32 x = tegra_slink_readl(tspi, SLINK_RX_FIFO);
+ for (i = 0; len && (i < 4); i++, len--)
+ *rx_buf++ = (x >> i*8) & 0xFF;
+ }
+ tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
+ read_words += tspi->curr_dma_words;
+ } else {
+ for (count = 0; count < rx_full_count; count++) {
+ u32 x = tegra_slink_readl(tspi, SLINK_RX_FIFO);
+ for (i = 0; (i < tspi->bytes_per_word); i++)
+ *rx_buf++ = (x >> (i*8)) & 0xFF;
+ }
+ tspi->cur_rx_pos += rx_full_count * tspi->bytes_per_word;
+ read_words += rx_full_count;
+ }
+ return read_words;
+}
+
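+/*
+ * Stage the client TX data in the coherent DMA bounce buffer: packed mode
+ * copies the bytes verbatim, unpacked mode expands one SPI word into each
+ * 32-bit buffer entry.
+ */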
+static void tegra_slink_copy_client_txbuf_to_spi_txbuf(
+ struct tegra_slink_data *tspi, struct spi_transfer *t)
+{
+ /* Make the DMA buffer visible to the CPU */
+ dma_sync_single_for_cpu(tspi->dev, tspi->tx_dma_phys,
+ tspi->dma_buf_size, DMA_TO_DEVICE);
+
+ if (tspi->is_packed) {
+ unsigned len = tspi->curr_dma_words * tspi->bytes_per_word;
+ memcpy(tspi->tx_dma_buf, t->tx_buf + tspi->cur_pos, len);
+ } else {
+ unsigned int i;
+ unsigned int count;
+ u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
+ unsigned consume = tspi->curr_dma_words * tspi->bytes_per_word;
+
+ for (count = 0; count < tspi->curr_dma_words; count++) {
+ u32 x = 0;
+ for (i = 0; consume && (i < tspi->bytes_per_word);
+ i++, consume--)
+ x |= (u32)(*tx_buf++) << (i * 8);
+ tspi->tx_dma_buf[count] = x;
+ }
+ }
+ tspi->cur_tx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
+
+ /* Make the DMA buffer visible to the device */
+ dma_sync_single_for_device(tspi->dev, tspi->tx_dma_phys,
+ tspi->dma_buf_size, DMA_TO_DEVICE);
+}
+
+static void tegra_slink_copy_spi_rxbuf_to_client_rxbuf(
+ struct tegra_slink_data *tspi, struct spi_transfer *t)
+{
+ unsigned len;
+
+ /* Make the DMA buffer visible to the CPU */
+ dma_sync_single_for_cpu(tspi->dev, tspi->rx_dma_phys,
+ tspi->dma_buf_size, DMA_FROM_DEVICE);
+
+ if (tspi->is_packed) {
+ len = tspi->curr_dma_words * tspi->bytes_per_word;
+ memcpy(t->rx_buf + tspi->cur_rx_pos, tspi->rx_dma_buf, len);
+ } else {
+ unsigned int i;
+ unsigned int count;
+ unsigned char *rx_buf = t->rx_buf + tspi->cur_rx_pos;
+ u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
+
+ for (count = 0; count < tspi->curr_dma_words; count++) {
+ u32 x = tspi->rx_dma_buf[count] & rx_mask;
+ for (i = 0; (i < tspi->bytes_per_word); i++)
+ *rx_buf++ = (x >> (i*8)) & 0xFF;
+ }
+ }
+ tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
+
+ /* Make the DMA buffer visible to the device */
+ dma_sync_single_for_device(tspi->dev, tspi->rx_dma_phys,
+ tspi->dma_buf_size, DMA_FROM_DEVICE);
+}
+
+static void tegra_slink_dma_complete(void *args)
+{
+ struct completion *dma_complete = args;
+
+ complete(dma_complete);
+}
+
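+/*
+ * Queue a memory-to-device slave DMA transfer of len bytes from the TX
+ * bounce buffer; tx_dma_complete fires from the descriptor callback.
+ */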
+static int tegra_slink_start_tx_dma(struct tegra_slink_data *tspi, int len)
+{
+ reinit_completion(&tspi->tx_dma_complete);
+ tspi->tx_dma_desc = dmaengine_prep_slave_single(tspi->tx_dma_chan,
+ tspi->tx_dma_phys, len, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!tspi->tx_dma_desc) {
+ dev_err(tspi->dev, "Not able to get desc for Tx\n");
+ return -EIO;
+ }
+
+ tspi->tx_dma_desc->callback = tegra_slink_dma_complete;
+ tspi->tx_dma_desc->callback_param = &tspi->tx_dma_complete;
+
+ dmaengine_submit(tspi->tx_dma_desc);
+ dma_async_issue_pending(tspi->tx_dma_chan);
+ return 0;
+}
+
+static int tegra_slink_start_rx_dma(struct tegra_slink_data *tspi, int len)
+{
+ reinit_completion(&tspi->rx_dma_complete);
+ tspi->rx_dma_desc = dmaengine_prep_slave_single(tspi->rx_dma_chan,
+ tspi->rx_dma_phys, len, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!tspi->rx_dma_desc) {
+ dev_err(tspi->dev, "Not able to get desc for Rx\n");
+ return -EIO;
+ }
+
+ tspi->rx_dma_desc->callback = tegra_slink_dma_complete;
+ tspi->rx_dma_desc->callback_param = &tspi->rx_dma_complete;
+
+ dmaengine_submit(tspi->rx_dma_desc);
+ dma_async_issue_pending(tspi->rx_dma_chan);
+ return 0;
+}
+
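+/*
+ * Check that both FIFOs are empty, pick the FIFO trigger levels from the
+ * transfer length, start the TX and/or RX DMA channels and finally set
+ * DMA_EN to let the controller run the transfer.
+ */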
+static int tegra_slink_start_dma_based_transfer(
+ struct tegra_slink_data *tspi, struct spi_transfer *t)
+{
+ u32 val;
+ unsigned int len;
+ int ret = 0;
+ u32 status;
+
+ /* Make sure that the Rx and Tx FIFOs are empty */
+ status = tegra_slink_readl(tspi, SLINK_STATUS);
+ if ((status & SLINK_FIFO_EMPTY) != SLINK_FIFO_EMPTY) {
+ dev_err(tspi->dev, "Rx/Tx fifo are not empty status 0x%08x\n",
+ (unsigned)status);
+ return -EIO;
+ }
+
+ val = SLINK_DMA_BLOCK_SIZE(tspi->curr_dma_words - 1);
+ val |= tspi->packed_size;
+ if (tspi->is_packed)
+ len = DIV_ROUND_UP(tspi->curr_dma_words * tspi->bytes_per_word,
+ 4) * 4;
+ else
+ len = tspi->curr_dma_words * 4;
+
+ /* Set attention level based on length of transfer */
+ if (len & 0xF)
+ val |= SLINK_TX_TRIG_1 | SLINK_RX_TRIG_1;
+ else if (((len) >> 4) & 0x1)
+ val |= SLINK_TX_TRIG_4 | SLINK_RX_TRIG_4;
+ else
+ val |= SLINK_TX_TRIG_8 | SLINK_RX_TRIG_8;
+
+ if (tspi->cur_direction & DATA_DIR_TX)
+ val |= SLINK_IE_TXC;
+
+ if (tspi->cur_direction & DATA_DIR_RX)
+ val |= SLINK_IE_RXC;
+
+ tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
+ tspi->dma_control_reg = val;
+
+ if (tspi->cur_direction & DATA_DIR_TX) {
+ tegra_slink_copy_client_txbuf_to_spi_txbuf(tspi, t);
+ wmb();
+ ret = tegra_slink_start_tx_dma(tspi, len);
+ if (ret < 0) {
+ dev_err(tspi->dev,
+ "Starting tx dma failed, err %d\n", ret);
+ return ret;
+ }
+
+ /* Wait for the Tx FIFO to fill before starting SLINK */
+ status = tegra_slink_readl(tspi, SLINK_STATUS);
+ while (!(status & SLINK_TX_FULL))
+ status = tegra_slink_readl(tspi, SLINK_STATUS);
+ }
+
+ if (tspi->cur_direction & DATA_DIR_RX) {
+ /* Make the DMA buffer visible to the device */
+ dma_sync_single_for_device(tspi->dev, tspi->rx_dma_phys,
+ tspi->dma_buf_size, DMA_FROM_DEVICE);
+
+ ret = tegra_slink_start_rx_dma(tspi, len);
+ if (ret < 0) {
+ dev_err(tspi->dev,
+ "Starting rx dma failed, err %d\n", ret);
+ if (tspi->cur_direction & DATA_DIR_TX)
+ dmaengine_terminate_all(tspi->tx_dma_chan);
+ return ret;
+ }
+ }
+ tspi->is_curr_dma_xfer = true;
+ if (tspi->is_packed) {
+ val |= SLINK_PACKED;
+ tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
+ /* HW needs a small delay after setting packed mode */
+ udelay(1);
+ }
+ tspi->dma_control_reg = val;
+
+ val |= SLINK_DMA_EN;
+ tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
+ return ret;
+}
+
+static int tegra_slink_start_cpu_based_transfer(
+ struct tegra_slink_data *tspi, struct spi_transfer *t)
+{
+ u32 val;
+ unsigned cur_words;
+
+ val = tspi->packed_size;
+ if (tspi->cur_direction & DATA_DIR_TX)
+ val |= SLINK_IE_TXC;
+
+ if (tspi->cur_direction & DATA_DIR_RX)
+ val |= SLINK_IE_RXC;
+
+ tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
+ tspi->dma_control_reg = val;
+
+ if (tspi->cur_direction & DATA_DIR_TX)
+ cur_words = tegra_slink_fill_tx_fifo_from_client_txbuf(tspi, t);
+ else
+ cur_words = tspi->curr_dma_words;
+ val |= SLINK_DMA_BLOCK_SIZE(cur_words - 1);
+ tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
+ tspi->dma_control_reg = val;
+
+ tspi->is_curr_dma_xfer = false;
+ if (tspi->is_packed) {
+ val |= SLINK_PACKED;
+ tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
+ udelay(1);
+ wmb();
+ }
+ tspi->dma_control_reg = val;
+ val |= SLINK_DMA_EN;
+ tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
+ return 0;
+}
+
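+/*
+ * Request a DMA channel for one direction, allocate its coherent bounce
+ * buffer and point the slave configuration at the SLINK TX or RX FIFO.
+ */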
+static int tegra_slink_init_dma_param(struct tegra_slink_data *tspi,
+ bool dma_to_memory)
+{
+ struct dma_chan *dma_chan;
+ u32 *dma_buf;
+ dma_addr_t dma_phys;
+ int ret;
+ struct dma_slave_config dma_sconfig;
+
+ dma_chan = dma_request_chan(tspi->dev, dma_to_memory ? "rx" : "tx");
+ if (IS_ERR(dma_chan))
+ return dev_err_probe(tspi->dev, PTR_ERR(dma_chan),
+ "Dma channel is not available\n");
+
+ dma_buf = dma_alloc_coherent(tspi->dev, tspi->dma_buf_size,
+ &dma_phys, GFP_KERNEL);
+ if (!dma_buf) {
+ dev_err(tspi->dev, " Not able to allocate the dma buffer\n");
+ dma_release_channel(dma_chan);
+ return -ENOMEM;
+ }
+
+ if (dma_to_memory) {
+ dma_sconfig.src_addr = tspi->phys + SLINK_RX_FIFO;
+ dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dma_sconfig.src_maxburst = 0;
+ } else {
+ dma_sconfig.dst_addr = tspi->phys + SLINK_TX_FIFO;
+ dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dma_sconfig.dst_maxburst = 0;
+ }
+
+ ret = dmaengine_slave_config(dma_chan, &dma_sconfig);
+ if (ret)
+ goto scrub;
+ if (dma_to_memory) {
+ tspi->rx_dma_chan = dma_chan;
+ tspi->rx_dma_buf = dma_buf;
+ tspi->rx_dma_phys = dma_phys;
+ } else {
+ tspi->tx_dma_chan = dma_chan;
+ tspi->tx_dma_buf = dma_buf;
+ tspi->tx_dma_phys = dma_phys;
+ }
+ return 0;
+
+scrub:
+ dma_free_coherent(tspi->dev, tspi->dma_buf_size, dma_buf, dma_phys);
+ dma_release_channel(dma_chan);
+ return ret;
+}
+
+static void tegra_slink_deinit_dma_param(struct tegra_slink_data *tspi,
+ bool dma_to_memory)
+{
+ u32 *dma_buf;
+ dma_addr_t dma_phys;
+ struct dma_chan *dma_chan;
+
+ if (dma_to_memory) {
+ dma_buf = tspi->rx_dma_buf;
+ dma_chan = tspi->rx_dma_chan;
+ dma_phys = tspi->rx_dma_phys;
+ tspi->rx_dma_chan = NULL;
+ tspi->rx_dma_buf = NULL;
+ } else {
+ dma_buf = tspi->tx_dma_buf;
+ dma_chan = tspi->tx_dma_chan;
+ dma_phys = tspi->tx_dma_phys;
+ tspi->tx_dma_buf = NULL;
+ tspi->tx_dma_chan = NULL;
+ }
+ if (!dma_chan)
+ return;
+
+ dma_free_coherent(tspi->dev, tspi->dma_buf_size, dma_buf, dma_phys);
+ dma_release_channel(dma_chan);
+}
+
+static int tegra_slink_start_transfer_one(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct tegra_slink_data *tspi = spi_master_get_devdata(spi->master);
+ u32 speed;
+ u8 bits_per_word;
+ unsigned total_fifo_words;
+ int ret;
+ u32 command;
+ u32 command2;
+
+ bits_per_word = t->bits_per_word;
+ speed = t->speed_hz;
+ if (speed != tspi->cur_speed) {
+ dev_pm_opp_set_rate(tspi->dev, speed * 4);
+ tspi->cur_speed = speed;
+ }
+
+ tspi->cur_spi = spi;
+ tspi->cur_pos = 0;
+ tspi->cur_rx_pos = 0;
+ tspi->cur_tx_pos = 0;
+ tspi->curr_xfer = t;
+ total_fifo_words = tegra_slink_calculate_curr_xfer_param(spi, tspi, t);
+
+ command = tspi->command_reg;
+ command &= ~SLINK_BIT_LENGTH(~0);
+ command |= SLINK_BIT_LENGTH(bits_per_word - 1);
+
+ command2 = tspi->command2_reg;
+ command2 &= ~(SLINK_RXEN | SLINK_TXEN);
+
+ tspi->cur_direction = 0;
+ if (t->rx_buf) {
+ command2 |= SLINK_RXEN;
+ tspi->cur_direction |= DATA_DIR_RX;
+ }
+ if (t->tx_buf) {
+ command2 |= SLINK_TXEN;
+ tspi->cur_direction |= DATA_DIR_TX;
+ }
+
+ /*
+ * Writing to the command2 register before the command register prevents
+ * a spike in chip_select line 0. This selects the chip_select line
+ * before changing the chip_select value.
+ */
+ tegra_slink_writel(tspi, command2, SLINK_COMMAND2);
+ tspi->command2_reg = command2;
+
+ tegra_slink_writel(tspi, command, SLINK_COMMAND);
+ tspi->command_reg = command;
+
+ if (total_fifo_words > SLINK_FIFO_DEPTH)
+ ret = tegra_slink_start_dma_based_transfer(tspi, t);
+ else
+ ret = tegra_slink_start_cpu_based_transfer(tspi, t);
+ return ret;
+}
+
+static int tegra_slink_setup(struct spi_device *spi)
+{
+ static const u32 cs_pol_bit[MAX_CHIP_SELECT] = {
+ SLINK_CS_POLARITY,
+ SLINK_CS_POLARITY1,
+ SLINK_CS_POLARITY2,
+ SLINK_CS_POLARITY3,
+ };
+
+ struct tegra_slink_data *tspi = spi_master_get_devdata(spi->master);
+ u32 val;
+ unsigned long flags;
+ int ret;
+
+ dev_dbg(&spi->dev, "setup %d bpw, %scpol, %scpha, %dHz\n",
+ spi->bits_per_word,
+ spi->mode & SPI_CPOL ? "" : "~",
+ spi->mode & SPI_CPHA ? "" : "~",
+ spi->max_speed_hz);
+
+ ret = pm_runtime_resume_and_get(tspi->dev);
+ if (ret < 0) {
+ dev_err(tspi->dev, "pm runtime failed, e = %d\n", ret);
+ return ret;
+ }
+
+ spin_lock_irqsave(&tspi->lock, flags);
+ val = tspi->def_command_reg;
+ if (spi->mode & SPI_CS_HIGH)
+ val |= cs_pol_bit[spi->chip_select];
+ else
+ val &= ~cs_pol_bit[spi->chip_select];
+ tspi->def_command_reg = val;
+ tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND);
+ spin_unlock_irqrestore(&tspi->lock, flags);
+
+ pm_runtime_put(tspi->dev);
+ return 0;
+}
+
+static int tegra_slink_prepare_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct tegra_slink_data *tspi = spi_master_get_devdata(master);
+ struct spi_device *spi = msg->spi;
+
+ tegra_slink_clear_status(tspi);
+
+ tspi->command_reg = tspi->def_command_reg;
+ tspi->command_reg |= SLINK_CS_SW | SLINK_CS_VALUE;
+
+ tspi->command2_reg = tspi->def_command2_reg;
+ tspi->command2_reg |= SLINK_SS_EN_CS(spi->chip_select);
+
+ tspi->command_reg &= ~SLINK_MODES;
+ if (spi->mode & SPI_CPHA)
+ tspi->command_reg |= SLINK_CK_SDA;
+
+ if (spi->mode & SPI_CPOL)
+ tspi->command_reg |= SLINK_IDLE_SCLK_DRIVE_HIGH;
+ else
+ tspi->command_reg |= SLINK_IDLE_SCLK_DRIVE_LOW;
+
+ return 0;
+}
+
+static int tegra_slink_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct tegra_slink_data *tspi = spi_master_get_devdata(master);
+ int ret;
+
+ reinit_completion(&tspi->xfer_completion);
+ ret = tegra_slink_start_transfer_one(spi, xfer);
+ if (ret < 0) {
+ dev_err(tspi->dev,
+ "spi can not start transfer, err %d\n", ret);
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&tspi->xfer_completion,
+ SLINK_DMA_TIMEOUT);
+ if (WARN_ON(ret == 0)) {
+ dev_err(tspi->dev,
+ "spi transfer timeout, err %d\n", ret);
+ return -EIO;
+ }
+
+ if (tspi->tx_status)
+ return tspi->tx_status;
+ if (tspi->rx_status)
+ return tspi->rx_status;
+
+ return 0;
+}
+
+static int tegra_slink_unprepare_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct tegra_slink_data *tspi = spi_master_get_devdata(master);
+
+ tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND);
+ tegra_slink_writel(tspi, tspi->def_command2_reg, SLINK_COMMAND2);
+
+ return 0;
+}
+
+static irqreturn_t handle_cpu_based_xfer(struct tegra_slink_data *tspi)
+{
+ struct spi_transfer *t = tspi->curr_xfer;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tspi->lock, flags);
+ if (tspi->tx_status || tspi->rx_status ||
+ (tspi->status_reg & SLINK_BSY)) {
+ dev_err(tspi->dev,
+ "CpuXfer ERROR bit set 0x%x\n", tspi->status_reg);
+ dev_err(tspi->dev,
+ "CpuXfer 0x%08x:0x%08x:0x%08x\n", tspi->command_reg,
+ tspi->command2_reg, tspi->dma_control_reg);
+ reset_control_assert(tspi->rst);
+ udelay(2);
+ reset_control_deassert(tspi->rst);
+ complete(&tspi->xfer_completion);
+ goto exit;
+ }
+
+ if (tspi->cur_direction & DATA_DIR_RX)
+ tegra_slink_read_rx_fifo_to_client_rxbuf(tspi, t);
+
+ if (tspi->cur_direction & DATA_DIR_TX)
+ tspi->cur_pos = tspi->cur_tx_pos;
+ else
+ tspi->cur_pos = tspi->cur_rx_pos;
+
+ if (tspi->cur_pos == t->len) {
+ complete(&tspi->xfer_completion);
+ goto exit;
+ }
+
+ tegra_slink_calculate_curr_xfer_param(tspi->cur_spi, tspi, t);
+ tegra_slink_start_cpu_based_transfer(tspi, t);
+exit:
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ return IRQ_HANDLED;
+}
+
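+/*
+ * DMA completion path: wait for the outstanding TX/RX DMA transfers
+ * (terminating them on error or timeout), copy received data back to the
+ * client buffer and either complete the transfer or start the next chunk.
+ */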
+static irqreturn_t handle_dma_based_xfer(struct tegra_slink_data *tspi)
+{
+ struct spi_transfer *t = tspi->curr_xfer;
+ long wait_status;
+ int err = 0;
+ unsigned total_fifo_words;
+ unsigned long flags;
+
+ /* Abort dmas if any error */
+ if (tspi->cur_direction & DATA_DIR_TX) {
+ if (tspi->tx_status) {
+ dmaengine_terminate_all(tspi->tx_dma_chan);
+ err += 1;
+ } else {
+ wait_status = wait_for_completion_interruptible_timeout(
+ &tspi->tx_dma_complete, SLINK_DMA_TIMEOUT);
+ if (wait_status <= 0) {
+ dmaengine_terminate_all(tspi->tx_dma_chan);
+ dev_err(tspi->dev, "TxDma Xfer failed\n");
+ err += 1;
+ }
+ }
+ }
+
+ if (tspi->cur_direction & DATA_DIR_RX) {
+ if (tspi->rx_status) {
+ dmaengine_terminate_all(tspi->rx_dma_chan);
+ err += 2;
+ } else {
+ wait_status = wait_for_completion_interruptible_timeout(
+ &tspi->rx_dma_complete, SLINK_DMA_TIMEOUT);
+ if (wait_status <= 0) {
+ dmaengine_terminate_all(tspi->rx_dma_chan);
+ dev_err(tspi->dev, "RxDma Xfer failed\n");
+ err += 2;
+ }
+ }
+ }
+
+ spin_lock_irqsave(&tspi->lock, flags);
+ if (err) {
+ dev_err(tspi->dev,
+ "DmaXfer: ERROR bit set 0x%x\n", tspi->status_reg);
+ dev_err(tspi->dev,
+ "DmaXfer 0x%08x:0x%08x:0x%08x\n", tspi->command_reg,
+ tspi->command2_reg, tspi->dma_control_reg);
+ reset_control_assert(tspi->rst);
+ udelay(2);
+ reset_control_deassert(tspi->rst);
+ complete(&tspi->xfer_completion);
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ return IRQ_HANDLED;
+ }
+
+ if (tspi->cur_direction & DATA_DIR_RX)
+ tegra_slink_copy_spi_rxbuf_to_client_rxbuf(tspi, t);
+
+ if (tspi->cur_direction & DATA_DIR_TX)
+ tspi->cur_pos = tspi->cur_tx_pos;
+ else
+ tspi->cur_pos = tspi->cur_rx_pos;
+
+ if (tspi->cur_pos == t->len) {
+ complete(&tspi->xfer_completion);
+ goto exit;
+ }
+
+ /* Continue transfer in current message */
+ total_fifo_words = tegra_slink_calculate_curr_xfer_param(tspi->cur_spi,
+ tspi, t);
+ if (total_fifo_words > SLINK_FIFO_DEPTH)
+ err = tegra_slink_start_dma_based_transfer(tspi, t);
+ else
+ err = tegra_slink_start_cpu_based_transfer(tspi, t);
+
+exit:
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t tegra_slink_isr_thread(int irq, void *context_data)
+{
+ struct tegra_slink_data *tspi = context_data;
+
+ if (!tspi->is_curr_dma_xfer)
+ return handle_cpu_based_xfer(tspi);
+ return handle_dma_based_xfer(tspi);
+}
+
+static irqreturn_t tegra_slink_isr(int irq, void *context_data)
+{
+ struct tegra_slink_data *tspi = context_data;
+
+ tspi->status_reg = tegra_slink_readl(tspi, SLINK_STATUS);
+ if (tspi->cur_direction & DATA_DIR_TX)
+ tspi->tx_status = tspi->status_reg &
+ (SLINK_TX_OVF | SLINK_TX_UNF);
+
+ if (tspi->cur_direction & DATA_DIR_RX)
+ tspi->rx_status = tspi->status_reg &
+ (SLINK_RX_OVF | SLINK_RX_UNF);
+ tegra_slink_clear_status(tspi);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static const struct tegra_slink_chip_data tegra30_spi_cdata = {
+ .cs_hold_time = true,
+};
+
+static const struct tegra_slink_chip_data tegra20_spi_cdata = {
+ .cs_hold_time = false,
+};
+
+static const struct of_device_id tegra_slink_of_match[] = {
+ { .compatible = "nvidia,tegra30-slink", .data = &tegra30_spi_cdata, },
+ { .compatible = "nvidia,tegra20-slink", .data = &tegra20_spi_cdata, },
+ {}
+};
+MODULE_DEVICE_TABLE(of, tegra_slink_of_match);
+
+static int tegra_slink_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct tegra_slink_data *tspi;
+ struct resource *r;
+ int ret, spi_irq;
+ const struct tegra_slink_chip_data *cdata = NULL;
+
+ cdata = of_device_get_match_data(&pdev->dev);
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*tspi));
+ if (!master) {
+ dev_err(&pdev->dev, "master allocation failed\n");
+ return -ENOMEM;
+ }
+
+ /* the spi->mode bits understood by this driver: */
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ master->setup = tegra_slink_setup;
+ master->prepare_message = tegra_slink_prepare_message;
+ master->transfer_one = tegra_slink_transfer_one;
+ master->unprepare_message = tegra_slink_unprepare_message;
+ master->auto_runtime_pm = true;
+ master->num_chipselect = MAX_CHIP_SELECT;
+
+ platform_set_drvdata(pdev, master);
+ tspi = spi_master_get_devdata(master);
+ tspi->master = master;
+ tspi->dev = &pdev->dev;
+ tspi->chip_data = cdata;
+ spin_lock_init(&tspi->lock);
+
+ if (of_property_read_u32(tspi->dev->of_node, "spi-max-frequency",
+ &master->max_speed_hz))
+ master->max_speed_hz = 25000000; /* 25MHz */
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ dev_err(&pdev->dev, "No IO memory resource\n");
+ ret = -ENODEV;
+ goto exit_free_master;
+ }
+ tspi->phys = r->start;
+ tspi->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(tspi->base)) {
+ ret = PTR_ERR(tspi->base);
+ goto exit_free_master;
+ }
+
+ /* disabled clock may cause interrupt storm upon request */
+ tspi->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(tspi->clk)) {
+ ret = PTR_ERR(tspi->clk);
+ dev_err(&pdev->dev, "Can not get clock %d\n", ret);
+ goto exit_free_master;
+ }
+
+ tspi->rst = devm_reset_control_get_exclusive(&pdev->dev, "spi");
+ if (IS_ERR(tspi->rst)) {
+ dev_err(&pdev->dev, "can not get reset\n");
+ ret = PTR_ERR(tspi->rst);
+ goto exit_free_master;
+ }
+
+ ret = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
+ if (ret)
+ goto exit_free_master;
+
+ tspi->max_buf_size = SLINK_FIFO_DEPTH << 2;
+ tspi->dma_buf_size = DEFAULT_SPI_DMA_BUF_LEN;
+
+ ret = tegra_slink_init_dma_param(tspi, true);
+ if (ret < 0)
+ goto exit_free_master;
+ ret = tegra_slink_init_dma_param(tspi, false);
+ if (ret < 0)
+ goto exit_rx_dma_free;
+ tspi->max_buf_size = tspi->dma_buf_size;
+ init_completion(&tspi->tx_dma_complete);
+ init_completion(&tspi->rx_dma_complete);
+
+ init_completion(&tspi->xfer_completion);
+
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "pm runtime get failed, e = %d\n", ret);
+ goto exit_pm_disable;
+ }
+
+ reset_control_assert(tspi->rst);
+ udelay(2);
+ reset_control_deassert(tspi->rst);
+
+ spi_irq = platform_get_irq(pdev, 0);
+ if (spi_irq < 0) {
+ ret = spi_irq;
+ goto exit_pm_put;
+ }
+ tspi->irq = spi_irq;
+ ret = request_threaded_irq(tspi->irq, tegra_slink_isr,
+ tegra_slink_isr_thread, IRQF_ONESHOT,
+ dev_name(&pdev->dev), tspi);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
+ tspi->irq);
+ goto exit_pm_put;
+ }
+
+ tspi->def_command_reg = SLINK_M_S;
+ tspi->def_command2_reg = SLINK_CS_ACTIVE_BETWEEN;
+ tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND);
+ tegra_slink_writel(tspi, tspi->def_command2_reg, SLINK_COMMAND2);
+
+ master->dev.of_node = pdev->dev.of_node;
+ ret = spi_register_master(master);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can not register to master err %d\n", ret);
+ goto exit_free_irq;
+ }
+
+ pm_runtime_put(&pdev->dev);
+
+ return ret;
+
+exit_free_irq:
+ free_irq(spi_irq, tspi);
+exit_pm_put:
+ pm_runtime_put(&pdev->dev);
+exit_pm_disable:
+ pm_runtime_force_suspend(&pdev->dev);
+
+ tegra_slink_deinit_dma_param(tspi, false);
+exit_rx_dma_free:
+ tegra_slink_deinit_dma_param(tspi, true);
+exit_free_master:
+ spi_master_put(master);
+ return ret;
+}
+
+static int tegra_slink_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
+ struct tegra_slink_data *tspi = spi_master_get_devdata(master);
+
+ spi_unregister_master(master);
+
+ free_irq(tspi->irq, tspi);
+
+ pm_runtime_force_suspend(&pdev->dev);
+
+ if (tspi->tx_dma_chan)
+ tegra_slink_deinit_dma_param(tspi, false);
+
+ if (tspi->rx_dma_chan)
+ tegra_slink_deinit_dma_param(tspi, true);
+
+ spi_master_put(master);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int tegra_slink_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+
+ return spi_master_suspend(master);
+}
+
+static int tegra_slink_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct tegra_slink_data *tspi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0) {
+ dev_err(dev, "pm runtime failed, e = %d\n", ret);
+ return ret;
+ }
+ tegra_slink_writel(tspi, tspi->command_reg, SLINK_COMMAND);
+ tegra_slink_writel(tspi, tspi->command2_reg, SLINK_COMMAND2);
+ pm_runtime_put(dev);
+
+ return spi_master_resume(master);
+}
+#endif
+
+static int __maybe_unused tegra_slink_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct tegra_slink_data *tspi = spi_master_get_devdata(master);
+
+ /* Flush all writes still in the PPSB write queue by reading back */
+ tegra_slink_readl(tspi, SLINK_MAS_DATA);
+
+ clk_disable_unprepare(tspi->clk);
+ return 0;
+}
+
+static int __maybe_unused tegra_slink_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct tegra_slink_data *tspi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_prepare_enable(tspi->clk);
+ if (ret < 0) {
+ dev_err(tspi->dev, "clk_prepare failed: %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static const struct dev_pm_ops slink_pm_ops = {
+ SET_RUNTIME_PM_OPS(tegra_slink_runtime_suspend,
+ tegra_slink_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(tegra_slink_suspend, tegra_slink_resume)
+};
+static struct platform_driver tegra_slink_driver = {
+ .driver = {
+ .name = "spi-tegra-slink",
+ .pm = &slink_pm_ops,
+ .of_match_table = tegra_slink_of_match,
+ },
+ .probe = tegra_slink_probe,
+ .remove = tegra_slink_remove,
+};
+module_platform_driver(tegra_slink_driver);
+
+MODULE_ALIAS("platform:spi-tegra-slink");
+MODULE_DESCRIPTION("NVIDIA Tegra20/Tegra30 SLINK Controller Driver");
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-tegra210-quad.c b/drivers/spi/spi-tegra210-quad.c
new file mode 100644
index 000000000..06c54d490
--- /dev/null
+++ b/drivers/spi/spi-tegra210-quad.c
@@ -0,0 +1,1723 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (C) 2020 NVIDIA CORPORATION.
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/reset.h>
+#include <linux/spi/spi.h>
+#include <linux/acpi.h>
+#include <linux/property.h>
+
+#define QSPI_COMMAND1 0x000
+#define QSPI_BIT_LENGTH(x) (((x) & 0x1f) << 0)
+#define QSPI_PACKED BIT(5)
+#define QSPI_INTERFACE_WIDTH_MASK (0x03 << 7)
+#define QSPI_INTERFACE_WIDTH(x) (((x) & 0x03) << 7)
+#define QSPI_INTERFACE_WIDTH_SINGLE QSPI_INTERFACE_WIDTH(0)
+#define QSPI_INTERFACE_WIDTH_DUAL QSPI_INTERFACE_WIDTH(1)
+#define QSPI_INTERFACE_WIDTH_QUAD QSPI_INTERFACE_WIDTH(2)
+#define QSPI_SDR_DDR_SEL BIT(9)
+#define QSPI_TX_EN BIT(11)
+#define QSPI_RX_EN BIT(12)
+#define QSPI_CS_SW_VAL BIT(20)
+#define QSPI_CS_SW_HW BIT(21)
+
+#define QSPI_CS_POL_INACTIVE(n) (1 << (22 + (n)))
+#define QSPI_CS_POL_INACTIVE_MASK (0xF << 22)
+#define QSPI_CS_SEL_0 (0 << 26)
+#define QSPI_CS_SEL_1 (1 << 26)
+#define QSPI_CS_SEL_2 (2 << 26)
+#define QSPI_CS_SEL_3 (3 << 26)
+#define QSPI_CS_SEL_MASK (3 << 26)
+#define QSPI_CS_SEL(x) (((x) & 0x3) << 26)
+
+#define QSPI_CONTROL_MODE_0 (0 << 28)
+#define QSPI_CONTROL_MODE_3 (3 << 28)
+#define QSPI_CONTROL_MODE_MASK (3 << 28)
+#define QSPI_M_S BIT(30)
+#define QSPI_PIO BIT(31)
+
+#define QSPI_COMMAND2 0x004
+#define QSPI_TX_TAP_DELAY(x) (((x) & 0x3f) << 10)
+#define QSPI_RX_TAP_DELAY(x) (((x) & 0xff) << 0)
+
+#define QSPI_CS_TIMING1 0x008
+#define QSPI_SETUP_HOLD(setup, hold) (((setup) << 4) | (hold))
+
+#define QSPI_CS_TIMING2 0x00c
+#define CYCLES_BETWEEN_PACKETS_0(x) (((x) & 0x1f) << 0)
+#define CS_ACTIVE_BETWEEN_PACKETS_0 BIT(5)
+
+#define QSPI_TRANS_STATUS 0x010
+#define QSPI_BLK_CNT(val) (((val) >> 0) & 0xffff)
+#define QSPI_RDY BIT(30)
+
+#define QSPI_FIFO_STATUS 0x014
+#define QSPI_RX_FIFO_EMPTY BIT(0)
+#define QSPI_RX_FIFO_FULL BIT(1)
+#define QSPI_TX_FIFO_EMPTY BIT(2)
+#define QSPI_TX_FIFO_FULL BIT(3)
+#define QSPI_RX_FIFO_UNF BIT(4)
+#define QSPI_RX_FIFO_OVF BIT(5)
+#define QSPI_TX_FIFO_UNF BIT(6)
+#define QSPI_TX_FIFO_OVF BIT(7)
+#define QSPI_ERR BIT(8)
+#define QSPI_TX_FIFO_FLUSH BIT(14)
+#define QSPI_RX_FIFO_FLUSH BIT(15)
+#define QSPI_TX_FIFO_EMPTY_COUNT(val) (((val) >> 16) & 0x7f)
+#define QSPI_RX_FIFO_FULL_COUNT(val) (((val) >> 23) & 0x7f)
+
+#define QSPI_FIFO_ERROR (QSPI_RX_FIFO_UNF | \
+ QSPI_RX_FIFO_OVF | \
+ QSPI_TX_FIFO_UNF | \
+ QSPI_TX_FIFO_OVF)
+#define QSPI_FIFO_EMPTY (QSPI_RX_FIFO_EMPTY | \
+ QSPI_TX_FIFO_EMPTY)
+
+#define QSPI_TX_DATA 0x018
+#define QSPI_RX_DATA 0x01c
+
+#define QSPI_DMA_CTL 0x020
+#define QSPI_TX_TRIG(n) (((n) & 0x3) << 15)
+#define QSPI_TX_TRIG_1 QSPI_TX_TRIG(0)
+#define QSPI_TX_TRIG_4 QSPI_TX_TRIG(1)
+#define QSPI_TX_TRIG_8 QSPI_TX_TRIG(2)
+#define QSPI_TX_TRIG_16 QSPI_TX_TRIG(3)
+
+#define QSPI_RX_TRIG(n) (((n) & 0x3) << 19)
+#define QSPI_RX_TRIG_1 QSPI_RX_TRIG(0)
+#define QSPI_RX_TRIG_4 QSPI_RX_TRIG(1)
+#define QSPI_RX_TRIG_8 QSPI_RX_TRIG(2)
+#define QSPI_RX_TRIG_16 QSPI_RX_TRIG(3)
+
+#define QSPI_DMA_EN BIT(31)
+
+#define QSPI_DMA_BLK 0x024
+#define QSPI_DMA_BLK_SET(x) (((x) & 0xffff) << 0)
+
+#define QSPI_TX_FIFO 0x108
+#define QSPI_RX_FIFO 0x188
+
+#define QSPI_FIFO_DEPTH 64
+
+#define QSPI_INTR_MASK 0x18c
+#define QSPI_INTR_RX_FIFO_UNF_MASK BIT(25)
+#define QSPI_INTR_RX_FIFO_OVF_MASK BIT(26)
+#define QSPI_INTR_TX_FIFO_UNF_MASK BIT(27)
+#define QSPI_INTR_TX_FIFO_OVF_MASK BIT(28)
+#define QSPI_INTR_RDY_MASK BIT(29)
+#define QSPI_INTR_RX_TX_FIFO_ERR (QSPI_INTR_RX_FIFO_UNF_MASK | \
+ QSPI_INTR_RX_FIFO_OVF_MASK | \
+ QSPI_INTR_TX_FIFO_UNF_MASK | \
+ QSPI_INTR_TX_FIFO_OVF_MASK)
+
+#define QSPI_MISC_REG 0x194
+#define QSPI_NUM_DUMMY_CYCLE(x) (((x) & 0xff) << 0)
+#define QSPI_DUMMY_CYCLES_MAX 0xff
+
+#define QSPI_CMB_SEQ_CMD 0x19c
+#define QSPI_COMMAND_VALUE_SET(x) (((x) & 0xFF) << 0)
+
+#define QSPI_CMB_SEQ_CMD_CFG 0x1a0
+#define QSPI_COMMAND_X1_X2_X4(x) (((x) & 0x3) << 13)
+#define QSPI_COMMAND_X1_X2_X4_MASK (0x03 << 13)
+#define QSPI_COMMAND_SDR_DDR BIT(12)
+#define QSPI_COMMAND_SIZE_SET(x) (((x) & 0xFF) << 0)
+
+#define QSPI_GLOBAL_CONFIG 0x1a4
+#define QSPI_CMB_SEQ_EN BIT(0)
+
+#define QSPI_CMB_SEQ_ADDR 0x1a8
+#define QSPI_ADDRESS_VALUE_SET(x) (((x) & 0xFFFF) << 0)
+
+#define QSPI_CMB_SEQ_ADDR_CFG 0x1ac
+#define QSPI_ADDRESS_X1_X2_X4(x) (((x) & 0x3) << 13)
+#define QSPI_ADDRESS_X1_X2_X4_MASK (0x03 << 13)
+#define QSPI_ADDRESS_SDR_DDR BIT(12)
+#define QSPI_ADDRESS_SIZE_SET(x) (((x) & 0xFF) << 0)
+
+#define DATA_DIR_TX BIT(0)
+#define DATA_DIR_RX BIT(1)
+
+#define QSPI_DMA_TIMEOUT (msecs_to_jiffies(1000))
+#define DEFAULT_QSPI_DMA_BUF_LEN (64 * 1024)
+#define CMD_TRANSFER 0
+#define ADDR_TRANSFER 1
+#define DATA_TRANSFER 2
+
+struct tegra_qspi_soc_data {
+ bool has_dma;
+ bool cmb_xfer_capable;
+ unsigned int cs_count;
+};
+
+struct tegra_qspi_client_data {
+ int tx_clk_tap_delay;
+ int rx_clk_tap_delay;
+};
+
+struct tegra_qspi {
+ struct device *dev;
+ struct spi_master *master;
+ /* lock to protect data accessed by irq */
+ spinlock_t lock;
+
+ struct clk *clk;
+ void __iomem *base;
+ phys_addr_t phys;
+ unsigned int irq;
+
+ u32 cur_speed;
+ unsigned int cur_pos;
+ unsigned int words_per_32bit;
+ unsigned int bytes_per_word;
+ unsigned int curr_dma_words;
+ unsigned int cur_direction;
+
+ unsigned int cur_rx_pos;
+ unsigned int cur_tx_pos;
+
+ unsigned int dma_buf_size;
+ unsigned int max_buf_size;
+ bool is_curr_dma_xfer;
+
+ struct completion rx_dma_complete;
+ struct completion tx_dma_complete;
+
+ u32 tx_status;
+ u32 rx_status;
+ u32 status_reg;
+ bool is_packed;
+ bool use_dma;
+
+ u32 command1_reg;
+ u32 dma_control_reg;
+ u32 def_command1_reg;
+ u32 def_command2_reg;
+ u32 spi_cs_timing1;
+ u32 spi_cs_timing2;
+ u8 dummy_cycles;
+
+ struct completion xfer_completion;
+ struct spi_transfer *curr_xfer;
+
+ struct dma_chan *rx_dma_chan;
+ u32 *rx_dma_buf;
+ dma_addr_t rx_dma_phys;
+ struct dma_async_tx_descriptor *rx_dma_desc;
+
+ struct dma_chan *tx_dma_chan;
+ u32 *tx_dma_buf;
+ dma_addr_t tx_dma_phys;
+ struct dma_async_tx_descriptor *tx_dma_desc;
+ const struct tegra_qspi_soc_data *soc_data;
+};
+
+static inline u32 tegra_qspi_readl(struct tegra_qspi *tqspi, unsigned long offset)
+{
+ return readl(tqspi->base + offset);
+}
+
+static inline void tegra_qspi_writel(struct tegra_qspi *tqspi, u32 value, unsigned long offset)
+{
+ writel(value, tqspi->base + offset);
+
+ /* read back register to make sure that register writes completed */
+ if (offset != QSPI_TX_FIFO)
+ readl(tqspi->base + QSPI_COMMAND1);
+}
+
+static void tegra_qspi_mask_clear_irq(struct tegra_qspi *tqspi)
+{
+ u32 value;
+
+ /* write 1 to clear status register */
+ value = tegra_qspi_readl(tqspi, QSPI_TRANS_STATUS);
+ tegra_qspi_writel(tqspi, value, QSPI_TRANS_STATUS);
+
+ value = tegra_qspi_readl(tqspi, QSPI_INTR_MASK);
+ if (!(value & QSPI_INTR_RDY_MASK)) {
+ value |= (QSPI_INTR_RDY_MASK | QSPI_INTR_RX_TX_FIFO_ERR);
+ tegra_qspi_writel(tqspi, value, QSPI_INTR_MASK);
+ }
+
+ /* clear fifo status error if any */
+ value = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
+ if (value & QSPI_ERR)
+ tegra_qspi_writel(tqspi, QSPI_ERR | QSPI_FIFO_ERROR, QSPI_FIFO_STATUS);
+}
+
+static unsigned int
+tegra_qspi_calculate_curr_xfer_param(struct tegra_qspi *tqspi, struct spi_transfer *t)
+{
+ unsigned int max_word, max_len, total_fifo_words;
+ unsigned int remain_len = t->len - tqspi->cur_pos;
+ unsigned int bits_per_word = t->bits_per_word;
+
+ tqspi->bytes_per_word = DIV_ROUND_UP(bits_per_word, 8);
+
+ /*
+ * Tegra QSPI controller supports packed or unpacked mode transfers.
+ * Packed mode is used for data transfers using 8, 16, or 32 bits per
+ * word with a minimum transfer of 1 word and for all other transfers
+ * unpacked mode will be used.
+ */
+
+ if ((bits_per_word == 8 || bits_per_word == 16 ||
+ bits_per_word == 32) && t->len > 3) {
+ tqspi->is_packed = true;
+ tqspi->words_per_32bit = 32 / bits_per_word;
+ } else {
+ tqspi->is_packed = false;
+ tqspi->words_per_32bit = 1;
+ }
+
+ if (tqspi->is_packed) {
+ max_len = min(remain_len, tqspi->max_buf_size);
+ tqspi->curr_dma_words = max_len / tqspi->bytes_per_word;
+ total_fifo_words = (max_len + 3) / 4;
+ } else {
+ max_word = (remain_len - 1) / tqspi->bytes_per_word + 1;
+ max_word = min(max_word, tqspi->max_buf_size / 4);
+ tqspi->curr_dma_words = max_word;
+ total_fifo_words = max_word;
+ }
+
+ return total_fifo_words;
+}
+
+static unsigned int
+tegra_qspi_fill_tx_fifo_from_client_txbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
+{
+ unsigned int written_words, fifo_words_left, count;
+ unsigned int len, tx_empty_count, max_n_32bit, i;
+ u8 *tx_buf = (u8 *)t->tx_buf + tqspi->cur_tx_pos;
+ u32 fifo_status;
+
+ fifo_status = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
+ tx_empty_count = QSPI_TX_FIFO_EMPTY_COUNT(fifo_status);
+
+ if (tqspi->is_packed) {
+ fifo_words_left = tx_empty_count * tqspi->words_per_32bit;
+ written_words = min(fifo_words_left, tqspi->curr_dma_words);
+ len = written_words * tqspi->bytes_per_word;
+ max_n_32bit = DIV_ROUND_UP(len, 4);
+ for (count = 0; count < max_n_32bit; count++) {
+ u32 x = 0;
+
+ for (i = 0; (i < 4) && len; i++, len--)
+ x |= (u32)(*tx_buf++) << (i * 8);
+ tegra_qspi_writel(tqspi, x, QSPI_TX_FIFO);
+ }
+
+ tqspi->cur_tx_pos += written_words * tqspi->bytes_per_word;
+ } else {
+ unsigned int write_bytes;
+ u8 bytes_per_word = tqspi->bytes_per_word;
+
+ max_n_32bit = min(tqspi->curr_dma_words, tx_empty_count);
+ written_words = max_n_32bit;
+ len = written_words * tqspi->bytes_per_word;
+ if (len > t->len - tqspi->cur_pos)
+ len = t->len - tqspi->cur_pos;
+ write_bytes = len;
+ for (count = 0; count < max_n_32bit; count++) {
+ u32 x = 0;
+
+ for (i = 0; len && (i < bytes_per_word); i++, len--)
+ x |= (u32)(*tx_buf++) << (i * 8);
+ tegra_qspi_writel(tqspi, x, QSPI_TX_FIFO);
+ }
+
+ tqspi->cur_tx_pos += write_bytes;
+ }
+
+ return written_words;
+}
+
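+/*
+ * Drain the RX FIFO into the client buffer; unpacked mode masks each FIFO
+ * word down to bits_per_word and never copies past the transfer length.
+ */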
+static unsigned int
+tegra_qspi_read_rx_fifo_to_client_rxbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
+{
+ u8 *rx_buf = (u8 *)t->rx_buf + tqspi->cur_rx_pos;
+ unsigned int len, rx_full_count, count, i;
+ unsigned int read_words = 0;
+ u32 fifo_status, x;
+
+ fifo_status = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
+ rx_full_count = QSPI_RX_FIFO_FULL_COUNT(fifo_status);
+ if (tqspi->is_packed) {
+ len = tqspi->curr_dma_words * tqspi->bytes_per_word;
+ for (count = 0; count < rx_full_count; count++) {
+ x = tegra_qspi_readl(tqspi, QSPI_RX_FIFO);
+
+ for (i = 0; len && (i < 4); i++, len--)
+ *rx_buf++ = (x >> i * 8) & 0xff;
+ }
+
+ read_words += tqspi->curr_dma_words;
+ tqspi->cur_rx_pos += tqspi->curr_dma_words * tqspi->bytes_per_word;
+ } else {
+ u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
+ u8 bytes_per_word = tqspi->bytes_per_word;
+ unsigned int read_bytes;
+
+ len = rx_full_count * bytes_per_word;
+ if (len > t->len - tqspi->cur_pos)
+ len = t->len - tqspi->cur_pos;
+ read_bytes = len;
+ for (count = 0; count < rx_full_count; count++) {
+ x = tegra_qspi_readl(tqspi, QSPI_RX_FIFO) & rx_mask;
+
+ for (i = 0; len && (i < bytes_per_word); i++, len--)
+ *rx_buf++ = (x >> (i * 8)) & 0xff;
+ }
+
+ read_words += rx_full_count;
+ tqspi->cur_rx_pos += read_bytes;
+ }
+
+ return read_words;
+}
+
+static void
+tegra_qspi_copy_client_txbuf_to_qspi_txbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
+{
+ dma_sync_single_for_cpu(tqspi->dev, tqspi->tx_dma_phys,
+ tqspi->dma_buf_size, DMA_TO_DEVICE);
+
+ /*
+ * In packed mode, each FIFO word may contain multiple packets,
+ * depending on bits per word, so all bytes in each FIFO word are valid.
+ *
+ * In unpacked mode, each FIFO word contains a single packet and,
+ * depending on bits per word, any remaining bits in the FIFO word are
+ * ignored by the hardware as invalid bits.
+ */
+ if (tqspi->is_packed) {
+ tqspi->cur_tx_pos += tqspi->curr_dma_words * tqspi->bytes_per_word;
+ } else {
+ u8 *tx_buf = (u8 *)t->tx_buf + tqspi->cur_tx_pos;
+ unsigned int i, count, consume, write_bytes;
+
+ /*
+ * Fill tx_dma_buf to contain single packet in each word based
+ * on bits per word from SPI core tx_buf.
+ */
+ consume = tqspi->curr_dma_words * tqspi->bytes_per_word;
+ if (consume > t->len - tqspi->cur_pos)
+ consume = t->len - tqspi->cur_pos;
+ write_bytes = consume;
+ for (count = 0; count < tqspi->curr_dma_words; count++) {
+ u32 x = 0;
+
+ for (i = 0; consume && (i < tqspi->bytes_per_word); i++, consume--)
+ x |= (u32)(*tx_buf++) << (i * 8);
+ tqspi->tx_dma_buf[count] = x;
+ }
+
+ tqspi->cur_tx_pos += write_bytes;
+ }
+
+ dma_sync_single_for_device(tqspi->dev, tqspi->tx_dma_phys,
+ tqspi->dma_buf_size, DMA_TO_DEVICE);
+}
+
+static void
+tegra_qspi_copy_qspi_rxbuf_to_client_rxbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
+{
+ dma_sync_single_for_cpu(tqspi->dev, tqspi->rx_dma_phys,
+ tqspi->dma_buf_size, DMA_FROM_DEVICE);
+
+ if (tqspi->is_packed) {
+ tqspi->cur_rx_pos += tqspi->curr_dma_words * tqspi->bytes_per_word;
+ } else {
+ unsigned char *rx_buf = t->rx_buf + tqspi->cur_rx_pos;
+ u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
+ unsigned int i, count, consume, read_bytes;
+
+ /*
+ * Each FIFO word contains a single data packet.
+ * Skip the invalid bits in each FIFO word based on bits per word
+ * and align the bytes while filling the SPI core rx_buf.
+ */
+ consume = tqspi->curr_dma_words * tqspi->bytes_per_word;
+ if (consume > t->len - tqspi->cur_pos)
+ consume = t->len - tqspi->cur_pos;
+ read_bytes = consume;
+ for (count = 0; count < tqspi->curr_dma_words; count++) {
+ u32 x = tqspi->rx_dma_buf[count] & rx_mask;
+
+ for (i = 0; consume && (i < tqspi->bytes_per_word); i++, consume--)
+ *rx_buf++ = (x >> (i * 8)) & 0xff;
+ }
+
+ tqspi->cur_rx_pos += read_bytes;
+ }
+
+ dma_sync_single_for_device(tqspi->dev, tqspi->rx_dma_phys,
+ tqspi->dma_buf_size, DMA_FROM_DEVICE);
+}
+
+static void tegra_qspi_dma_complete(void *args)
+{
+ struct completion *dma_complete = args;
+
+ complete(dma_complete);
+}
+
+static int tegra_qspi_start_tx_dma(struct tegra_qspi *tqspi, struct spi_transfer *t, int len)
+{
+ dma_addr_t tx_dma_phys;
+
+ reinit_completion(&tqspi->tx_dma_complete);
+
+ if (tqspi->is_packed)
+ tx_dma_phys = t->tx_dma;
+ else
+ tx_dma_phys = tqspi->tx_dma_phys;
+
+ tqspi->tx_dma_desc = dmaengine_prep_slave_single(tqspi->tx_dma_chan, tx_dma_phys,
+ len, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+
+ if (!tqspi->tx_dma_desc) {
+ dev_err(tqspi->dev, "Unable to get TX descriptor\n");
+ return -EIO;
+ }
+
+ tqspi->tx_dma_desc->callback = tegra_qspi_dma_complete;
+ tqspi->tx_dma_desc->callback_param = &tqspi->tx_dma_complete;
+ dmaengine_submit(tqspi->tx_dma_desc);
+ dma_async_issue_pending(tqspi->tx_dma_chan);
+
+ return 0;
+}
+
+static int tegra_qspi_start_rx_dma(struct tegra_qspi *tqspi, struct spi_transfer *t, int len)
+{
+ dma_addr_t rx_dma_phys;
+
+ reinit_completion(&tqspi->rx_dma_complete);
+
+ if (tqspi->is_packed)
+ rx_dma_phys = t->rx_dma;
+ else
+ rx_dma_phys = tqspi->rx_dma_phys;
+
+ tqspi->rx_dma_desc = dmaengine_prep_slave_single(tqspi->rx_dma_chan, rx_dma_phys,
+ len, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+
+ if (!tqspi->rx_dma_desc) {
+ dev_err(tqspi->dev, "Unable to get RX descriptor\n");
+ return -EIO;
+ }
+
+ tqspi->rx_dma_desc->callback = tegra_qspi_dma_complete;
+ tqspi->rx_dma_desc->callback_param = &tqspi->rx_dma_complete;
+ dmaengine_submit(tqspi->rx_dma_desc);
+ dma_async_issue_pending(tqspi->rx_dma_chan);
+
+ return 0;
+}
+
+static int tegra_qspi_flush_fifos(struct tegra_qspi *tqspi, bool atomic)
+{
+ void __iomem *addr = tqspi->base + QSPI_FIFO_STATUS;
+ u32 val;
+
+ val = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
+ if ((val & QSPI_FIFO_EMPTY) == QSPI_FIFO_EMPTY)
+ return 0;
+
+ val |= QSPI_RX_FIFO_FLUSH | QSPI_TX_FIFO_FLUSH;
+ tegra_qspi_writel(tqspi, val, QSPI_FIFO_STATUS);
+
+ if (!atomic)
+ return readl_relaxed_poll_timeout(addr, val,
+ (val & QSPI_FIFO_EMPTY) == QSPI_FIFO_EMPTY,
+ 1000, 1000000);
+
+ return readl_relaxed_poll_timeout_atomic(addr, val,
+ (val & QSPI_FIFO_EMPTY) == QSPI_FIFO_EMPTY,
+ 1000, 1000000);
+}
+
+static void tegra_qspi_unmask_irq(struct tegra_qspi *tqspi)
+{
+ u32 intr_mask;
+
+ intr_mask = tegra_qspi_readl(tqspi, QSPI_INTR_MASK);
+ intr_mask &= ~(QSPI_INTR_RDY_MASK | QSPI_INTR_RX_TX_FIFO_ERR);
+ tegra_qspi_writel(tqspi, intr_mask, QSPI_INTR_MASK);
+}
+
+static int tegra_qspi_dma_map_xfer(struct tegra_qspi *tqspi, struct spi_transfer *t)
+{
+ u8 *tx_buf = (u8 *)t->tx_buf + tqspi->cur_tx_pos;
+ u8 *rx_buf = (u8 *)t->rx_buf + tqspi->cur_rx_pos;
+ unsigned int len;
+
+ len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4;
+
+ if (t->tx_buf) {
+ t->tx_dma = dma_map_single(tqspi->dev, (void *)tx_buf, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(tqspi->dev, t->tx_dma))
+ return -ENOMEM;
+ }
+
+ if (t->rx_buf) {
+ t->rx_dma = dma_map_single(tqspi->dev, (void *)rx_buf, len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(tqspi->dev, t->rx_dma)) {
+ dma_unmap_single(tqspi->dev, t->tx_dma, len, DMA_TO_DEVICE);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static void tegra_qspi_dma_unmap_xfer(struct tegra_qspi *tqspi, struct spi_transfer *t)
+{
+ unsigned int len;
+
+ len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4;
+
+ dma_unmap_single(tqspi->dev, t->tx_dma, len, DMA_TO_DEVICE);
+ dma_unmap_single(tqspi->dev, t->rx_dma, len, DMA_FROM_DEVICE);
+}
+
+static int tegra_qspi_start_dma_based_transfer(struct tegra_qspi *tqspi, struct spi_transfer *t)
+{
+ struct dma_slave_config dma_sconfig = { 0 };
+ unsigned int len;
+ u8 dma_burst;
+ int ret = 0;
+ u32 val;
+
+ if (tqspi->is_packed) {
+ ret = tegra_qspi_dma_map_xfer(tqspi, t);
+ if (ret < 0)
+ return ret;
+ }
+
+ val = QSPI_DMA_BLK_SET(tqspi->curr_dma_words - 1);
+ tegra_qspi_writel(tqspi, val, QSPI_DMA_BLK);
+
+ tegra_qspi_unmask_irq(tqspi);
+
+ if (tqspi->is_packed)
+ len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4;
+ else
+ len = tqspi->curr_dma_words * 4;
+
+ /*
+ * Set the FIFO trigger level and DMA burst size based on the transfer
+ * length: 1 word when the length is not a multiple of 16 bytes,
+ * 4 words when it is a multiple of 16 but not of 32 bytes, and
+ * 8 words when it is a multiple of 32 bytes.
+ */
+ val = 0;
+ if (len & 0xf) {
+ val |= QSPI_TX_TRIG_1 | QSPI_RX_TRIG_1;
+ dma_burst = 1;
+ } else if (((len) >> 4) & 0x1) {
+ val |= QSPI_TX_TRIG_4 | QSPI_RX_TRIG_4;
+ dma_burst = 4;
+ } else {
+ val |= QSPI_TX_TRIG_8 | QSPI_RX_TRIG_8;
+ dma_burst = 8;
+ }
+
+ tegra_qspi_writel(tqspi, val, QSPI_DMA_CTL);
+ tqspi->dma_control_reg = val;
+
+ dma_sconfig.device_fc = true;
+ if (tqspi->cur_direction & DATA_DIR_TX) {
+ dma_sconfig.dst_addr = tqspi->phys + QSPI_TX_FIFO;
+ dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dma_sconfig.dst_maxburst = dma_burst;
+ ret = dmaengine_slave_config(tqspi->tx_dma_chan, &dma_sconfig);
+ if (ret < 0) {
+ dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret);
+ return ret;
+ }
+
+ tegra_qspi_copy_client_txbuf_to_qspi_txbuf(tqspi, t);
+ ret = tegra_qspi_start_tx_dma(tqspi, t, len);
+ if (ret < 0) {
+ dev_err(tqspi->dev, "failed to starting TX DMA: %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (tqspi->cur_direction & DATA_DIR_RX) {
+ dma_sconfig.src_addr = tqspi->phys + QSPI_RX_FIFO;
+ dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dma_sconfig.src_maxburst = dma_burst;
+ ret = dmaengine_slave_config(tqspi->rx_dma_chan, &dma_sconfig);
+ if (ret < 0) {
+ dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret);
+ return ret;
+ }
+
+ dma_sync_single_for_device(tqspi->dev, tqspi->rx_dma_phys,
+ tqspi->dma_buf_size,
+ DMA_FROM_DEVICE);
+
+ ret = tegra_qspi_start_rx_dma(tqspi, t, len);
+ if (ret < 0) {
+ dev_err(tqspi->dev, "failed to start RX DMA: %d\n", ret);
+ if (tqspi->cur_direction & DATA_DIR_TX)
+ dmaengine_terminate_all(tqspi->tx_dma_chan);
+ return ret;
+ }
+ }
+
+ tegra_qspi_writel(tqspi, tqspi->command1_reg, QSPI_COMMAND1);
+
+ tqspi->is_curr_dma_xfer = true;
+ tqspi->dma_control_reg = val;
+ val |= QSPI_DMA_EN;
+ tegra_qspi_writel(tqspi, val, QSPI_DMA_CTL);
+
+ return ret;
+}
+
+static int tegra_qspi_start_cpu_based_transfer(struct tegra_qspi *qspi, struct spi_transfer *t)
+{
+ u32 val;
+ unsigned int cur_words;
+
+ if (qspi->cur_direction & DATA_DIR_TX)
+ cur_words = tegra_qspi_fill_tx_fifo_from_client_txbuf(qspi, t);
+ else
+ cur_words = qspi->curr_dma_words;
+
+ val = QSPI_DMA_BLK_SET(cur_words - 1);
+ tegra_qspi_writel(qspi, val, QSPI_DMA_BLK);
+
+ tegra_qspi_unmask_irq(qspi);
+
+ qspi->is_curr_dma_xfer = false;
+ val = qspi->command1_reg;
+ val |= QSPI_PIO;
+ tegra_qspi_writel(qspi, val, QSPI_COMMAND1);
+
+ return 0;
+}
+
+static void tegra_qspi_deinit_dma(struct tegra_qspi *tqspi)
+{
+ if (!tqspi->soc_data->has_dma)
+ return;
+
+ if (tqspi->tx_dma_buf) {
+ dma_free_coherent(tqspi->dev, tqspi->dma_buf_size,
+ tqspi->tx_dma_buf, tqspi->tx_dma_phys);
+ tqspi->tx_dma_buf = NULL;
+ }
+
+ if (tqspi->tx_dma_chan) {
+ dma_release_channel(tqspi->tx_dma_chan);
+ tqspi->tx_dma_chan = NULL;
+ }
+
+ if (tqspi->rx_dma_buf) {
+ dma_free_coherent(tqspi->dev, tqspi->dma_buf_size,
+ tqspi->rx_dma_buf, tqspi->rx_dma_phys);
+ tqspi->rx_dma_buf = NULL;
+ }
+
+ if (tqspi->rx_dma_chan) {
+ dma_release_channel(tqspi->rx_dma_chan);
+ tqspi->rx_dma_chan = NULL;
+ }
+}
+
+static int tegra_qspi_init_dma(struct tegra_qspi *tqspi)
+{
+ struct dma_chan *dma_chan;
+ dma_addr_t dma_phys;
+ u32 *dma_buf;
+ int err;
+
+ if (!tqspi->soc_data->has_dma)
+ return 0;
+
+ dma_chan = dma_request_chan(tqspi->dev, "rx");
+ if (IS_ERR(dma_chan)) {
+ err = PTR_ERR(dma_chan);
+ goto err_out;
+ }
+
+ tqspi->rx_dma_chan = dma_chan;
+
+ dma_buf = dma_alloc_coherent(tqspi->dev, tqspi->dma_buf_size, &dma_phys, GFP_KERNEL);
+ if (!dma_buf) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ tqspi->rx_dma_buf = dma_buf;
+ tqspi->rx_dma_phys = dma_phys;
+
+ dma_chan = dma_request_chan(tqspi->dev, "tx");
+ if (IS_ERR(dma_chan)) {
+ err = PTR_ERR(dma_chan);
+ goto err_out;
+ }
+
+ tqspi->tx_dma_chan = dma_chan;
+
+ dma_buf = dma_alloc_coherent(tqspi->dev, tqspi->dma_buf_size, &dma_phys, GFP_KERNEL);
+ if (!dma_buf) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ tqspi->tx_dma_buf = dma_buf;
+ tqspi->tx_dma_phys = dma_phys;
+ tqspi->use_dma = true;
+
+ return 0;
+
+err_out:
+ tegra_qspi_deinit_dma(tqspi);
+
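+ /* Fall back to PIO for any error other than probe deferral */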
+ if (err != -EPROBE_DEFER) {
+ dev_err(tqspi->dev, "cannot use DMA: %d\n", err);
+ dev_err(tqspi->dev, "falling back to PIO\n");
+ return 0;
+ }
+
+ return err;
+}
+
+static u32 tegra_qspi_setup_transfer_one(struct spi_device *spi, struct spi_transfer *t,
+ bool is_first_of_msg)
+{
+ struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
+ struct tegra_qspi_client_data *cdata = spi->controller_data;
+ u32 command1, command2, speed = t->speed_hz;
+ u8 bits_per_word = t->bits_per_word;
+ u32 tx_tap = 0, rx_tap = 0;
+ int req_mode;
+
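+ /* The QSPI clock rate is only managed by the driver when not booted via ACPI */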
+ if (!has_acpi_companion(tqspi->dev) && speed != tqspi->cur_speed) {
+ clk_set_rate(tqspi->clk, speed);
+ tqspi->cur_speed = speed;
+ }
+
+ tqspi->cur_pos = 0;
+ tqspi->cur_rx_pos = 0;
+ tqspi->cur_tx_pos = 0;
+ tqspi->curr_xfer = t;
+
+ if (is_first_of_msg) {
+ tegra_qspi_mask_clear_irq(tqspi);
+
+ command1 = tqspi->def_command1_reg;
+ command1 |= QSPI_CS_SEL(spi->chip_select);
+ command1 |= QSPI_BIT_LENGTH(bits_per_word - 1);
+
+ command1 &= ~QSPI_CONTROL_MODE_MASK;
+ req_mode = spi->mode & 0x3;
+ if (req_mode == SPI_MODE_3)
+ command1 |= QSPI_CONTROL_MODE_3;
+ else
+ command1 |= QSPI_CONTROL_MODE_0;
+
+ if (spi->mode & SPI_CS_HIGH)
+ command1 |= QSPI_CS_SW_VAL;
+ else
+ command1 &= ~QSPI_CS_SW_VAL;
+ tegra_qspi_writel(tqspi, command1, QSPI_COMMAND1);
+
+ if (cdata && cdata->tx_clk_tap_delay)
+ tx_tap = cdata->tx_clk_tap_delay;
+
+ if (cdata && cdata->rx_clk_tap_delay)
+ rx_tap = cdata->rx_clk_tap_delay;
+
+ command2 = QSPI_TX_TAP_DELAY(tx_tap) | QSPI_RX_TAP_DELAY(rx_tap);
+ if (command2 != tqspi->def_command2_reg)
+ tegra_qspi_writel(tqspi, command2, QSPI_COMMAND2);
+
+ } else {
+ command1 = tqspi->command1_reg;
+ command1 &= ~QSPI_BIT_LENGTH(~0);
+ command1 |= QSPI_BIT_LENGTH(bits_per_word - 1);
+ }
+
+ command1 &= ~QSPI_SDR_DDR_SEL;
+
+ return command1;
+}
+
+static int tegra_qspi_start_transfer_one(struct spi_device *spi,
+ struct spi_transfer *t, u32 command1)
+{
+ struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
+ unsigned int total_fifo_words;
+ u8 bus_width = 0;
+ int ret;
+
+ total_fifo_words = tegra_qspi_calculate_curr_xfer_param(tqspi, t);
+
+ command1 &= ~QSPI_PACKED;
+ if (tqspi->is_packed)
+ command1 |= QSPI_PACKED;
+ tegra_qspi_writel(tqspi, command1, QSPI_COMMAND1);
+
+ tqspi->cur_direction = 0;
+
+ command1 &= ~(QSPI_TX_EN | QSPI_RX_EN);
+ if (t->rx_buf) {
+ command1 |= QSPI_RX_EN;
+ tqspi->cur_direction |= DATA_DIR_RX;
+ bus_width = t->rx_nbits;
+ }
+
+ if (t->tx_buf) {
+ command1 |= QSPI_TX_EN;
+ tqspi->cur_direction |= DATA_DIR_TX;
+ bus_width = t->tx_nbits;
+ }
+
+ command1 &= ~QSPI_INTERFACE_WIDTH_MASK;
+
+ if (bus_width == SPI_NBITS_QUAD)
+ command1 |= QSPI_INTERFACE_WIDTH_QUAD;
+ else if (bus_width == SPI_NBITS_DUAL)
+ command1 |= QSPI_INTERFACE_WIDTH_DUAL;
+ else
+ command1 |= QSPI_INTERFACE_WIDTH_SINGLE;
+
+ tqspi->command1_reg = command1;
+
+ tegra_qspi_writel(tqspi, QSPI_NUM_DUMMY_CYCLE(tqspi->dummy_cycles), QSPI_MISC_REG);
+
+ ret = tegra_qspi_flush_fifos(tqspi, false);
+ if (ret < 0)
+ return ret;
+
+ if (tqspi->use_dma && total_fifo_words > QSPI_FIFO_DEPTH)
+ ret = tegra_qspi_start_dma_based_transfer(tqspi, t);
+ else
+ ret = tegra_qspi_start_cpu_based_transfer(tqspi, t);
+
+ return ret;
+}
+
+static struct tegra_qspi_client_data *tegra_qspi_parse_cdata_dt(struct spi_device *spi)
+{
+ struct tegra_qspi_client_data *cdata;
+ struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
+
+ cdata = devm_kzalloc(tqspi->dev, sizeof(*cdata), GFP_KERNEL);
+ if (!cdata)
+ return NULL;
+
+ device_property_read_u32(&spi->dev, "nvidia,tx-clk-tap-delay",
+ &cdata->tx_clk_tap_delay);
+ device_property_read_u32(&spi->dev, "nvidia,rx-clk-tap-delay",
+ &cdata->rx_clk_tap_delay);
+
+ return cdata;
+}
+
+static int tegra_qspi_setup(struct spi_device *spi)
+{
+ struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
+ struct tegra_qspi_client_data *cdata = spi->controller_data;
+ unsigned long flags;
+ u32 val;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(tqspi->dev);
+ if (ret < 0) {
+ dev_err(tqspi->dev, "failed to get runtime PM: %d\n", ret);
+ return ret;
+ }
+
+ if (!cdata) {
+ cdata = tegra_qspi_parse_cdata_dt(spi);
+ spi->controller_data = cdata;
+ }
+ spin_lock_irqsave(&tqspi->lock, flags);
+
+ /* keep the default CS state inactive */
+ val = tqspi->def_command1_reg;
+ val |= QSPI_CS_SEL(spi->chip_select);
+ if (spi->mode & SPI_CS_HIGH)
+ val &= ~QSPI_CS_POL_INACTIVE(spi->chip_select);
+ else
+ val |= QSPI_CS_POL_INACTIVE(spi->chip_select);
+
+ tqspi->def_command1_reg = val;
+ tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1);
+
+ spin_unlock_irqrestore(&tqspi->lock, flags);
+
+ pm_runtime_put(tqspi->dev);
+
+ return 0;
+}
+
+static void tegra_qspi_dump_regs(struct tegra_qspi *tqspi)
+{
+ dev_dbg(tqspi->dev, "============ QSPI REGISTER DUMP ============\n");
+ dev_dbg(tqspi->dev, "Command1: 0x%08x | Command2: 0x%08x\n",
+ tegra_qspi_readl(tqspi, QSPI_COMMAND1),
+ tegra_qspi_readl(tqspi, QSPI_COMMAND2));
+ dev_dbg(tqspi->dev, "DMA_CTL: 0x%08x | DMA_BLK: 0x%08x\n",
+ tegra_qspi_readl(tqspi, QSPI_DMA_CTL),
+ tegra_qspi_readl(tqspi, QSPI_DMA_BLK));
+ dev_dbg(tqspi->dev, "INTR_MASK: 0x%08x | MISC: 0x%08x\n",
+ tegra_qspi_readl(tqspi, QSPI_INTR_MASK),
+ tegra_qspi_readl(tqspi, QSPI_MISC_REG));
+ dev_dbg(tqspi->dev, "TRANS_STAT: 0x%08x | FIFO_STATUS: 0x%08x\n",
+ tegra_qspi_readl(tqspi, QSPI_TRANS_STATUS),
+ tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS));
+}
+
+static void tegra_qspi_handle_error(struct tegra_qspi *tqspi)
+{
+ dev_err(tqspi->dev, "error in transfer, fifo status 0x%08x\n", tqspi->status_reg);
+ tegra_qspi_dump_regs(tqspi);
+ tegra_qspi_flush_fifos(tqspi, true);
+ if (device_reset(tqspi->dev) < 0)
+ dev_warn_once(tqspi->dev, "device reset failed\n");
+}
+
+static void tegra_qspi_transfer_end(struct spi_device *spi)
+{
+ struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
+ int cs_val = (spi->mode & SPI_CS_HIGH) ? 0 : 1;
+
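+ /* Drive CS to its inactive level, then restore the default COMMAND1 value */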
+ if (cs_val)
+ tqspi->command1_reg |= QSPI_CS_SW_VAL;
+ else
+ tqspi->command1_reg &= ~QSPI_CS_SW_VAL;
+ tegra_qspi_writel(tqspi, tqspi->command1_reg, QSPI_COMMAND1);
+ tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1);
+}
+
+static u32 tegra_qspi_cmd_config(bool is_ddr, u8 bus_width, u8 len)
+{
+ u32 cmd_config = 0;
+
+ /* Extract Command configuration and value */
+ if (is_ddr)
+ cmd_config |= QSPI_COMMAND_SDR_DDR;
+ else
+ cmd_config &= ~QSPI_COMMAND_SDR_DDR;
+
+ cmd_config |= QSPI_COMMAND_X1_X2_X4(bus_width);
+ cmd_config |= QSPI_COMMAND_SIZE_SET((len * 8) - 1);
+
+ return cmd_config;
+}
+
+static u32 tegra_qspi_addr_config(bool is_ddr, u8 bus_width, u8 len)
+{
+ u32 addr_config = 0;
+
+ /* Extract Address configuration and value */
+ is_ddr = 0; /* only SDR mode is supported */
+ bus_width = 0; /* X1 mode only */
+
+ if (is_ddr)
+ addr_config |= QSPI_ADDRESS_SDR_DDR;
+ else
+ addr_config &= ~QSPI_ADDRESS_SDR_DDR;
+
+ addr_config |= QSPI_ADDRESS_X1_X2_X4(bus_width);
+ addr_config |= QSPI_ADDRESS_SIZE_SET((len * 8) - 1);
+
+ return addr_config;
+}
+
+static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi,
+ struct spi_message *msg)
+{
+ bool is_first_msg = true;
+ struct spi_transfer *xfer;
+ struct spi_device *spi = msg->spi;
+ u8 transfer_phase = 0;
+ u32 cmd1 = 0, dma_ctl = 0;
+ int ret = 0;
+ u32 address_value = 0;
+ u32 cmd_config = 0, addr_config = 0;
+ u8 cmd_value = 0, val = 0;
+
+ /* Enable Combined sequence mode */
+ val = tegra_qspi_readl(tqspi, QSPI_GLOBAL_CONFIG);
+ val |= QSPI_CMB_SEQ_EN;
+ tegra_qspi_writel(tqspi, val, QSPI_GLOBAL_CONFIG);
+ /* Process individual transfer list */
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ switch (transfer_phase) {
+ case CMD_TRANSFER:
+ /* X1 SDR mode */
+ cmd_config = tegra_qspi_cmd_config(false, 0,
+ xfer->len);
+ cmd_value = *((const u8 *)(xfer->tx_buf));
+ break;
+ case ADDR_TRANSFER:
+ /* X1 SDR mode */
+ addr_config = tegra_qspi_addr_config(false, 0,
+ xfer->len);
+ address_value = *((const u32 *)(xfer->tx_buf));
+ break;
+ case DATA_TRANSFER:
+ /* Program Command, Address value in register */
+ tegra_qspi_writel(tqspi, cmd_value, QSPI_CMB_SEQ_CMD);
+ tegra_qspi_writel(tqspi, address_value,
+ QSPI_CMB_SEQ_ADDR);
+ /* Program Command and Address config in register */
+ tegra_qspi_writel(tqspi, cmd_config,
+ QSPI_CMB_SEQ_CMD_CFG);
+ tegra_qspi_writel(tqspi, addr_config,
+ QSPI_CMB_SEQ_ADDR_CFG);
+
+ reinit_completion(&tqspi->xfer_completion);
+ cmd1 = tegra_qspi_setup_transfer_one(spi, xfer,
+ is_first_msg);
+ ret = tegra_qspi_start_transfer_one(spi, xfer,
+ cmd1);
+
+ if (ret < 0) {
+ dev_err(tqspi->dev, "Failed to start transfer-one: %d\n",
+ ret);
+ return ret;
+ }
+
+ is_first_msg = false;
+ ret = wait_for_completion_timeout(&tqspi->xfer_completion,
+ QSPI_DMA_TIMEOUT);
+
+ if (WARN_ON(ret == 0)) {
+ dev_err(tqspi->dev, "QSPI Transfer failed with timeout: %d\n",
+ ret);
+ if (tqspi->is_curr_dma_xfer &&
+ (tqspi->cur_direction & DATA_DIR_TX))
+ dmaengine_terminate_all(tqspi->tx_dma_chan);
+
+ if (tqspi->is_curr_dma_xfer &&
+ (tqspi->cur_direction & DATA_DIR_RX))
+ dmaengine_terminate_all(tqspi->rx_dma_chan);
+
+ /* Abort transfer by resetting pio/dma bit */
+ if (!tqspi->is_curr_dma_xfer) {
+ cmd1 = tegra_qspi_readl(tqspi, QSPI_COMMAND1);
+ cmd1 &= ~QSPI_PIO;
+ tegra_qspi_writel(tqspi, cmd1, QSPI_COMMAND1);
+ } else {
+ dma_ctl = tegra_qspi_readl(tqspi, QSPI_DMA_CTL);
+ dma_ctl &= ~QSPI_DMA_EN;
+ tegra_qspi_writel(tqspi, dma_ctl,
+ QSPI_DMA_CTL);
+ }
+
+ /* Reset controller if timeout happens */
+ if (device_reset(tqspi->dev) < 0)
+ dev_warn_once(tqspi->dev,
+ "device reset failed\n");
+ ret = -EIO;
+ goto exit;
+ }
+
+ if (tqspi->tx_status || tqspi->rx_status) {
+ dev_err(tqspi->dev, "QSPI Transfer failed\n");
+ tqspi->tx_status = 0;
+ tqspi->rx_status = 0;
+ ret = -EIO;
+ goto exit;
+ }
+ if (!xfer->cs_change) {
+ tegra_qspi_transfer_end(spi);
+ spi_transfer_delay_exec(xfer);
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ goto exit;
+ }
+ msg->actual_length += xfer->len;
+ transfer_phase++;
+ }
+ ret = 0;
+
+exit:
+ msg->status = ret;
+ if (ret < 0) {
+ tegra_qspi_transfer_end(spi);
+ spi_transfer_delay_exec(xfer);
+ }
+
+ return ret;
+}
+
+static int tegra_qspi_non_combined_seq_xfer(struct tegra_qspi *tqspi,
+ struct spi_message *msg)
+{
+ struct spi_device *spi = msg->spi;
+ struct spi_transfer *transfer;
+ bool is_first_msg = true;
+ int ret = 0, val = 0;
+
+ msg->status = 0;
+ msg->actual_length = 0;
+ tqspi->tx_status = 0;
+ tqspi->rx_status = 0;
+
+ /* Disable Combined sequence mode */
+ val = tegra_qspi_readl(tqspi, QSPI_GLOBAL_CONFIG);
+ val &= ~QSPI_CMB_SEQ_EN;
+ tegra_qspi_writel(tqspi, val, QSPI_GLOBAL_CONFIG);
+ list_for_each_entry(transfer, &msg->transfers, transfer_list) {
+ struct spi_transfer *xfer = transfer;
+ u8 dummy_bytes = 0;
+ u32 cmd1;
+
+ tqspi->dummy_cycles = 0;
+ /*
+ * Tegra QSPI hardware supports dummy bytes transfer after actual transfer
+ * bytes based on programmed dummy clock cycles in the QSPI_MISC register.
+ * So, check if the next transfer is dummy data transfer and program dummy
+ * clock cycles along with the current transfer and skip next transfer.
+ */
+ if (!list_is_last(&xfer->transfer_list, &msg->transfers)) {
+ struct spi_transfer *next_xfer;
+
+ next_xfer = list_next_entry(xfer, transfer_list);
+ if (next_xfer->dummy_data) {
+ u32 dummy_cycles = next_xfer->len * 8 / next_xfer->tx_nbits;
+
+ if (dummy_cycles <= QSPI_DUMMY_CYCLES_MAX) {
+ tqspi->dummy_cycles = dummy_cycles;
+ dummy_bytes = next_xfer->len;
+ transfer = next_xfer;
+ }
+ }
+ }
+
+ reinit_completion(&tqspi->xfer_completion);
+
+ cmd1 = tegra_qspi_setup_transfer_one(spi, xfer, is_first_msg);
+
+ ret = tegra_qspi_start_transfer_one(spi, xfer, cmd1);
+ if (ret < 0) {
+ dev_err(tqspi->dev, "failed to start transfer: %d\n", ret);
+ goto complete_xfer;
+ }
+
+ ret = wait_for_completion_timeout(&tqspi->xfer_completion,
+ QSPI_DMA_TIMEOUT);
+ if (WARN_ON(ret == 0)) {
+ dev_err(tqspi->dev, "transfer timeout\n");
+ if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_TX))
+ dmaengine_terminate_all(tqspi->tx_dma_chan);
+ if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_RX))
+ dmaengine_terminate_all(tqspi->rx_dma_chan);
+ tegra_qspi_handle_error(tqspi);
+ ret = -EIO;
+ goto complete_xfer;
+ }
+
+ if (tqspi->tx_status || tqspi->rx_status) {
+ tegra_qspi_handle_error(tqspi);
+ ret = -EIO;
+ goto complete_xfer;
+ }
+
+ msg->actual_length += xfer->len + dummy_bytes;
+
+complete_xfer:
+ if (ret < 0) {
+ tegra_qspi_transfer_end(spi);
+ spi_transfer_delay_exec(xfer);
+ goto exit;
+ }
+
+ if (list_is_last(&xfer->transfer_list, &msg->transfers)) {
+ /* de-activate CS after last transfer only when cs_change is not set */
+ if (!xfer->cs_change) {
+ tegra_qspi_transfer_end(spi);
+ spi_transfer_delay_exec(xfer);
+ }
+ } else if (xfer->cs_change) {
+ /* de-activate CS between transfers only when cs_change is set */
+ tegra_qspi_transfer_end(spi);
+ spi_transfer_delay_exec(xfer);
+ }
+ }
+
+ ret = 0;
+exit:
+ msg->status = ret;
+
+ return ret;
+}
+
+static bool tegra_qspi_validate_cmb_seq(struct tegra_qspi *tqspi,
+ struct spi_message *msg)
+{
+ int transfer_count = 0;
+ struct spi_transfer *xfer;
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ transfer_count++;
+ }
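+ /*
+ * A combined sequence is exactly three transfers: a command of at
+ * most 2 bytes, a 3- or 4-byte address, then the data transfer;
+ * without DMA the data must also fit in the FIFO.
+ */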
+ if (!tqspi->soc_data->cmb_xfer_capable || transfer_count != 3)
+ return false;
+ xfer = list_first_entry(&msg->transfers, typeof(*xfer),
+ transfer_list);
+ if (xfer->len > 2)
+ return false;
+ xfer = list_next_entry(xfer, transfer_list);
+ if (xfer->len > 4 || xfer->len < 3)
+ return false;
+ xfer = list_next_entry(xfer, transfer_list);
+ if (!tqspi->soc_data->has_dma && xfer->len > (QSPI_FIFO_DEPTH << 2))
+ return false;
+
+ return true;
+}
+
+static int tegra_qspi_transfer_one_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct tegra_qspi *tqspi = spi_master_get_devdata(master);
+ int ret;
+
+ if (tegra_qspi_validate_cmb_seq(tqspi, msg))
+ ret = tegra_qspi_combined_seq_xfer(tqspi, msg);
+ else
+ ret = tegra_qspi_non_combined_seq_xfer(tqspi, msg);
+
+ spi_finalize_current_message(master);
+
+ return ret;
+}
+
+static irqreturn_t handle_cpu_based_xfer(struct tegra_qspi *tqspi)
+{
+ struct spi_transfer *t = tqspi->curr_xfer;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tqspi->lock, flags);
+
+ if (tqspi->tx_status || tqspi->rx_status) {
+ tegra_qspi_handle_error(tqspi);
+ complete(&tqspi->xfer_completion);
+ goto exit;
+ }
+
+ if (tqspi->cur_direction & DATA_DIR_RX)
+ tegra_qspi_read_rx_fifo_to_client_rxbuf(tqspi, t);
+
+ if (tqspi->cur_direction & DATA_DIR_TX)
+ tqspi->cur_pos = tqspi->cur_tx_pos;
+ else
+ tqspi->cur_pos = tqspi->cur_rx_pos;
+
+ if (tqspi->cur_pos == t->len) {
+ complete(&tqspi->xfer_completion);
+ goto exit;
+ }
+
+ tegra_qspi_calculate_curr_xfer_param(tqspi, t);
+ tegra_qspi_start_cpu_based_transfer(tqspi, t);
+exit:
+ spin_unlock_irqrestore(&tqspi->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t handle_dma_based_xfer(struct tegra_qspi *tqspi)
+{
+ struct spi_transfer *t = tqspi->curr_xfer;
+ unsigned int total_fifo_words;
+ unsigned long flags;
+ long wait_status;
+ int err = 0;
+
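+ /* err encodes a TX-side failure as 1 and an RX-side failure as 2 */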
+ if (tqspi->cur_direction & DATA_DIR_TX) {
+ if (tqspi->tx_status) {
+ dmaengine_terminate_all(tqspi->tx_dma_chan);
+ err += 1;
+ } else {
+ wait_status = wait_for_completion_interruptible_timeout(
+ &tqspi->tx_dma_complete, QSPI_DMA_TIMEOUT);
+ if (wait_status <= 0) {
+ dmaengine_terminate_all(tqspi->tx_dma_chan);
+ dev_err(tqspi->dev, "failed TX DMA transfer\n");
+ err += 1;
+ }
+ }
+ }
+
+ if (tqspi->cur_direction & DATA_DIR_RX) {
+ if (tqspi->rx_status) {
+ dmaengine_terminate_all(tqspi->rx_dma_chan);
+ err += 2;
+ } else {
+ wait_status = wait_for_completion_interruptible_timeout(
+ &tqspi->rx_dma_complete, QSPI_DMA_TIMEOUT);
+ if (wait_status <= 0) {
+ dmaengine_terminate_all(tqspi->rx_dma_chan);
+ dev_err(tqspi->dev, "failed RX DMA transfer\n");
+ err += 2;
+ }
+ }
+ }
+
+ spin_lock_irqsave(&tqspi->lock, flags);
+
+ if (err) {
+ tegra_qspi_dma_unmap_xfer(tqspi, t);
+ tegra_qspi_handle_error(tqspi);
+ complete(&tqspi->xfer_completion);
+ goto exit;
+ }
+
+ if (tqspi->cur_direction & DATA_DIR_RX)
+ tegra_qspi_copy_qspi_rxbuf_to_client_rxbuf(tqspi, t);
+
+ if (tqspi->cur_direction & DATA_DIR_TX)
+ tqspi->cur_pos = tqspi->cur_tx_pos;
+ else
+ tqspi->cur_pos = tqspi->cur_rx_pos;
+
+ if (tqspi->cur_pos == t->len) {
+ tegra_qspi_dma_unmap_xfer(tqspi, t);
+ complete(&tqspi->xfer_completion);
+ goto exit;
+ }
+
+ tegra_qspi_dma_unmap_xfer(tqspi, t);
+
+ /* continue transfer in current message */
+ total_fifo_words = tegra_qspi_calculate_curr_xfer_param(tqspi, t);
+ if (total_fifo_words > QSPI_FIFO_DEPTH)
+ err = tegra_qspi_start_dma_based_transfer(tqspi, t);
+ else
+ err = tegra_qspi_start_cpu_based_transfer(tqspi, t);
+
+exit:
+ spin_unlock_irqrestore(&tqspi->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t tegra_qspi_isr_thread(int irq, void *context_data)
+{
+ struct tegra_qspi *tqspi = context_data;
+
+ tqspi->status_reg = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
+
+ if (tqspi->cur_direction & DATA_DIR_TX)
+ tqspi->tx_status = tqspi->status_reg & (QSPI_TX_FIFO_UNF | QSPI_TX_FIFO_OVF);
+
+ if (tqspi->cur_direction & DATA_DIR_RX)
+ tqspi->rx_status = tqspi->status_reg & (QSPI_RX_FIFO_OVF | QSPI_RX_FIFO_UNF);
+
+ tegra_qspi_mask_clear_irq(tqspi);
+
+ if (!tqspi->is_curr_dma_xfer)
+ return handle_cpu_based_xfer(tqspi);
+
+ return handle_dma_based_xfer(tqspi);
+}
+
+static struct tegra_qspi_soc_data tegra210_qspi_soc_data = {
+ .has_dma = true,
+ .cmb_xfer_capable = false,
+ .cs_count = 1,
+};
+
+static struct tegra_qspi_soc_data tegra186_qspi_soc_data = {
+ .has_dma = true,
+ .cmb_xfer_capable = true,
+ .cs_count = 1,
+};
+
+static struct tegra_qspi_soc_data tegra234_qspi_soc_data = {
+ .has_dma = false,
+ .cmb_xfer_capable = true,
+ .cs_count = 1,
+};
+
+static struct tegra_qspi_soc_data tegra241_qspi_soc_data = {
+ .has_dma = false,
+ .cmb_xfer_capable = true,
+ .cs_count = 4,
+};
+
+static const struct of_device_id tegra_qspi_of_match[] = {
+ {
+ .compatible = "nvidia,tegra210-qspi",
+ .data = &tegra210_qspi_soc_data,
+ }, {
+ .compatible = "nvidia,tegra186-qspi",
+ .data = &tegra186_qspi_soc_data,
+ }, {
+ .compatible = "nvidia,tegra194-qspi",
+ .data = &tegra186_qspi_soc_data,
+ }, {
+ .compatible = "nvidia,tegra234-qspi",
+ .data = &tegra234_qspi_soc_data,
+ }, {
+ .compatible = "nvidia,tegra241-qspi",
+ .data = &tegra241_qspi_soc_data,
+ },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, tegra_qspi_of_match);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id tegra_qspi_acpi_match[] = {
+ {
+ .id = "NVDA1213",
+ .driver_data = (kernel_ulong_t)&tegra210_qspi_soc_data,
+ }, {
+ .id = "NVDA1313",
+ .driver_data = (kernel_ulong_t)&tegra186_qspi_soc_data,
+ }, {
+ .id = "NVDA1413",
+ .driver_data = (kernel_ulong_t)&tegra234_qspi_soc_data,
+ }, {
+ .id = "NVDA1513",
+ .driver_data = (kernel_ulong_t)&tegra241_qspi_soc_data,
+ },
+ {}
+};
+
+MODULE_DEVICE_TABLE(acpi, tegra_qspi_acpi_match);
+#endif
+
+static int tegra_qspi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct tegra_qspi *tqspi;
+ struct resource *r;
+ int ret, qspi_irq;
+ int bus_num;
+
+ master = devm_spi_alloc_master(&pdev->dev, sizeof(*tqspi));
+ if (!master)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, master);
+ tqspi = spi_master_get_devdata(master);
+
+ master->mode_bits = SPI_MODE_0 | SPI_MODE_3 | SPI_CS_HIGH |
+ SPI_TX_DUAL | SPI_RX_DUAL | SPI_TX_QUAD | SPI_RX_QUAD;
+ master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) | SPI_BPW_MASK(8);
+ master->setup = tegra_qspi_setup;
+ master->transfer_one_message = tegra_qspi_transfer_one_message;
+ master->num_chipselect = 1;
+ master->auto_runtime_pm = true;
+
+ bus_num = of_alias_get_id(pdev->dev.of_node, "spi");
+ if (bus_num >= 0)
+ master->bus_num = bus_num;
+
+ tqspi->master = master;
+ tqspi->dev = &pdev->dev;
+ spin_lock_init(&tqspi->lock);
+
+ tqspi->soc_data = device_get_match_data(&pdev->dev);
+ master->num_chipselect = tqspi->soc_data->cs_count;
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ tqspi->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(tqspi->base))
+ return PTR_ERR(tqspi->base);
+
+ tqspi->phys = r->start;
+ qspi_irq = platform_get_irq(pdev, 0);
+ if (qspi_irq < 0)
+ return qspi_irq;
+ tqspi->irq = qspi_irq;
+
+ if (!has_acpi_companion(tqspi->dev)) {
+ tqspi->clk = devm_clk_get(&pdev->dev, "qspi");
+ if (IS_ERR(tqspi->clk)) {
+ ret = PTR_ERR(tqspi->clk);
+ dev_err(&pdev->dev, "failed to get clock: %d\n", ret);
+ return ret;
+ }
+
+ }
+
+ tqspi->max_buf_size = QSPI_FIFO_DEPTH << 2;
+ tqspi->dma_buf_size = DEFAULT_QSPI_DMA_BUF_LEN;
+
+ ret = tegra_qspi_init_dma(tqspi);
+ if (ret < 0)
+ return ret;
+
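+ /* With DMA, a transfer chunk can span the whole DMA bounce buffer instead of just the FIFO */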
+ if (tqspi->use_dma)
+ tqspi->max_buf_size = tqspi->dma_buf_size;
+
+ init_completion(&tqspi->tx_dma_complete);
+ init_completion(&tqspi->rx_dma_complete);
+ init_completion(&tqspi->xfer_completion);
+
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to get runtime PM: %d\n", ret);
+ goto exit_pm_disable;
+ }
+
+ if (device_reset(tqspi->dev) < 0)
+ dev_warn_once(tqspi->dev, "device reset failed\n");
+
+ tqspi->def_command1_reg = QSPI_M_S | QSPI_CS_SW_HW | QSPI_CS_SW_VAL;
+ tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1);
+ tqspi->spi_cs_timing1 = tegra_qspi_readl(tqspi, QSPI_CS_TIMING1);
+ tqspi->spi_cs_timing2 = tegra_qspi_readl(tqspi, QSPI_CS_TIMING2);
+ tqspi->def_command2_reg = tegra_qspi_readl(tqspi, QSPI_COMMAND2);
+
+ pm_runtime_put(&pdev->dev);
+
+ ret = request_threaded_irq(tqspi->irq, NULL,
+ tegra_qspi_isr_thread, IRQF_ONESHOT,
+ dev_name(&pdev->dev), tqspi);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to request IRQ#%u: %d\n", tqspi->irq, ret);
+ goto exit_pm_disable;
+ }
+
+ master->dev.of_node = pdev->dev.of_node;
+ ret = spi_register_master(master);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to register master: %d\n", ret);
+ goto exit_free_irq;
+ }
+
+ return 0;
+
+exit_free_irq:
+ free_irq(qspi_irq, tqspi);
+exit_pm_disable:
+ pm_runtime_force_suspend(&pdev->dev);
+ tegra_qspi_deinit_dma(tqspi);
+ return ret;
+}
+
+static int tegra_qspi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct tegra_qspi *tqspi = spi_master_get_devdata(master);
+
+ spi_unregister_master(master);
+ free_irq(tqspi->irq, tqspi);
+ pm_runtime_force_suspend(&pdev->dev);
+ tegra_qspi_deinit_dma(tqspi);
+
+ return 0;
+}
+
+static int __maybe_unused tegra_qspi_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+
+ return spi_master_suspend(master);
+}
+
+static int __maybe_unused tegra_qspi_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct tegra_qspi *tqspi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0) {
+ dev_err(dev, "failed to get runtime PM: %d\n", ret);
+ return ret;
+ }
+
+ tegra_qspi_writel(tqspi, tqspi->command1_reg, QSPI_COMMAND1);
+ tegra_qspi_writel(tqspi, tqspi->def_command2_reg, QSPI_COMMAND2);
+ pm_runtime_put(dev);
+
+ return spi_master_resume(master);
+}
+
+static int __maybe_unused tegra_qspi_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct tegra_qspi *tqspi = spi_master_get_devdata(master);
+
+ /* Runtime pm disabled with ACPI */
+ if (has_acpi_companion(tqspi->dev))
+ return 0;
+ /* flush all writes that are in the PPSB queue by reading back */
+ tegra_qspi_readl(tqspi, QSPI_COMMAND1);
+
+ clk_disable_unprepare(tqspi->clk);
+
+ return 0;
+}
+
+static int __maybe_unused tegra_qspi_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct tegra_qspi *tqspi = spi_master_get_devdata(master);
+ int ret;
+
+ /* Runtime pm disabled with ACPI */
+ if (has_acpi_companion(tqspi->dev))
+ return 0;
+ ret = clk_prepare_enable(tqspi->clk);
+ if (ret < 0)
+ dev_err(tqspi->dev, "failed to enable clock: %d\n", ret);
+
+ return ret;
+}
+
+static const struct dev_pm_ops tegra_qspi_pm_ops = {
+ SET_RUNTIME_PM_OPS(tegra_qspi_runtime_suspend, tegra_qspi_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(tegra_qspi_suspend, tegra_qspi_resume)
+};
+
+static struct platform_driver tegra_qspi_driver = {
+ .driver = {
+ .name = "tegra-qspi",
+ .pm = &tegra_qspi_pm_ops,
+ .of_match_table = tegra_qspi_of_match,
+ .acpi_match_table = ACPI_PTR(tegra_qspi_acpi_match),
+ },
+ .probe = tegra_qspi_probe,
+ .remove = tegra_qspi_remove,
+};
+module_platform_driver(tegra_qspi_driver);
+
+MODULE_ALIAS("platform:qspi-tegra");
+MODULE_DESCRIPTION("NVIDIA Tegra QSPI Controller Driver");
+MODULE_AUTHOR("Sowjanya Komatineni <skomatineni@nvidia.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-test.h b/drivers/spi/spi-test.h
new file mode 100644
index 000000000..5ddecf04a
--- /dev/null
+++ b/drivers/spi/spi-test.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * linux/drivers/spi/spi-test.h
+ *
+ * (c) Martin Sperl <kernel@martin.sperl.org>
+ *
+ * spi_test definitions
+ */
+
+#include <linux/spi/spi.h>
+
+#define SPI_TEST_MAX_TRANSFERS 4
+#define SPI_TEST_MAX_SIZE (32 * PAGE_SIZE)
+#define SPI_TEST_MAX_ITERATE 32
+
+/* the "dummy" start addresses used in spi_test
+ * these addresses get translated at a later stage
+ */
+#define RX_START BIT(30)
+#define TX_START BIT(31)
+#define RX(off) ((void *)(RX_START + off))
+#define TX(off) ((void *)(TX_START + off))
+
+/* some special defines for offsets */
+#define SPI_TEST_MAX_SIZE_HALF BIT(29)
+
+/* detection pattern for unfinished reads...
+ * - 0x00 or 0xff could be valid levels for tx_buf = NULL,
+ * so we do not use either of them
+ */
+#define SPI_TEST_PATTERN_UNWRITTEN 0xAA
+#define SPI_TEST_PATTERN_DO_NOT_WRITE 0x55
+#define SPI_TEST_CHECK_DO_NOT_WRITE 64
+
+/**
+ * struct spi_test - describes a specific (set of) tests to execute
+ *
+ * @description: description of the test
+ *
+ * @msg: a template @spi_message used for the default settings
+ * @transfers: array of @spi_transfers that are part of the
+ * resulting spi_message.
+ * @transfer_count: number of transfers
+ *
+ * @run_test: run a specific spi_test - this allows overriding
+ * the default implementation of @spi_test_run_transfer,
+ * either to add custom filters for a specific test
+ * or to effectively run some very custom tests...
+ * @execute_msg: run the spi_message for real - this allows overriding
+ * @spi_test_execute_msg to apply final modifications
+ * to the spi_message
+ * @expected_return: the expected return code - in some cases we also
+ * want to test for error conditions
+ *
+ * @iterate_len: list of length to iterate on
+ * @iterate_tx_align: change the alignment of @spi_transfer.tx_buf
+ * for all values in the below range if set.
+ * the ranges are:
+ * [0 : @spi_master.dma_alignment[ if set
+ * [0 : iterate_tx_align[ if unset
+ * @iterate_rx_align: change the alignment of @spi_transfer.rx_buf
+ * see @iterate_tx_align for details
+ * @iterate_transfer_mask: the bitmask of transfers to which the iterations
+ * apply - if 0, then it applies to all transfers
+ *
+ * @fill_option: define how tx_buf is filled
+ * @fill_pattern: fill pattern to apply to the tx_buf
+ * (used in some of the @fill_options)
+ * @elapsed_time: elapsed time in nanoseconds
+ */
+
+struct spi_test {
+ char description[64];
+ struct spi_message msg;
+ struct spi_transfer transfers[SPI_TEST_MAX_TRANSFERS];
+ unsigned int transfer_count;
+ int (*run_test)(struct spi_device *spi, struct spi_test *test,
+ void *tx, void *rx);
+ int (*execute_msg)(struct spi_device *spi, struct spi_test *test,
+ void *tx, void *rx);
+ int expected_return;
+ /* iterate over all values, terminated by a -1 */
+ int iterate_len[SPI_TEST_MAX_ITERATE];
+ int iterate_tx_align;
+ int iterate_rx_align;
+ u32 iterate_transfer_mask;
+ /* the tx-fill operation */
+ u32 fill_option;
+#define FILL_MEMSET_8 0 /* just memset with 8 bit */
+#define FILL_MEMSET_16 1 /* just memset with 16 bit */
+#define FILL_MEMSET_24 2 /* just memset with 24 bit */
+#define FILL_MEMSET_32 3 /* just memset with 32 bit */
+#define FILL_COUNT_8 4 /* fill with an 8 bit counter */
+#define FILL_COUNT_16 5 /* fill with a 16 bit counter */
+#define FILL_COUNT_24 6 /* fill with a 24 bit counter */
+#define FILL_COUNT_32 7 /* fill with a 32 bit counter */
+#define FILL_TRANSFER_BYTE_8 8 /* fill with the transfer byte - 8 bit */
+#define FILL_TRANSFER_BYTE_16 9 /* fill with the transfer byte - 16 bit */
+#define FILL_TRANSFER_BYTE_24 10 /* fill with the transfer byte - 24 bit */
+#define FILL_TRANSFER_BYTE_32 11 /* fill with the transfer byte - 32 bit */
+#define FILL_TRANSFER_NUM 16 /* fill with the transfer number */
+ u32 fill_pattern;
+ unsigned long long elapsed_time;
+};
+
+/* default implementation for @spi_test.run_test */
+int spi_test_run_test(struct spi_device *spi,
+ const struct spi_test *test,
+ void *tx, void *rx);
+
+/* default implementation for @spi_test.execute_msg */
+int spi_test_execute_msg(struct spi_device *spi,
+ struct spi_test *test,
+ void *tx, void *rx);
+
+/* function to execute a set of tests */
+int spi_test_run_tests(struct spi_device *spi,
+ struct spi_test *tests);
+
+#define ITERATE_LEN_LIST 0, 1, 2, 3, 7, 11, 16, 31, 32, 64, 97, 128, 251, 256, \
+ 1021, 1024, 1031, 4093, PAGE_SIZE, 4099, 65536, 65537
+/* some of the default @spi_transfer.len to test, terminated by a -1 */
+#define ITERATE_LEN ITERATE_LEN_LIST, -1
+#define ITERATE_MAX_LEN ITERATE_LEN_LIST, (SPI_TEST_MAX_SIZE - 1), \
+ SPI_TEST_MAX_SIZE, -1
+
+/* the default alignment to test */
+#define ITERATE_ALIGN sizeof(int)
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
new file mode 100644
index 000000000..60086869b
--- /dev/null
+++ b/drivers/spi/spi-ti-qspi.c
@@ -0,0 +1,947 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * TI QSPI driver
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated - https://www.ti.com
+ * Author: Sourav Poddar <sourav.poddar@ti.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/omap-dma.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/sizes.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+
+struct ti_qspi_regs {
+ u32 clkctrl;
+};
+
+struct ti_qspi {
+ struct completion transfer_complete;
+
+ /* list synchronization */
+ struct mutex list_lock;
+
+ struct spi_master *master;
+ void __iomem *base;
+ void __iomem *mmap_base;
+ size_t mmap_size;
+ struct regmap *ctrl_base;
+ unsigned int ctrl_reg;
+ struct clk *fclk;
+ struct device *dev;
+
+ struct ti_qspi_regs ctx_reg;
+
+ dma_addr_t mmap_phys_base;
+ dma_addr_t rx_bb_dma_addr;
+ void *rx_bb_addr;
+ struct dma_chan *rx_chan;
+
+ u32 cmd;
+ u32 dc;
+
+ bool mmap_enabled;
+ int current_cs;
+};
+
+#define QSPI_PID (0x0)
+#define QSPI_SYSCONFIG (0x10)
+#define QSPI_SPI_CLOCK_CNTRL_REG (0x40)
+#define QSPI_SPI_DC_REG (0x44)
+#define QSPI_SPI_CMD_REG (0x48)
+#define QSPI_SPI_STATUS_REG (0x4c)
+#define QSPI_SPI_DATA_REG (0x50)
+#define QSPI_SPI_SETUP_REG(n) ((0x54 + 4 * n))
+#define QSPI_SPI_SWITCH_REG (0x64)
+#define QSPI_SPI_DATA_REG_1 (0x68)
+#define QSPI_SPI_DATA_REG_2 (0x6c)
+#define QSPI_SPI_DATA_REG_3 (0x70)
+
+#define QSPI_COMPLETION_TIMEOUT msecs_to_jiffies(2000)
+
+/* Clock Control */
+#define QSPI_CLK_EN (1 << 31)
+#define QSPI_CLK_DIV_MAX 0xffff
+
+/* Command */
+#define QSPI_EN_CS(n) (n << 28)
+#define QSPI_WLEN(n) ((n - 1) << 19)
+#define QSPI_3_PIN (1 << 18)
+#define QSPI_RD_SNGL (1 << 16)
+#define QSPI_WR_SNGL (2 << 16)
+#define QSPI_RD_DUAL (3 << 16)
+#define QSPI_RD_QUAD (7 << 16)
+#define QSPI_INVAL (4 << 16)
+#define QSPI_FLEN(n) ((n - 1) << 0)
+#define QSPI_WLEN_MAX_BITS 128
+#define QSPI_WLEN_MAX_BYTES 16
+#define QSPI_WLEN_MASK QSPI_WLEN(QSPI_WLEN_MAX_BITS)
+
+/* STATUS REGISTER */
+#define BUSY 0x01
+#define WC 0x02
+
+/* Device Control */
+#define QSPI_DD(m, n) (m << (3 + n * 8))
+#define QSPI_CKPHA(n) (1 << (2 + n * 8))
+#define QSPI_CSPOL(n) (1 << (1 + n * 8))
+#define QSPI_CKPOL(n) (1 << (n * 8))
+
+#define QSPI_FRAME 4096
+
+#define QSPI_AUTOSUSPEND_TIMEOUT 2000
+
+#define MEM_CS_EN(n) ((n + 1) << 8)
+#define MEM_CS_MASK (7 << 8)
+
+#define MM_SWITCH 0x1
+
+#define QSPI_SETUP_RD_NORMAL (0x0 << 12)
+#define QSPI_SETUP_RD_DUAL (0x1 << 12)
+#define QSPI_SETUP_RD_QUAD (0x3 << 12)
+#define QSPI_SETUP_ADDR_SHIFT 8
+#define QSPI_SETUP_DUMMY_SHIFT 10
+
+#define QSPI_DMA_BUFFER_SIZE SZ_64K
+
+static inline unsigned long ti_qspi_read(struct ti_qspi *qspi,
+ unsigned long reg)
+{
+ return readl(qspi->base + reg);
+}
+
+static inline void ti_qspi_write(struct ti_qspi *qspi,
+ unsigned long val, unsigned long reg)
+{
+ writel(val, qspi->base + reg);
+}
+
+static int ti_qspi_setup(struct spi_device *spi)
+{
+ struct ti_qspi *qspi = spi_master_get_devdata(spi->master);
+ int ret;
+
+ if (spi->master->busy) {
+ dev_dbg(qspi->dev, "master busy doing other transfers\n");
+ return -EBUSY;
+ }
+
+ if (!qspi->master->max_speed_hz) {
+ dev_err(qspi->dev, "spi max frequency not defined\n");
+ return -EINVAL;
+ }
+
+ spi->max_speed_hz = min(spi->max_speed_hz, qspi->master->max_speed_hz);
+
+ ret = pm_runtime_resume_and_get(qspi->dev);
+ if (ret < 0) {
+ dev_err(qspi->dev, "pm_runtime_get_sync() failed\n");
+ return ret;
+ }
+
+ pm_runtime_mark_last_busy(qspi->dev);
+ ret = pm_runtime_put_autosuspend(qspi->dev);
+ if (ret < 0) {
+ dev_err(qspi->dev, "pm_runtime_put_autosuspend() failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ti_qspi_setup_clk(struct ti_qspi *qspi, u32 speed_hz)
+{
+ struct ti_qspi_regs *ctx_reg = &qspi->ctx_reg;
+ int clk_div;
+ u32 clk_ctrl_reg, clk_rate, clk_ctrl_new;
+
+ clk_rate = clk_get_rate(qspi->fclk);
+ clk_div = DIV_ROUND_UP(clk_rate, speed_hz) - 1;
+ clk_div = clamp(clk_div, 0, QSPI_CLK_DIV_MAX);
+ dev_dbg(qspi->dev, "hz: %d, clock divider %d\n", speed_hz, clk_div);
+
+ pm_runtime_resume_and_get(qspi->dev);
+
+ clk_ctrl_new = QSPI_CLK_EN | clk_div;
+ if (ctx_reg->clkctrl != clk_ctrl_new) {
+ clk_ctrl_reg = ti_qspi_read(qspi, QSPI_SPI_CLOCK_CNTRL_REG);
+
+ clk_ctrl_reg &= ~QSPI_CLK_EN;
+
+ /* disable SCLK */
+ ti_qspi_write(qspi, clk_ctrl_reg, QSPI_SPI_CLOCK_CNTRL_REG);
+
+ /* enable SCLK */
+ ti_qspi_write(qspi, clk_ctrl_new, QSPI_SPI_CLOCK_CNTRL_REG);
+ ctx_reg->clkctrl = clk_ctrl_new;
+ }
+
+ pm_runtime_mark_last_busy(qspi->dev);
+ pm_runtime_put_autosuspend(qspi->dev);
+}
+
+static void ti_qspi_restore_ctx(struct ti_qspi *qspi)
+{
+ struct ti_qspi_regs *ctx_reg = &qspi->ctx_reg;
+
+ ti_qspi_write(qspi, ctx_reg->clkctrl, QSPI_SPI_CLOCK_CNTRL_REG);
+}
+
+static inline u32 qspi_is_busy(struct ti_qspi *qspi)
+{
+ u32 stat;
+ unsigned long timeout = jiffies + QSPI_COMPLETION_TIMEOUT;
+
+ stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
+ while ((stat & BUSY) && time_after(timeout, jiffies)) {
+ cpu_relax();
+ stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
+ }
+
+ WARN(stat & BUSY, "qspi busy\n");
+ return stat & BUSY;
+}
+
+static inline int ti_qspi_poll_wc(struct ti_qspi *qspi)
+{
+ u32 stat;
+ unsigned long timeout = jiffies + QSPI_COMPLETION_TIMEOUT;
+
+ do {
+ stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
+ if (stat & WC)
+ return 0;
+ cpu_relax();
+ } while (time_after(timeout, jiffies));
+
+ stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
+ if (stat & WC)
+ return 0;
+ return -ETIMEDOUT;
+}
+
+static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t,
+ int count)
+{
+ int wlen, xfer_len;
+ unsigned int cmd;
+ const u8 *txbuf;
+ u32 data;
+
+ txbuf = t->tx_buf;
+ cmd = qspi->cmd | QSPI_WR_SNGL;
+ wlen = t->bits_per_word >> 3; /* in bytes */
+ xfer_len = wlen;
+
+ while (count) {
+ if (qspi_is_busy(qspi))
+ return -EBUSY;
+
+ switch (wlen) {
+ case 1:
+ dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %02x\n",
+ cmd, qspi->dc, *txbuf);
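+ /* Burst up to 16 bytes at a time through the four 32-bit data registers */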
+ if (count >= QSPI_WLEN_MAX_BYTES) {
+ u32 *txp = (u32 *)txbuf;
+
+ data = cpu_to_be32(*txp++);
+ writel(data, qspi->base +
+ QSPI_SPI_DATA_REG_3);
+ data = cpu_to_be32(*txp++);
+ writel(data, qspi->base +
+ QSPI_SPI_DATA_REG_2);
+ data = cpu_to_be32(*txp++);
+ writel(data, qspi->base +
+ QSPI_SPI_DATA_REG_1);
+ data = cpu_to_be32(*txp++);
+ writel(data, qspi->base +
+ QSPI_SPI_DATA_REG);
+ xfer_len = QSPI_WLEN_MAX_BYTES;
+ cmd |= QSPI_WLEN(QSPI_WLEN_MAX_BITS);
+ } else {
+ writeb(*txbuf, qspi->base + QSPI_SPI_DATA_REG);
+ cmd = qspi->cmd | QSPI_WR_SNGL;
+ xfer_len = wlen;
+ cmd |= QSPI_WLEN(wlen);
+ }
+ break;
+ case 2:
+ dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %04x\n",
+ cmd, qspi->dc, *txbuf);
+ writew(*((u16 *)txbuf), qspi->base + QSPI_SPI_DATA_REG);
+ break;
+ case 4:
+ dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %08x\n",
+ cmd, qspi->dc, *txbuf);
+ writel(*((u32 *)txbuf), qspi->base + QSPI_SPI_DATA_REG);
+ break;
+ }
+
+ ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
+ if (ti_qspi_poll_wc(qspi)) {
+ dev_err(qspi->dev, "write timed out\n");
+ return -ETIMEDOUT;
+ }
+ txbuf += xfer_len;
+ count -= xfer_len;
+ }
+
+ return 0;
+}
+
+static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t,
+ int count)
+{
+ int wlen;
+ unsigned int cmd;
+ u32 rx;
+ u8 rxlen, rx_wlen;
+ u8 *rxbuf;
+
+ rxbuf = t->rx_buf;
+ cmd = qspi->cmd;
+ switch (t->rx_nbits) {
+ case SPI_NBITS_DUAL:
+ cmd |= QSPI_RD_DUAL;
+ break;
+ case SPI_NBITS_QUAD:
+ cmd |= QSPI_RD_QUAD;
+ break;
+ default:
+ cmd |= QSPI_RD_SNGL;
+ break;
+ }
+ wlen = t->bits_per_word >> 3; /* in bytes */
+ rx_wlen = wlen;
+
+ while (count) {
+ dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc);
+ if (qspi_is_busy(qspi))
+ return -EBUSY;
+
+ switch (wlen) {
+ case 1:
+ /*
+ * Optimize 8-bit word transfers, as used by
+ * SPI flash devices.
+ */
+ if (count >= QSPI_WLEN_MAX_BYTES) {
+ rxlen = QSPI_WLEN_MAX_BYTES;
+ } else {
+ rxlen = min(count, 4);
+ }
+ rx_wlen = rxlen << 3;
+ cmd &= ~QSPI_WLEN_MASK;
+ cmd |= QSPI_WLEN(rx_wlen);
+ break;
+ default:
+ rxlen = wlen;
+ break;
+ }
+
+ ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
+ if (ti_qspi_poll_wc(qspi)) {
+ dev_err(qspi->dev, "read timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ switch (wlen) {
+ case 1:
+ /*
+ * Optimize 8-bit word transfers, as used by
+ * SPI flash devices.
+ */
+ if (count >= QSPI_WLEN_MAX_BYTES) {
+ u32 *rxp = (u32 *) rxbuf;
+ rx = readl(qspi->base + QSPI_SPI_DATA_REG_3);
+ *rxp++ = be32_to_cpu(rx);
+ rx = readl(qspi->base + QSPI_SPI_DATA_REG_2);
+ *rxp++ = be32_to_cpu(rx);
+ rx = readl(qspi->base + QSPI_SPI_DATA_REG_1);
+ *rxp++ = be32_to_cpu(rx);
+ rx = readl(qspi->base + QSPI_SPI_DATA_REG);
+ *rxp++ = be32_to_cpu(rx);
+ } else {
+ u8 *rxp = rxbuf;
+ rx = readl(qspi->base + QSPI_SPI_DATA_REG);
+ if (rx_wlen >= 8)
+ *rxp++ = rx >> (rx_wlen - 8);
+ if (rx_wlen >= 16)
+ *rxp++ = rx >> (rx_wlen - 16);
+ if (rx_wlen >= 24)
+ *rxp++ = rx >> (rx_wlen - 24);
+ if (rx_wlen >= 32)
+ *rxp++ = rx;
+ }
+ break;
+ case 2:
+ *((u16 *)rxbuf) = readw(qspi->base + QSPI_SPI_DATA_REG);
+ break;
+ case 4:
+ *((u32 *)rxbuf) = readl(qspi->base + QSPI_SPI_DATA_REG);
+ break;
+ }
+ rxbuf += rxlen;
+ count -= rxlen;
+ }
+
+ return 0;
+}
+
+static int qspi_transfer_msg(struct ti_qspi *qspi, struct spi_transfer *t,
+ int count)
+{
+ int ret;
+
+ if (t->tx_buf) {
+ ret = qspi_write_msg(qspi, t, count);
+ if (ret) {
+ dev_dbg(qspi->dev, "Error while writing\n");
+ return ret;
+ }
+ }
+
+ if (t->rx_buf) {
+ ret = qspi_read_msg(qspi, t, count);
+ if (ret) {
+ dev_dbg(qspi->dev, "Error while reading\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void ti_qspi_dma_callback(void *param)
+{
+ struct ti_qspi *qspi = param;
+
+ complete(&qspi->transfer_complete);
+}
+
+static int ti_qspi_dma_xfer(struct ti_qspi *qspi, dma_addr_t dma_dst,
+ dma_addr_t dma_src, size_t len)
+{
+ struct dma_chan *chan = qspi->rx_chan;
+ dma_cookie_t cookie;
+ enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+ struct dma_async_tx_descriptor *tx;
+ int ret;
+ unsigned long time_left;
+
+ tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len, flags);
+ if (!tx) {
+ dev_err(qspi->dev, "device_prep_dma_memcpy error\n");
+ return -EIO;
+ }
+
+ tx->callback = ti_qspi_dma_callback;
+ tx->callback_param = qspi;
+ cookie = tx->tx_submit(tx);
+ reinit_completion(&qspi->transfer_complete);
+
+ ret = dma_submit_error(cookie);
+ if (ret) {
+ dev_err(qspi->dev, "dma_submit_error %d\n", cookie);
+ return -EIO;
+ }
+
+ dma_async_issue_pending(chan);
+ time_left = wait_for_completion_timeout(&qspi->transfer_complete,
+ msecs_to_jiffies(len));
+ if (time_left == 0) {
+ dmaengine_terminate_sync(chan);
+ dev_err(qspi->dev, "DMA wait_for_completion_timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int ti_qspi_dma_bounce_buffer(struct ti_qspi *qspi, loff_t offs,
+ void *to, size_t readsize)
+{
+ dma_addr_t dma_src = qspi->mmap_phys_base + offs;
+ int ret = 0;
+
+ /*
+ * Use a bounce buffer since filesystems such as jffs2 and ubifs may
+ * pass buffers that do not belong to the kernel lowmem region.
+ */
+ while (readsize != 0) {
+ size_t xfer_len = min_t(size_t, QSPI_DMA_BUFFER_SIZE,
+ readsize);
+
+ ret = ti_qspi_dma_xfer(qspi, qspi->rx_bb_dma_addr,
+ dma_src, xfer_len);
+ if (ret != 0)
+ return ret;
+ memcpy(to, qspi->rx_bb_addr, xfer_len);
+ readsize -= xfer_len;
+ dma_src += xfer_len;
+ to += xfer_len;
+ }
+
+ return ret;
+}
+
+static int ti_qspi_dma_xfer_sg(struct ti_qspi *qspi, struct sg_table rx_sg,
+ loff_t from)
+{
+ struct scatterlist *sg;
+ dma_addr_t dma_src = qspi->mmap_phys_base + from;
+ dma_addr_t dma_dst;
+ int i, len, ret;
+
+ for_each_sg(rx_sg.sgl, sg, rx_sg.nents, i) {
+ dma_dst = sg_dma_address(sg);
+ len = sg_dma_len(sg);
+ ret = ti_qspi_dma_xfer(qspi, dma_dst, dma_src, len);
+ if (ret)
+ return ret;
+ dma_src += len;
+ }
+
+ return 0;
+}
+
+static void ti_qspi_enable_memory_map(struct spi_device *spi)
+{
+ struct ti_qspi *qspi = spi_master_get_devdata(spi->master);
+
+ ti_qspi_write(qspi, MM_SWITCH, QSPI_SPI_SWITCH_REG);
+ if (qspi->ctrl_base) {
+ regmap_update_bits(qspi->ctrl_base, qspi->ctrl_reg,
+ MEM_CS_MASK,
+ MEM_CS_EN(spi->chip_select));
+ }
+ qspi->mmap_enabled = true;
+ qspi->current_cs = spi->chip_select;
+}
+
+static void ti_qspi_disable_memory_map(struct spi_device *spi)
+{
+ struct ti_qspi *qspi = spi_master_get_devdata(spi->master);
+
+ ti_qspi_write(qspi, 0, QSPI_SPI_SWITCH_REG);
+ if (qspi->ctrl_base)
+ regmap_update_bits(qspi->ctrl_base, qspi->ctrl_reg,
+ MEM_CS_MASK, 0);
+ qspi->mmap_enabled = false;
+ qspi->current_cs = -1;
+}
+
+static void ti_qspi_setup_mmap_read(struct spi_device *spi, u8 opcode,
+ u8 data_nbits, u8 addr_width,
+ u8 dummy_bytes)
+{
+ struct ti_qspi *qspi = spi_master_get_devdata(spi->master);
+ u32 memval = opcode;
+
+ switch (data_nbits) {
+ case SPI_NBITS_QUAD:
+ memval |= QSPI_SETUP_RD_QUAD;
+ break;
+ case SPI_NBITS_DUAL:
+ memval |= QSPI_SETUP_RD_DUAL;
+ break;
+ default:
+ memval |= QSPI_SETUP_RD_NORMAL;
+ break;
+ }
+ memval |= ((addr_width - 1) << QSPI_SETUP_ADDR_SHIFT |
+ dummy_bytes << QSPI_SETUP_DUMMY_SHIFT);
+ ti_qspi_write(qspi, memval,
+ QSPI_SPI_SETUP_REG(spi->chip_select));
+}
+
+static int ti_qspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
+{
+ struct ti_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
+ size_t max_len;
+
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ if (op->addr.val < qspi->mmap_size) {
+ /* Limit MMIO to the mmaped region */
+ if (op->addr.val + op->data.nbytes > qspi->mmap_size) {
+ max_len = qspi->mmap_size - op->addr.val;
+ op->data.nbytes = min((size_t) op->data.nbytes,
+ max_len);
+ }
+ } else {
+ /*
+ * Use fallback mode (SW generated transfers) above the
+ * mmaped region.
+ * Adjust size to comply with the QSPI max frame length.
+ */
+ max_len = QSPI_FRAME;
+ max_len -= 1 + op->addr.nbytes + op->dummy.nbytes;
+ op->data.nbytes = min((size_t) op->data.nbytes,
+ max_len);
+ }
+ }
+
+ return 0;
+}
+
+static int ti_qspi_exec_mem_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ struct ti_qspi *qspi = spi_master_get_devdata(mem->spi->master);
+ u32 from = 0;
+ int ret = 0;
+
+ /* Only optimize read path. */
+ if (!op->data.nbytes || op->data.dir != SPI_MEM_DATA_IN ||
+ !op->addr.nbytes || op->addr.nbytes > 4)
+ return -ENOTSUPP;
+
+ /* Address exceeds MMIO window size, fall back to regular mode. */
+ from = op->addr.val;
+ if (from + op->data.nbytes > qspi->mmap_size)
+ return -ENOTSUPP;
+
+ mutex_lock(&qspi->list_lock);
+
+ if (!qspi->mmap_enabled || qspi->current_cs != mem->spi->chip_select) {
+ ti_qspi_setup_clk(qspi, mem->spi->max_speed_hz);
+ ti_qspi_enable_memory_map(mem->spi);
+ }
+ ti_qspi_setup_mmap_read(mem->spi, op->cmd.opcode, op->data.buswidth,
+ op->addr.nbytes, op->dummy.nbytes);
+
+ if (qspi->rx_chan) {
+ struct sg_table sgt;
+
+ if (virt_addr_valid(op->data.buf.in) &&
+ !spi_controller_dma_map_mem_op_data(mem->spi->master, op,
+ &sgt)) {
+ ret = ti_qspi_dma_xfer_sg(qspi, sgt, from);
+ spi_controller_dma_unmap_mem_op_data(mem->spi->master,
+ op, &sgt);
+ } else {
+ ret = ti_qspi_dma_bounce_buffer(qspi, from,
+ op->data.buf.in,
+ op->data.nbytes);
+ }
+ } else {
+ memcpy_fromio(op->data.buf.in, qspi->mmap_base + from,
+ op->data.nbytes);
+ }
+
+ mutex_unlock(&qspi->list_lock);
+
+ return ret;
+}
+
+static const struct spi_controller_mem_ops ti_qspi_mem_ops = {
+ .exec_op = ti_qspi_exec_mem_op,
+ .adjust_op_size = ti_qspi_adjust_op_size,
+};
+
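+/*
+ * PIO message path: program the device-control register from the SPI mode
+ * bits, cap the frame length at QSPI_FRAME words, and clock each transfer
+ * out word by word, disabling the memory map first if it was active.
+ */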
+static int ti_qspi_start_transfer_one(struct spi_master *master,
+ struct spi_message *m)
+{
+ struct ti_qspi *qspi = spi_master_get_devdata(master);
+ struct spi_device *spi = m->spi;
+ struct spi_transfer *t;
+ int status = 0, ret;
+ unsigned int frame_len_words, transfer_len_words;
+ int wlen;
+
+ /* setup device control reg */
+ qspi->dc = 0;
+
+ if (spi->mode & SPI_CPHA)
+ qspi->dc |= QSPI_CKPHA(spi->chip_select);
+ if (spi->mode & SPI_CPOL)
+ qspi->dc |= QSPI_CKPOL(spi->chip_select);
+ if (spi->mode & SPI_CS_HIGH)
+ qspi->dc |= QSPI_CSPOL(spi->chip_select);
+
+ frame_len_words = 0;
+ list_for_each_entry(t, &m->transfers, transfer_list)
+ frame_len_words += t->len / (t->bits_per_word >> 3);
+ frame_len_words = min_t(unsigned int, frame_len_words, QSPI_FRAME);
+
+ /* setup command reg */
+ qspi->cmd = 0;
+ qspi->cmd |= QSPI_EN_CS(spi->chip_select);
+ qspi->cmd |= QSPI_FLEN(frame_len_words);
+
+ ti_qspi_write(qspi, qspi->dc, QSPI_SPI_DC_REG);
+
+ mutex_lock(&qspi->list_lock);
+
+ if (qspi->mmap_enabled)
+ ti_qspi_disable_memory_map(spi);
+
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ qspi->cmd = ((qspi->cmd & ~QSPI_WLEN_MASK) |
+ QSPI_WLEN(t->bits_per_word));
+
+ wlen = t->bits_per_word >> 3;
+ transfer_len_words = min(t->len / wlen, frame_len_words);
+
+ ti_qspi_setup_clk(qspi, t->speed_hz);
+ ret = qspi_transfer_msg(qspi, t, transfer_len_words * wlen);
+ if (ret) {
+ dev_dbg(qspi->dev, "transfer message failed\n");
+ mutex_unlock(&qspi->list_lock);
+ return -EINVAL;
+ }
+
+ m->actual_length += transfer_len_words * wlen;
+ frame_len_words -= transfer_len_words;
+ if (frame_len_words == 0)
+ break;
+ }
+
+ mutex_unlock(&qspi->list_lock);
+
+ ti_qspi_write(qspi, qspi->cmd | QSPI_INVAL, QSPI_SPI_CMD_REG);
+ m->status = status;
+ spi_finalize_current_message(master);
+
+ return status;
+}
+
+static int ti_qspi_runtime_resume(struct device *dev)
+{
+ struct ti_qspi *qspi;
+
+ qspi = dev_get_drvdata(dev);
+ ti_qspi_restore_ctx(qspi);
+
+ return 0;
+}
+
+static void ti_qspi_dma_cleanup(struct ti_qspi *qspi)
+{
+ if (qspi->rx_bb_addr)
+ dma_free_coherent(qspi->dev, QSPI_DMA_BUFFER_SIZE,
+ qspi->rx_bb_addr,
+ qspi->rx_bb_dma_addr);
+
+ if (qspi->rx_chan)
+ dma_release_channel(qspi->rx_chan);
+}
+
+static const struct of_device_id ti_qspi_match[] = {
+ {.compatible = "ti,dra7xxx-qspi" },
+ {.compatible = "ti,am4372-qspi" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ti_qspi_match);
+
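+/*
+ * Probe summary (derived from the code below): the driver takes "qspi_base"
+ * and an optional "qspi_mmap" memory resource, an "fck" clock, and optional
+ * "num-cs", "spi-max-frequency" and "syscon-chipselects" properties. A
+ * MEMCPY DMA channel is requested opportunistically; without one, reads
+ * from the mmap window use memcpy_fromio() and everything else is PIO.
+ */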
+static int ti_qspi_probe(struct platform_device *pdev)
+{
+ struct ti_qspi *qspi;
+ struct spi_master *master;
+ struct resource *r, *res_mmap;
+ struct device_node *np = pdev->dev.of_node;
+ u32 max_freq;
+ int ret = 0, num_cs, irq;
+ dma_cap_mask_t mask;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*qspi));
+ if (!master)
+ return -ENOMEM;
+
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD;
+
+ master->flags = SPI_MASTER_HALF_DUPLEX;
+ master->setup = ti_qspi_setup;
+ master->auto_runtime_pm = true;
+ master->transfer_one_message = ti_qspi_start_transfer_one;
+ master->dev.of_node = pdev->dev.of_node;
+ master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
+ SPI_BPW_MASK(8);
+ master->mem_ops = &ti_qspi_mem_ops;
+
+ if (!of_property_read_u32(np, "num-cs", &num_cs))
+ master->num_chipselect = num_cs;
+
+ qspi = spi_master_get_devdata(master);
+ qspi->master = master;
+ qspi->dev = &pdev->dev;
+ platform_set_drvdata(pdev, qspi);
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_base");
+ if (r == NULL) {
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (r == NULL) {
+ dev_err(&pdev->dev, "missing platform data\n");
+ ret = -ENODEV;
+ goto free_master;
+ }
+ }
+
+ res_mmap = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "qspi_mmap");
+ if (res_mmap == NULL) {
+ res_mmap = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res_mmap == NULL) {
+ dev_err(&pdev->dev,
+ "memory mapped resource not available\n");
+ }
+ }
+
+ if (res_mmap)
+ qspi->mmap_size = resource_size(res_mmap);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto free_master;
+ }
+
+ mutex_init(&qspi->list_lock);
+
+ qspi->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(qspi->base)) {
+ ret = PTR_ERR(qspi->base);
+ goto free_master;
+ }
+
+ if (of_property_read_bool(np, "syscon-chipselects")) {
+ qspi->ctrl_base =
+ syscon_regmap_lookup_by_phandle(np,
+ "syscon-chipselects");
+ if (IS_ERR(qspi->ctrl_base)) {
+ ret = PTR_ERR(qspi->ctrl_base);
+ goto free_master;
+ }
+ ret = of_property_read_u32_index(np,
+ "syscon-chipselects",
+ 1, &qspi->ctrl_reg);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "couldn't get ctrl_mod reg index\n");
+ goto free_master;
+ }
+ }
+
+ qspi->fclk = devm_clk_get(&pdev->dev, "fck");
+ if (IS_ERR(qspi->fclk)) {
+ ret = PTR_ERR(qspi->fclk);
+ dev_err(&pdev->dev, "could not get clk: %d\n", ret);
+ }
+
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, QSPI_AUTOSUSPEND_TIMEOUT);
+ pm_runtime_enable(&pdev->dev);
+
+ if (!of_property_read_u32(np, "spi-max-frequency", &max_freq))
+ master->max_speed_hz = max_freq;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_MEMCPY, mask);
+
+ qspi->rx_chan = dma_request_chan_by_mask(&mask);
+ if (IS_ERR(qspi->rx_chan)) {
+ dev_err(qspi->dev,
+ "No Rx DMA available, trying mmap mode\n");
+ qspi->rx_chan = NULL;
+ ret = 0;
+ goto no_dma;
+ }
+ qspi->rx_bb_addr = dma_alloc_coherent(qspi->dev,
+ QSPI_DMA_BUFFER_SIZE,
+ &qspi->rx_bb_dma_addr,
+ GFP_KERNEL | GFP_DMA);
+ if (!qspi->rx_bb_addr) {
+ dev_err(qspi->dev,
+ "dma_alloc_coherent failed, using PIO mode\n");
+ dma_release_channel(qspi->rx_chan);
+ goto no_dma;
+ }
+ master->dma_rx = qspi->rx_chan;
+ init_completion(&qspi->transfer_complete);
+ if (res_mmap)
+ qspi->mmap_phys_base = (dma_addr_t)res_mmap->start;
+
+no_dma:
+ if (!qspi->rx_chan && res_mmap) {
+ qspi->mmap_base = devm_ioremap_resource(&pdev->dev, res_mmap);
+ if (IS_ERR(qspi->mmap_base)) {
+ dev_info(&pdev->dev,
+ "mmap failed with error %ld, using PIO mode\n",
+ PTR_ERR(qspi->mmap_base));
+ qspi->mmap_base = NULL;
+ master->mem_ops = NULL;
+ }
+ }
+ qspi->mmap_enabled = false;
+ qspi->current_cs = -1;
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (!ret)
+ return 0;
+
+ ti_qspi_dma_cleanup(qspi);
+
+ pm_runtime_disable(&pdev->dev);
+free_master:
+ spi_master_put(master);
+ return ret;
+}
+
+static int ti_qspi_remove(struct platform_device *pdev)
+{
+ struct ti_qspi *qspi = platform_get_drvdata(pdev);
+ int rc;
+
+ rc = spi_master_suspend(qspi->master);
+ if (rc)
+ return rc;
+
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ ti_qspi_dma_cleanup(qspi);
+
+ return 0;
+}
+
+static const struct dev_pm_ops ti_qspi_pm_ops = {
+ .runtime_resume = ti_qspi_runtime_resume,
+};
+
+static struct platform_driver ti_qspi_driver = {
+ .probe = ti_qspi_probe,
+ .remove = ti_qspi_remove,
+ .driver = {
+ .name = "ti-qspi",
+ .pm = &ti_qspi_pm_ops,
+ .of_match_table = ti_qspi_match,
+ }
+};
+
+module_platform_driver(ti_qspi_driver);
+
+MODULE_AUTHOR("Sourav Poddar <sourav.poddar@ti.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI QSPI controller driver");
+MODULE_ALIAS("platform:ti-qspi");
diff --git a/drivers/spi/spi-tle62x0.c b/drivers/spi/spi-tle62x0.c
new file mode 100644
index 000000000..a565352f6
--- /dev/null
+++ b/drivers/spi/spi-tle62x0.c
@@ -0,0 +1,316 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Support Infineon TLE62x0 driver chips
+ *
+ * Copyright (c) 2007 Simtec Electronics
+ * Ben Dooks, <ben@simtec.co.uk>
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi/tle62x0.h>
+
+
+#define CMD_READ 0x00
+#define CMD_SET 0xff
+
+#define DIAG_NORMAL 0x03
+#define DIAG_OVERLOAD 0x02
+#define DIAG_OPEN 0x01
+#define DIAG_SHORTGND 0x00
+
+struct tle62x0_state {
+ struct spi_device *us;
+ struct mutex lock;
+ unsigned int nr_gpio;
+ unsigned int gpio_state;
+
+ unsigned char tx_buff[4];
+ unsigned char rx_buff[4];
+};
+
+static int to_gpio_num(struct device_attribute *attr);
+
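+/*
+ * Push the cached gpio_state out to the chip: CMD_SET followed by one
+ * state byte for the smaller parts, or two bytes (high byte first) when
+ * nr_gpio is 16.
+ */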
+static inline int tle62x0_write(struct tle62x0_state *st)
+{
+ unsigned char *buff = st->tx_buff;
+ unsigned int gpio_state = st->gpio_state;
+
+ buff[0] = CMD_SET;
+
+ if (st->nr_gpio == 16) {
+ buff[1] = gpio_state >> 8;
+ buff[2] = gpio_state;
+ } else {
+ buff[1] = gpio_state;
+ }
+
+ dev_dbg(&st->us->dev, "buff %3ph\n", buff);
+
+ return spi_write(st->us, buff, (st->nr_gpio == 16) ? 3 : 2);
+}
+
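+/*
+ * Read back the diagnosis bits: a full-duplex transfer of CMD_READ plus
+ * padding, sized at two status bits per output (nr_gpio * 2 / 8 bytes);
+ * the reply lands in st->rx_buff for tle62x0_status_show() to decode.
+ */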
+static inline int tle62x0_read(struct tle62x0_state *st)
+{
+ unsigned char *txbuff = st->tx_buff;
+ struct spi_transfer xfer = {
+ .tx_buf = txbuff,
+ .rx_buf = st->rx_buff,
+ .len = (st->nr_gpio * 2) / 8,
+ };
+ struct spi_message msg;
+
+ txbuff[0] = CMD_READ;
+ txbuff[1] = 0x00;
+ txbuff[2] = 0x00;
+ txbuff[3] = 0x00;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+
+ return spi_sync(st->us, &msg);
+}
+
+static const char *decode_fault(unsigned int fault_code)
+{
+ fault_code &= 3;
+
+ switch (fault_code) {
+ case DIAG_NORMAL:
+ return "N";
+ case DIAG_OVERLOAD:
+ return "V";
+ case DIAG_OPEN:
+ return "O";
+ case DIAG_SHORTGND:
+ return "G";
+ }
+
+ return "?";
+}
+
+static ssize_t tle62x0_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tle62x0_state *st = dev_get_drvdata(dev);
+ char *bp = buf;
+ unsigned char *buff = st->rx_buff;
+ unsigned long fault = 0;
+ int ptr;
+ int ret;
+
+ mutex_lock(&st->lock);
+ ret = tle62x0_read(st);
+ dev_dbg(dev, "tle62x0_read() returned %d\n", ret);
+ if (ret < 0) {
+ mutex_unlock(&st->lock);
+ return ret;
+ }
+
+ for (ptr = 0; ptr < (st->nr_gpio * 2)/8; ptr += 1) {
+ fault <<= 8;
+ fault |= ((unsigned long)buff[ptr]);
+
+ dev_dbg(dev, "byte %d is %02x\n", ptr, buff[ptr]);
+ }
+
+ for (ptr = 0; ptr < st->nr_gpio; ptr++) {
+ bp += sprintf(bp, "%s ", decode_fault(fault >> (ptr * 2)));
+ }
+
+ *bp++ = '\n';
+
+ mutex_unlock(&st->lock);
+ return bp - buf;
+}
+
+static DEVICE_ATTR(status_show, S_IRUGO, tle62x0_status_show, NULL);
+
+static ssize_t tle62x0_gpio_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tle62x0_state *st = dev_get_drvdata(dev);
+ int gpio_num = to_gpio_num(attr);
+ int value;
+
+ mutex_lock(&st->lock);
+ value = (st->gpio_state >> gpio_num) & 1;
+ mutex_unlock(&st->lock);
+
+ return sysfs_emit(buf, "%d", value);
+}
+
+static ssize_t tle62x0_gpio_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct tle62x0_state *st = dev_get_drvdata(dev);
+ int gpio_num = to_gpio_num(attr);
+ unsigned long val;
+ char *endp;
+
+ val = simple_strtoul(buf, &endp, 0);
+ if (buf == endp)
+ return -EINVAL;
+
+ dev_dbg(dev, "setting gpio %d to %ld\n", gpio_num, val);
+
+ mutex_lock(&st->lock);
+
+ if (val)
+ st->gpio_state |= 1 << gpio_num;
+ else
+ st->gpio_state &= ~(1 << gpio_num);
+
+ tle62x0_write(st);
+ mutex_unlock(&st->lock);
+
+ return len;
+}
+
+static DEVICE_ATTR(gpio1, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio2, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio3, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio4, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio5, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio6, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio7, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio8, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio9, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio10, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio11, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio12, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio13, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio14, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio15, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio16, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+
+static struct device_attribute *gpio_attrs[] = {
+ [0] = &dev_attr_gpio1,
+ [1] = &dev_attr_gpio2,
+ [2] = &dev_attr_gpio3,
+ [3] = &dev_attr_gpio4,
+ [4] = &dev_attr_gpio5,
+ [5] = &dev_attr_gpio6,
+ [6] = &dev_attr_gpio7,
+ [7] = &dev_attr_gpio8,
+ [8] = &dev_attr_gpio9,
+ [9] = &dev_attr_gpio10,
+ [10] = &dev_attr_gpio11,
+ [11] = &dev_attr_gpio12,
+ [12] = &dev_attr_gpio13,
+ [13] = &dev_attr_gpio14,
+ [14] = &dev_attr_gpio15,
+ [15] = &dev_attr_gpio16
+};
+
+static int to_gpio_num(struct device_attribute *attr)
+{
+ int ptr;
+
+ for (ptr = 0; ptr < ARRAY_SIZE(gpio_attrs); ptr++) {
+ if (gpio_attrs[ptr] == attr)
+ return ptr;
+ }
+
+ return -1;
+}
+
+static int tle62x0_probe(struct spi_device *spi)
+{
+ struct tle62x0_state *st;
+ struct tle62x0_pdata *pdata;
+ int ptr;
+ int ret;
+
+ pdata = dev_get_platdata(&spi->dev);
+ if (pdata == NULL) {
+ dev_err(&spi->dev, "no device data specified\n");
+ return -EINVAL;
+ }
+
+ st = kzalloc(sizeof(struct tle62x0_state), GFP_KERNEL);
+ if (st == NULL)
+ return -ENOMEM;
+
+ st->us = spi;
+ st->nr_gpio = pdata->gpio_count;
+ st->gpio_state = pdata->init_state;
+
+ mutex_init(&st->lock);
+
+ ret = device_create_file(&spi->dev, &dev_attr_status_show);
+ if (ret) {
+ dev_err(&spi->dev, "cannot create status attribute\n");
+ goto err_status;
+ }
+
+ for (ptr = 0; ptr < pdata->gpio_count; ptr++) {
+ ret = device_create_file(&spi->dev, gpio_attrs[ptr]);
+ if (ret) {
+ dev_err(&spi->dev, "cannot create gpio attribute\n");
+ goto err_gpios;
+ }
+ }
+
+ /* tle62x0_write(st); */
+ spi_set_drvdata(spi, st);
+ return 0;
+
+ err_gpios:
+ while (--ptr >= 0)
+ device_remove_file(&spi->dev, gpio_attrs[ptr]);
+
+ device_remove_file(&spi->dev, &dev_attr_status_show);
+
+ err_status:
+ kfree(st);
+ return ret;
+}
+
+static void tle62x0_remove(struct spi_device *spi)
+{
+ struct tle62x0_state *st = spi_get_drvdata(spi);
+ int ptr;
+
+ for (ptr = 0; ptr < st->nr_gpio; ptr++)
+ device_remove_file(&spi->dev, gpio_attrs[ptr]);
+
+ device_remove_file(&spi->dev, &dev_attr_status_show);
+ kfree(st);
+}
+
+static struct spi_driver tle62x0_driver = {
+ .driver = {
+ .name = "tle62x0",
+ },
+ .probe = tle62x0_probe,
+ .remove = tle62x0_remove,
+};
+
+module_spi_driver(tle62x0_driver);
+
+MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
+MODULE_DESCRIPTION("TLE62x0 SPI driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:tle62x0");
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
new file mode 100644
index 000000000..cbb60198a
--- /dev/null
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -0,0 +1,1684 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * SPI bus driver for the Topcliff PCH used by Intel SoCs
+ *
+ * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
+ */
+
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/wait.h>
+#include <linux/spi/spi.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/spi/spidev.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+
+#include <linux/dmaengine.h>
+#include <linux/pch_dma.h>
+
+/* Register offsets */
+#define PCH_SPCR 0x00 /* SPI control register */
+#define PCH_SPBRR 0x04 /* SPI baud rate register */
+#define PCH_SPSR 0x08 /* SPI status register */
+#define PCH_SPDWR 0x0C /* SPI write data register */
+#define PCH_SPDRR 0x10 /* SPI read data register */
+#define PCH_SSNXCR 0x18 /* SSN Expand Control Register */
+#define PCH_SRST 0x1C /* SPI reset register */
+#define PCH_ADDRESS_SIZE 0x20
+
+#define PCH_SPSR_TFD 0x000007C0
+#define PCH_SPSR_RFD 0x0000F800
+
+#define PCH_READABLE(x) (((x) & PCH_SPSR_RFD)>>11)
+#define PCH_WRITABLE(x) (((x) & PCH_SPSR_TFD)>>6)
+
+#define PCH_RX_THOLD 7
+#define PCH_RX_THOLD_MAX 15
+
+#define PCH_TX_THOLD 2
+
+#define PCH_MAX_BAUDRATE 5000000
+#define PCH_MAX_FIFO_DEPTH 16
+
+#define STATUS_RUNNING 1
+#define STATUS_EXITING 2
+#define PCH_SLEEP_TIME 10
+
+#define SSN_LOW 0x02U
+#define SSN_HIGH 0x03U
+#define SSN_NO_CONTROL 0x00U
+#define PCH_MAX_CS 0xFF
+#define PCI_DEVICE_ID_GE_SPI 0x8816
+
+#define SPCR_SPE_BIT (1 << 0)
+#define SPCR_MSTR_BIT (1 << 1)
+#define SPCR_LSBF_BIT (1 << 4)
+#define SPCR_CPHA_BIT (1 << 5)
+#define SPCR_CPOL_BIT (1 << 6)
+#define SPCR_TFIE_BIT (1 << 8)
+#define SPCR_RFIE_BIT (1 << 9)
+#define SPCR_FIE_BIT (1 << 10)
+#define SPCR_ORIE_BIT (1 << 11)
+#define SPCR_MDFIE_BIT (1 << 12)
+#define SPCR_FICLR_BIT (1 << 24)
+#define SPSR_TFI_BIT (1 << 0)
+#define SPSR_RFI_BIT (1 << 1)
+#define SPSR_FI_BIT (1 << 2)
+#define SPSR_ORF_BIT (1 << 3)
+#define SPBRR_SIZE_BIT (1 << 10)
+
+#define PCH_ALL (SPCR_TFIE_BIT|SPCR_RFIE_BIT|SPCR_FIE_BIT|\
+ SPCR_ORIE_BIT|SPCR_MDFIE_BIT)
+
+#define SPCR_RFIC_FIELD 20
+#define SPCR_TFIC_FIELD 16
+
+#define MASK_SPBRR_SPBR_BITS ((1 << 10) - 1)
+#define MASK_RFIC_SPCR_BITS (0xf << SPCR_RFIC_FIELD)
+#define MASK_TFIC_SPCR_BITS (0xf << SPCR_TFIC_FIELD)
+
+#define PCH_CLOCK_HZ 50000000
+#define PCH_MAX_SPBR 1023
+
+/* Definition for ML7213/ML7223/ML7831 by LAPIS Semiconductor */
+#define PCI_DEVICE_ID_ML7213_SPI 0x802c
+#define PCI_DEVICE_ID_ML7223_SPI 0x800F
+#define PCI_DEVICE_ID_ML7831_SPI 0x8816
+
+/*
+ * Set the number of SPI instance max
+ * Intel EG20T PCH : 1ch
+ * LAPIS Semiconductor ML7213 IOH : 2ch
+ * LAPIS Semiconductor ML7223 IOH : 1ch
+ * LAPIS Semiconductor ML7831 IOH : 1ch
+ */
+#define PCH_SPI_MAX_DEV 2
+
+#define PCH_BUF_SIZE 4096
+#define PCH_DMA_TRANS_SIZE 12
+
+static int use_dma = 1;
+
+struct pch_spi_dma_ctrl {
+ struct pci_dev *dma_dev;
+ struct dma_async_tx_descriptor *desc_tx;
+ struct dma_async_tx_descriptor *desc_rx;
+ struct pch_dma_slave param_tx;
+ struct pch_dma_slave param_rx;
+ struct dma_chan *chan_tx;
+ struct dma_chan *chan_rx;
+ struct scatterlist *sg_tx_p;
+ struct scatterlist *sg_rx_p;
+ struct scatterlist sg_tx;
+ struct scatterlist sg_rx;
+ int nent;
+ void *tx_buf_virt;
+ void *rx_buf_virt;
+ dma_addr_t tx_buf_dma;
+ dma_addr_t rx_buf_dma;
+};
+/**
+ * struct pch_spi_data - Holds the SPI channel specific details
+ * @io_remap_addr: The remapped PCI base address
+ * @io_base_addr: Base address
+ * @master: Pointer to the SPI master structure
+ * @work: Reference to work queue handler
+ * @wait: Wait queue for waking up upon receiving an
+ * interrupt.
+ * @transfer_complete: Status of SPI Transfer
+ * @bcurrent_msg_processing: Status flag for message processing
+ * @lock: Lock for protecting this structure
+ * @queue: SPI Message queue
+ * @status: Status of the SPI driver
+ * @bpw_len: Length of data to be transferred in bits per
+ * word
+ * @transfer_active: Flag showing active transfer
+ * @tx_index: Transmit data count; for bookkeeping during
+ * transfer
+ * @rx_index: Receive data count; for bookkeeping during
+ * transfer
+ * @pkt_tx_buff: Buffer for data to be transmitted
+ * @pkt_rx_buff: Buffer for received data
+ * @n_curnt_chip: The chip number that this SPI driver currently
+ * operates on
+ * @current_chip: Reference to the current chip that this SPI
+ * driver currently operates on
+ * @current_msg: The current message that this SPI driver is
+ * handling
+ * @cur_trans: The current transfer that this SPI driver is
+ * handling
+ * @board_dat: Reference to the SPI device data structure
+ * @plat_dev: platform_device structure
+ * @ch: SPI channel number
+ * @dma: Local DMA information
+ * @use_dma: True if DMA is to be used
+ * @irq_reg_sts: Status of IRQ registration
+ * @save_total_len: Save length while data is being transferred
+ */
+struct pch_spi_data {
+ void __iomem *io_remap_addr;
+ unsigned long io_base_addr;
+ struct spi_master *master;
+ struct work_struct work;
+ wait_queue_head_t wait;
+ u8 transfer_complete;
+ u8 bcurrent_msg_processing;
+ spinlock_t lock;
+ struct list_head queue;
+ u8 status;
+ u32 bpw_len;
+ u8 transfer_active;
+ u32 tx_index;
+ u32 rx_index;
+ u16 *pkt_tx_buff;
+ u16 *pkt_rx_buff;
+ u8 n_curnt_chip;
+ struct spi_device *current_chip;
+ struct spi_message *current_msg;
+ struct spi_transfer *cur_trans;
+ struct pch_spi_board_data *board_dat;
+ struct platform_device *plat_dev;
+ int ch;
+ struct pch_spi_dma_ctrl dma;
+ int use_dma;
+ u8 irq_reg_sts;
+ int save_total_len;
+};
+
+/**
+ * struct pch_spi_board_data - Holds the SPI device specific details
+ * @pdev: Pointer to the PCI device
+ * @suspend_sts: Status of suspend
+ * @num: The number of SPI device instance
+ */
+struct pch_spi_board_data {
+ struct pci_dev *pdev;
+ u8 suspend_sts;
+ int num;
+};
+
+struct pch_pd_dev_save {
+ int num;
+ struct platform_device *pd_save[PCH_SPI_MAX_DEV];
+ struct pch_spi_board_data *board_dat;
+};
+
+static const struct pci_device_id pch_spi_pcidev_id[] = {
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_GE_SPI), 1, },
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_SPI), 2, },
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_SPI), 1, },
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_SPI), 1, },
+ { }
+};
+
+/**
+ * pch_spi_writereg() - Performs register writes
+ * @master: Pointer to struct spi_master.
+ * @idx: Register offset.
+ * @val: Value to be written to register.
+ */
+static inline void pch_spi_writereg(struct spi_master *master, int idx, u32 val)
+{
+ struct pch_spi_data *data = spi_master_get_devdata(master);
+ iowrite32(val, (data->io_remap_addr + idx));
+}
+
+/**
+ * pch_spi_readreg() - Performs register reads
+ * @master: Pointer to struct spi_master.
+ * @idx: Register offset.
+ */
+static inline u32 pch_spi_readreg(struct spi_master *master, int idx)
+{
+ struct pch_spi_data *data = spi_master_get_devdata(master);
+ return ioread32(data->io_remap_addr + idx);
+}
+
+static inline void pch_spi_setclr_reg(struct spi_master *master, int idx,
+ u32 set, u32 clr)
+{
+ u32 tmp = pch_spi_readreg(master, idx);
+ tmp = (tmp & ~clr) | set;
+ pch_spi_writereg(master, idx, tmp);
+}
+
+static void pch_spi_set_master_mode(struct spi_master *master)
+{
+ pch_spi_setclr_reg(master, PCH_SPCR, SPCR_MSTR_BIT, 0);
+}
+
+/**
+ * pch_spi_clear_fifo() - Clears the Transmit and Receive FIFOs
+ * @master: Pointer to struct spi_master.
+ */
+static void pch_spi_clear_fifo(struct spi_master *master)
+{
+ pch_spi_setclr_reg(master, PCH_SPCR, SPCR_FICLR_BIT, 0);
+ pch_spi_setclr_reg(master, PCH_SPCR, 0, SPCR_FICLR_BIT);
+}
+
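+/*
+ * Interrupt service helper for PIO transfers: acknowledge SPSR, drain the
+ * receive FIFO while topping up the transmit FIFO, drop the RX-threshold
+ * interrupt once the remainder fits in the FIFO, and signal completion
+ * when the final-interrupt bit shows both indexes have reached bpw_len.
+ */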
+static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val,
+ void __iomem *io_remap_addr)
+{
+ u32 n_read, tx_index, rx_index, bpw_len;
+ u16 *pkt_rx_buffer, *pkt_tx_buff;
+ int read_cnt;
+ u32 reg_spcr_val;
+ void __iomem *spsr;
+ void __iomem *spdrr;
+ void __iomem *spdwr;
+
+ spsr = io_remap_addr + PCH_SPSR;
+ iowrite32(reg_spsr_val, spsr);
+
+ if (data->transfer_active) {
+ rx_index = data->rx_index;
+ tx_index = data->tx_index;
+ bpw_len = data->bpw_len;
+ pkt_rx_buffer = data->pkt_rx_buff;
+ pkt_tx_buff = data->pkt_tx_buff;
+
+ spdrr = io_remap_addr + PCH_SPDRR;
+ spdwr = io_remap_addr + PCH_SPDWR;
+
+ n_read = PCH_READABLE(reg_spsr_val);
+
+ for (read_cnt = 0; (read_cnt < n_read); read_cnt++) {
+ pkt_rx_buffer[rx_index++] = ioread32(spdrr);
+ if (tx_index < bpw_len)
+ iowrite32(pkt_tx_buff[tx_index++], spdwr);
+ }
+
+ /* disable RFI if not needed */
+ if ((bpw_len - rx_index) <= PCH_MAX_FIFO_DEPTH) {
+ reg_spcr_val = ioread32(io_remap_addr + PCH_SPCR);
+ reg_spcr_val &= ~SPCR_RFIE_BIT; /* disable RFI */
+
+ /* reset rx threshold */
+ reg_spcr_val &= ~MASK_RFIC_SPCR_BITS;
+ reg_spcr_val |= (PCH_RX_THOLD_MAX << SPCR_RFIC_FIELD);
+
+ iowrite32(reg_spcr_val, (io_remap_addr + PCH_SPCR));
+ }
+
+ /* update counts */
+ data->tx_index = tx_index;
+ data->rx_index = rx_index;
+
+ /* if transfer complete interrupt */
+ if (reg_spsr_val & SPSR_FI_BIT) {
+ if ((tx_index == bpw_len) && (rx_index == tx_index)) {
+ /* disable interrupts */
+ pch_spi_setclr_reg(data->master, PCH_SPCR, 0,
+ PCH_ALL);
+
+ /* transfer is completed;
+ inform pch_spi_process_messages */
+ data->transfer_complete = true;
+ data->transfer_active = false;
+ wake_up(&data->wait);
+ } else {
+ dev_vdbg(&data->master->dev,
+ "%s : Transfer is not completed",
+ __func__);
+ }
+ }
+ }
+}
+
+/**
+ * pch_spi_handler() - Interrupt handler
+ * @irq: The interrupt number.
+ * @dev_id: Pointer to struct pch_spi_board_data.
+ */
+static irqreturn_t pch_spi_handler(int irq, void *dev_id)
+{
+ u32 reg_spsr_val;
+ void __iomem *spsr;
+ void __iomem *io_remap_addr;
+ irqreturn_t ret = IRQ_NONE;
+ struct pch_spi_data *data = dev_id;
+ struct pch_spi_board_data *board_dat = data->board_dat;
+
+ if (board_dat->suspend_sts) {
+ dev_dbg(&board_dat->pdev->dev,
+ "%s returning due to suspend\n", __func__);
+ return IRQ_NONE;
+ }
+
+ io_remap_addr = data->io_remap_addr;
+ spsr = io_remap_addr + PCH_SPSR;
+
+ reg_spsr_val = ioread32(spsr);
+
+ if (reg_spsr_val & SPSR_ORF_BIT) {
+ dev_err(&board_dat->pdev->dev, "%s Over run error\n", __func__);
+ if (data->current_msg->complete) {
+ data->transfer_complete = true;
+ data->current_msg->status = -EIO;
+ data->current_msg->complete(data->current_msg->context);
+ data->bcurrent_msg_processing = false;
+ data->current_msg = NULL;
+ data->cur_trans = NULL;
+ }
+ }
+
+ if (data->use_dma)
+ return IRQ_NONE;
+
+ /* Check if the interrupt is for SPI device */
+ if (reg_spsr_val & (SPSR_FI_BIT | SPSR_RFI_BIT)) {
+ pch_spi_handler_sub(data, reg_spsr_val, io_remap_addr);
+ ret = IRQ_HANDLED;
+ }
+
+ dev_dbg(&board_dat->pdev->dev, "%s EXIT return value=%d\n",
+ __func__, ret);
+
+ return ret;
+}
+
+/**
+ * pch_spi_set_baud_rate() - Sets SPBR field in SPBRR
+ * @master: Pointer to struct spi_master.
+ * @speed_hz: Baud rate.
+ */
+static void pch_spi_set_baud_rate(struct spi_master *master, u32 speed_hz)
+{
+ u32 n_spbr = PCH_CLOCK_HZ / (speed_hz * 2);
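+ /*
+ * Example: a 5 MHz request gives n_spbr = 50000000 / (5000000 * 2) = 5,
+ * i.e. the resulting bit clock is PCH_CLOCK_HZ / (2 * n_spbr).
+ */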
+
+ /* if the requested baud rate is lower than we can support, clamp the divider */
+ if (n_spbr > PCH_MAX_SPBR)
+ n_spbr = PCH_MAX_SPBR;
+
+ pch_spi_setclr_reg(master, PCH_SPBRR, n_spbr, MASK_SPBRR_SPBR_BITS);
+}
+
+/**
+ * pch_spi_set_bits_per_word() - Sets SIZE field in SPBRR
+ * @master: Pointer to struct spi_master.
+ * @bits_per_word: Bits per word for SPI transfer.
+ */
+static void pch_spi_set_bits_per_word(struct spi_master *master,
+ u8 bits_per_word)
+{
+ if (bits_per_word == 8)
+ pch_spi_setclr_reg(master, PCH_SPBRR, 0, SPBRR_SIZE_BIT);
+ else
+ pch_spi_setclr_reg(master, PCH_SPBRR, SPBRR_SIZE_BIT, 0);
+}
+
+/**
+ * pch_spi_setup_transfer() - Configures the PCH SPI hardware for transfer
+ * @spi: Pointer to struct spi_device.
+ */
+static void pch_spi_setup_transfer(struct spi_device *spi)
+{
+ u32 flags = 0;
+
+ dev_dbg(&spi->dev, "%s SPBRR content =%x setting baud rate=%d\n",
+ __func__, pch_spi_readreg(spi->master, PCH_SPBRR),
+ spi->max_speed_hz);
+ pch_spi_set_baud_rate(spi->master, spi->max_speed_hz);
+
+ /* set bits per word */
+ pch_spi_set_bits_per_word(spi->master, spi->bits_per_word);
+
+ if (!(spi->mode & SPI_LSB_FIRST))
+ flags |= SPCR_LSBF_BIT;
+ if (spi->mode & SPI_CPOL)
+ flags |= SPCR_CPOL_BIT;
+ if (spi->mode & SPI_CPHA)
+ flags |= SPCR_CPHA_BIT;
+ pch_spi_setclr_reg(spi->master, PCH_SPCR, flags,
+ (SPCR_LSBF_BIT | SPCR_CPOL_BIT | SPCR_CPHA_BIT));
+
+ /* Clear the FIFO by toggling FICLR to 1 and back to 0 */
+ pch_spi_clear_fifo(spi->master);
+}
+
+/**
+ * pch_spi_reset() - Clears SPI registers
+ * @master: Pointer to struct spi_master.
+ */
+static void pch_spi_reset(struct spi_master *master)
+{
+ /* write 1 to reset SPI */
+ pch_spi_writereg(master, PCH_SRST, 0x1);
+
+ /* clear reset */
+ pch_spi_writereg(master, PCH_SRST, 0x0);
+}
+
+static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg)
+{
+ struct pch_spi_data *data = spi_master_get_devdata(pspi->master);
+ int retval;
+ unsigned long flags;
+
+ /* We won't process any messages if we have been asked to terminate */
+ if (data->status == STATUS_EXITING) {
+ dev_err(&pspi->dev, "%s status = STATUS_EXITING.\n", __func__);
+ retval = -ESHUTDOWN;
+ goto err_out;
+ }
+
+ /* If suspended, return -EINVAL */
+ if (data->board_dat->suspend_sts) {
+ dev_err(&pspi->dev, "%s suspend; returning EINVAL\n", __func__);
+ retval = -EINVAL;
+ goto err_out;
+ }
+
+ /* set status of message */
+ pmsg->actual_length = 0;
+ dev_dbg(&pspi->dev, "%s - pmsg->status =%d\n", __func__, pmsg->status);
+
+ pmsg->status = -EINPROGRESS;
+ spin_lock_irqsave(&data->lock, flags);
+ /* add message to queue */
+ list_add_tail(&pmsg->queue, &data->queue);
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ dev_dbg(&pspi->dev, "%s - Invoked list_add_tail\n", __func__);
+
+ schedule_work(&data->work);
+ dev_dbg(&pspi->dev, "%s - Invoked queue work\n", __func__);
+
+ retval = 0;
+
+err_out:
+ dev_dbg(&pspi->dev, "%s RETURN=%d\n", __func__, retval);
+ return retval;
+}
+
+static inline void pch_spi_select_chip(struct pch_spi_data *data,
+ struct spi_device *pspi)
+{
+ if (data->current_chip != NULL) {
+ if (pspi->chip_select != data->n_curnt_chip) {
+ dev_dbg(&pspi->dev, "%s : different slave\n", __func__);
+ data->current_chip = NULL;
+ }
+ }
+
+ data->current_chip = pspi;
+
+ data->n_curnt_chip = data->current_chip->chip_select;
+
+ dev_dbg(&pspi->dev, "%s :Invoking pch_spi_setup_transfer\n", __func__);
+ pch_spi_setup_transfer(pspi);
+}
+
+static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw)
+{
+ int size;
+ u32 n_writes;
+ int j;
+ struct spi_message *pmsg, *tmp;
+ const u8 *tx_buf;
+ const u16 *tx_sbuf;
+
+ /* set baud rate if needed */
+ if (data->cur_trans->speed_hz) {
+ dev_dbg(&data->master->dev, "%s:setting baud rate\n", __func__);
+ pch_spi_set_baud_rate(data->master, data->cur_trans->speed_hz);
+ }
+
+ /* set bits per word if needed */
+ if (data->cur_trans->bits_per_word &&
+ (data->current_msg->spi->bits_per_word != data->cur_trans->bits_per_word)) {
+ dev_dbg(&data->master->dev, "%s:set bits per word\n", __func__);
+ pch_spi_set_bits_per_word(data->master,
+ data->cur_trans->bits_per_word);
+ *bpw = data->cur_trans->bits_per_word;
+ } else {
+ *bpw = data->current_msg->spi->bits_per_word;
+ }
+
+ /* reset Tx/Rx index */
+ data->tx_index = 0;
+ data->rx_index = 0;
+
+ data->bpw_len = data->cur_trans->len / (*bpw / 8);
+
+ /* find alloc size */
+ size = data->cur_trans->len * sizeof(*data->pkt_tx_buff);
+
+ /* allocate memory for pkt_tx_buff & pkt_rx_buffer */
+ data->pkt_tx_buff = kzalloc(size, GFP_KERNEL);
+ if (data->pkt_tx_buff != NULL) {
+ data->pkt_rx_buff = kzalloc(size, GFP_KERNEL);
+ if (!data->pkt_rx_buff) {
+ kfree(data->pkt_tx_buff);
+ data->pkt_tx_buff = NULL;
+ }
+ }
+
+ if (!data->pkt_rx_buff) {
+ /* flush queue and set status of all transfers to -ENOMEM */
+ list_for_each_entry_safe(pmsg, tmp, data->queue.next, queue) {
+ pmsg->status = -ENOMEM;
+
+ if (pmsg->complete)
+ pmsg->complete(pmsg->context);
+
+ /* delete from queue */
+ list_del_init(&pmsg->queue);
+ }
+ return;
+ }
+
+ /* copy Tx Data */
+ if (data->cur_trans->tx_buf != NULL) {
+ if (*bpw == 8) {
+ tx_buf = data->cur_trans->tx_buf;
+ for (j = 0; j < data->bpw_len; j++)
+ data->pkt_tx_buff[j] = *tx_buf++;
+ } else {
+ tx_sbuf = data->cur_trans->tx_buf;
+ for (j = 0; j < data->bpw_len; j++)
+ data->pkt_tx_buff[j] = *tx_sbuf++;
+ }
+ }
+
+ /* write at most PCH_MAX_FIFO_DEPTH (16) words to prime the FIFO */
+ n_writes = data->bpw_len;
+ if (n_writes > PCH_MAX_FIFO_DEPTH)
+ n_writes = PCH_MAX_FIFO_DEPTH;
+
+ dev_dbg(&data->master->dev,
+ "\n%s:Pulling down SSN low - writing 0x2 to SSNXCR\n",
+ __func__);
+ pch_spi_writereg(data->master, PCH_SSNXCR, SSN_LOW);
+
+ for (j = 0; j < n_writes; j++)
+ pch_spi_writereg(data->master, PCH_SPDWR, data->pkt_tx_buff[j]);
+
+ /* update tx_index */
+ data->tx_index = j;
+
+ /* reset transfer complete flag */
+ data->transfer_complete = false;
+ data->transfer_active = true;
+}
+
+static void pch_spi_nomore_transfer(struct pch_spi_data *data)
+{
+ struct spi_message *pmsg, *tmp;
+ dev_dbg(&data->master->dev, "%s called\n", __func__);
+ /* Invoke the completion callback to tell the SPI core that the
+ * transfer has finished. */
+ data->current_msg->status = 0;
+
+ if (data->current_msg->complete) {
+ dev_dbg(&data->master->dev,
+ "%s:Invoking callback of SPI core\n", __func__);
+ data->current_msg->complete(data->current_msg->context);
+ }
+
+ /* update status in global variable */
+ data->bcurrent_msg_processing = false;
+
+ dev_dbg(&data->master->dev,
+ "%s:data->bcurrent_msg_processing = false\n", __func__);
+
+ data->current_msg = NULL;
+ data->cur_trans = NULL;
+
+ /* Reschedule the work if the queue still has entries and we are not
+ * suspending or exiting. */
+ if ((list_empty(&data->queue) == 0) &&
+ (!data->board_dat->suspend_sts) &&
+ (data->status != STATUS_EXITING)) {
+ /* We have some more work to do (either there are more transfer
+ * requests in the current message or there are more messages
+ * queued).
+ */
+ dev_dbg(&data->master->dev, "%s:Invoke queue_work\n", __func__);
+ schedule_work(&data->work);
+ } else if (data->board_dat->suspend_sts ||
+ data->status == STATUS_EXITING) {
+ dev_dbg(&data->master->dev,
+ "%s suspend/remove initiated, flushing queue\n",
+ __func__);
+ list_for_each_entry_safe(pmsg, tmp, data->queue.next, queue) {
+ pmsg->status = -EIO;
+
+ if (pmsg->complete)
+ pmsg->complete(pmsg->context);
+
+ /* delete from queue */
+ list_del_init(&pmsg->queue);
+ }
+ }
+}
+
+static void pch_spi_set_ir(struct pch_spi_data *data)
+{
+ /* enable interrupts, set threshold, enable SPI */
+ if ((data->bpw_len) > PCH_MAX_FIFO_DEPTH)
+ /* set receive threshold to PCH_RX_THOLD */
+ pch_spi_setclr_reg(data->master, PCH_SPCR,
+ PCH_RX_THOLD << SPCR_RFIC_FIELD |
+ SPCR_FIE_BIT | SPCR_RFIE_BIT |
+ SPCR_ORIE_BIT | SPCR_SPE_BIT,
+ MASK_RFIC_SPCR_BITS | PCH_ALL);
+ else
+ /* set receive threshold to maximum */
+ pch_spi_setclr_reg(data->master, PCH_SPCR,
+ PCH_RX_THOLD_MAX << SPCR_RFIC_FIELD |
+ SPCR_FIE_BIT | SPCR_ORIE_BIT |
+ SPCR_SPE_BIT,
+ MASK_RFIC_SPCR_BITS | PCH_ALL);
+
+ /* Wait until the transfer completes; go to sleep after
+ initiating the transfer. */
+ dev_dbg(&data->master->dev,
+ "%s:waiting for transfer to get over\n", __func__);
+
+ wait_event_interruptible(data->wait, data->transfer_complete);
+
+ /* clear all interrupts */
+ pch_spi_writereg(data->master, PCH_SPSR,
+ pch_spi_readreg(data->master, PCH_SPSR));
+ /* Disable interrupts and SPI transfer */
+ pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL | SPCR_SPE_BIT);
+ /* clear FIFO */
+ pch_spi_clear_fifo(data->master);
+}
+
+static void pch_spi_copy_rx_data(struct pch_spi_data *data, int bpw)
+{
+ int j;
+ u8 *rx_buf;
+ u16 *rx_sbuf;
+
+ /* copy Rx Data */
+ if (!data->cur_trans->rx_buf)
+ return;
+
+ if (bpw == 8) {
+ rx_buf = data->cur_trans->rx_buf;
+ for (j = 0; j < data->bpw_len; j++)
+ *rx_buf++ = data->pkt_rx_buff[j] & 0xFF;
+ } else {
+ rx_sbuf = data->cur_trans->rx_buf;
+ for (j = 0; j < data->bpw_len; j++)
+ *rx_sbuf++ = data->pkt_rx_buff[j];
+ }
+}
+
+static void pch_spi_copy_rx_data_for_dma(struct pch_spi_data *data, int bpw)
+{
+ int j;
+ u8 *rx_buf;
+ u16 *rx_sbuf;
+ const u8 *rx_dma_buf;
+ const u16 *rx_dma_sbuf;
+
+ /* copy Rx Data */
+ if (!data->cur_trans->rx_buf)
+ return;
+
+ if (bpw == 8) {
+ rx_buf = data->cur_trans->rx_buf;
+ rx_dma_buf = data->dma.rx_buf_virt;
+ for (j = 0; j < data->bpw_len; j++)
+ *rx_buf++ = *rx_dma_buf++ & 0xFF;
+ data->cur_trans->rx_buf = rx_buf;
+ } else {
+ rx_sbuf = data->cur_trans->rx_buf;
+ rx_dma_sbuf = data->dma.rx_buf_virt;
+ for (j = 0; j < data->bpw_len; j++)
+ *rx_sbuf++ = *rx_dma_sbuf++;
+ data->cur_trans->rx_buf = rx_sbuf;
+ }
+}
+
+static int pch_spi_start_transfer(struct pch_spi_data *data)
+{
+ struct pch_spi_dma_ctrl *dma;
+ unsigned long flags;
+ int rtn;
+
+ dma = &data->dma;
+
+ spin_lock_irqsave(&data->lock, flags);
+
+ /* disable interrupts, SPI set enable */
+ pch_spi_setclr_reg(data->master, PCH_SPCR, SPCR_SPE_BIT, PCH_ALL);
+
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ /* Wait until the transfer completes; go to sleep after
+ initiating the transfer. */
+ dev_dbg(&data->master->dev,
+ "%s:waiting for transfer to get over\n", __func__);
+ rtn = wait_event_interruptible_timeout(data->wait,
+ data->transfer_complete,
+ msecs_to_jiffies(2 * HZ));
+ if (!rtn)
+ dev_err(&data->master->dev,
+ "%s wait-event timeout\n", __func__);
+
+ dma_sync_sg_for_cpu(&data->master->dev, dma->sg_rx_p, dma->nent,
+ DMA_FROM_DEVICE);
+
+ dma_sync_sg_for_cpu(&data->master->dev, dma->sg_tx_p, dma->nent,
+ DMA_FROM_DEVICE);
+ memset(data->dma.tx_buf_virt, 0, PAGE_SIZE);
+
+ async_tx_ack(dma->desc_rx);
+ async_tx_ack(dma->desc_tx);
+ kfree(dma->sg_tx_p);
+ kfree(dma->sg_rx_p);
+
+ spin_lock_irqsave(&data->lock, flags);
+
+ /* clear fifo threshold, disable interrupts, disable SPI transfer */
+ pch_spi_setclr_reg(data->master, PCH_SPCR, 0,
+ MASK_RFIC_SPCR_BITS | MASK_TFIC_SPCR_BITS | PCH_ALL |
+ SPCR_SPE_BIT);
+ /* clear all interrupts */
+ pch_spi_writereg(data->master, PCH_SPSR,
+ pch_spi_readreg(data->master, PCH_SPSR));
+ /* clear FIFO */
+ pch_spi_clear_fifo(data->master);
+
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ return rtn;
+}
+
+static void pch_dma_rx_complete(void *arg)
+{
+ struct pch_spi_data *data = arg;
+
+ /* transfer is completed; wake up pch_spi_start_transfer() */
+ data->transfer_complete = true;
+ wake_up_interruptible(&data->wait);
+}
+
+static bool pch_spi_filter(struct dma_chan *chan, void *slave)
+{
+ struct pch_dma_slave *param = slave;
+
+ if ((chan->chan_id == param->chan_id) &&
+ (param->dma_dev == chan->device->dev)) {
+ chan->private = param;
+ return true;
+ } else {
+ return false;
+ }
+}
+
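+/*
+ * Grab the pch_dma channels that sit on function 0 of the same PCI slot;
+ * by convention here Tx uses channel 2*ch and Rx uses 2*ch + 1, matched
+ * through pch_spi_filter(). Any failure simply clears data->use_dma so
+ * the PIO path is used instead.
+ */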
+static void pch_spi_request_dma(struct pch_spi_data *data, int bpw)
+{
+ dma_cap_mask_t mask;
+ struct dma_chan *chan;
+ struct pci_dev *dma_dev;
+ struct pch_dma_slave *param;
+ struct pch_spi_dma_ctrl *dma;
+ unsigned int width;
+
+ if (bpw == 8)
+ width = PCH_DMA_WIDTH_1_BYTE;
+ else
+ width = PCH_DMA_WIDTH_2_BYTES;
+
+ dma = &data->dma;
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ /* Get DMA's dev information */
+ dma_dev = pci_get_slot(data->board_dat->pdev->bus,
+ PCI_DEVFN(PCI_SLOT(data->board_dat->pdev->devfn), 0));
+
+ /* Set Tx DMA */
+ param = &dma->param_tx;
+ param->dma_dev = &dma_dev->dev;
+ param->chan_id = data->ch * 2; /* Tx = 0, 2 */
+ param->tx_reg = data->io_base_addr + PCH_SPDWR;
+ param->width = width;
+ chan = dma_request_channel(mask, pch_spi_filter, param);
+ if (!chan) {
+ dev_err(&data->master->dev,
+ "ERROR: dma_request_channel FAILS(Tx)\n");
+ goto out;
+ }
+ dma->chan_tx = chan;
+
+ /* Set Rx DMA */
+ param = &dma->param_rx;
+ param->dma_dev = &dma_dev->dev;
+ param->chan_id = data->ch * 2 + 1; /* Rx = Tx + 1 */
+ param->rx_reg = data->io_base_addr + PCH_SPDRR;
+ param->width = width;
+ chan = dma_request_channel(mask, pch_spi_filter, param);
+ if (!chan) {
+ dev_err(&data->master->dev,
+ "ERROR: dma_request_channel FAILS(Rx)\n");
+ dma_release_channel(dma->chan_tx);
+ dma->chan_tx = NULL;
+ goto out;
+ }
+ dma->chan_rx = chan;
+
+ dma->dma_dev = dma_dev;
+ return;
+out:
+ pci_dev_put(dma_dev);
+ data->use_dma = 0;
+}
+
+static void pch_spi_release_dma(struct pch_spi_data *data)
+{
+ struct pch_spi_dma_ctrl *dma;
+
+ dma = &data->dma;
+ if (dma->chan_tx) {
+ dma_release_channel(dma->chan_tx);
+ dma->chan_tx = NULL;
+ }
+ if (dma->chan_rx) {
+ dma_release_channel(dma->chan_rx);
+ dma->chan_rx = NULL;
+ }
+
+ pci_dev_put(dma->dma_dev);
+}
+
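+/*
+ * Build one DMA round: copy this chunk of Tx data into the coherent bounce
+ * buffer, carve both buffers into PCH_DMA_TRANS_SIZE-sized scatterlist
+ * entries, program the FIFO thresholds to match, and submit the Rx and Tx
+ * descriptors with SSN pulled low.
+ */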
+static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
+{
+ const u8 *tx_buf;
+ const u16 *tx_sbuf;
+ u8 *tx_dma_buf;
+ u16 *tx_dma_sbuf;
+ struct scatterlist *sg;
+ struct dma_async_tx_descriptor *desc_tx;
+ struct dma_async_tx_descriptor *desc_rx;
+ int num;
+ int i;
+ int size;
+ int rem;
+ int head;
+ unsigned long flags;
+ struct pch_spi_dma_ctrl *dma;
+
+ dma = &data->dma;
+
+ /* set baud rate if needed */
+ if (data->cur_trans->speed_hz) {
+ dev_dbg(&data->master->dev, "%s:setting baud rate\n", __func__);
+ spin_lock_irqsave(&data->lock, flags);
+ pch_spi_set_baud_rate(data->master, data->cur_trans->speed_hz);
+ spin_unlock_irqrestore(&data->lock, flags);
+ }
+
+ /* set bits per word if needed */
+ if (data->cur_trans->bits_per_word &&
+ (data->current_msg->spi->bits_per_word !=
+ data->cur_trans->bits_per_word)) {
+ dev_dbg(&data->master->dev, "%s:set bits per word\n", __func__);
+ spin_lock_irqsave(&data->lock, flags);
+ pch_spi_set_bits_per_word(data->master,
+ data->cur_trans->bits_per_word);
+ spin_unlock_irqrestore(&data->lock, flags);
+ *bpw = data->cur_trans->bits_per_word;
+ } else {
+ *bpw = data->current_msg->spi->bits_per_word;
+ }
+ data->bpw_len = data->cur_trans->len / (*bpw / 8);
+
+ if (data->bpw_len > PCH_BUF_SIZE) {
+ data->bpw_len = PCH_BUF_SIZE;
+ data->cur_trans->len -= PCH_BUF_SIZE;
+ }
+
+ /* copy Tx Data */
+ if (data->cur_trans->tx_buf != NULL) {
+ if (*bpw == 8) {
+ tx_buf = data->cur_trans->tx_buf;
+ tx_dma_buf = dma->tx_buf_virt;
+ for (i = 0; i < data->bpw_len; i++)
+ *tx_dma_buf++ = *tx_buf++;
+ } else {
+ tx_sbuf = data->cur_trans->tx_buf;
+ tx_dma_sbuf = dma->tx_buf_virt;
+ for (i = 0; i < data->bpw_len; i++)
+ *tx_dma_sbuf++ = *tx_sbuf++;
+ }
+ }
+
+ /* Calculate Rx parameter for DMA transmitting */
+ if (data->bpw_len > PCH_DMA_TRANS_SIZE) {
+ if (data->bpw_len % PCH_DMA_TRANS_SIZE) {
+ num = data->bpw_len / PCH_DMA_TRANS_SIZE + 1;
+ rem = data->bpw_len % PCH_DMA_TRANS_SIZE;
+ } else {
+ num = data->bpw_len / PCH_DMA_TRANS_SIZE;
+ rem = PCH_DMA_TRANS_SIZE;
+ }
+ size = PCH_DMA_TRANS_SIZE;
+ } else {
+ num = 1;
+ size = data->bpw_len;
+ rem = data->bpw_len;
+ }
+ dev_dbg(&data->master->dev, "%s num=%d size=%d rem=%d\n",
+ __func__, num, size, rem);
+ spin_lock_irqsave(&data->lock, flags);
+
+ /* set receive fifo threshold and transmit fifo threshold */
+ pch_spi_setclr_reg(data->master, PCH_SPCR,
+ ((size - 1) << SPCR_RFIC_FIELD) |
+ (PCH_TX_THOLD << SPCR_TFIC_FIELD),
+ MASK_RFIC_SPCR_BITS | MASK_TFIC_SPCR_BITS);
+
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ /* RX */
+ dma->sg_rx_p = kmalloc_array(num, sizeof(*dma->sg_rx_p), GFP_ATOMIC);
+ if (!dma->sg_rx_p)
+ return;
+
+ sg_init_table(dma->sg_rx_p, num); /* Initialize SG table */
+ /* offset, length setting */
+ sg = dma->sg_rx_p;
+ for (i = 0; i < num; i++, sg++) {
+ if (i == (num - 2)) {
+ sg->offset = size * i;
+ sg->offset = sg->offset * (*bpw / 8);
+ sg_set_page(sg, virt_to_page(dma->rx_buf_virt), rem,
+ sg->offset);
+ sg_dma_len(sg) = rem;
+ } else if (i == (num - 1)) {
+ sg->offset = size * (i - 1) + rem;
+ sg->offset = sg->offset * (*bpw / 8);
+ sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size,
+ sg->offset);
+ sg_dma_len(sg) = size;
+ } else {
+ sg->offset = size * i;
+ sg->offset = sg->offset * (*bpw / 8);
+ sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size,
+ sg->offset);
+ sg_dma_len(sg) = size;
+ }
+ sg_dma_address(sg) = dma->rx_buf_dma + sg->offset;
+ }
+ sg = dma->sg_rx_p;
+ desc_rx = dmaengine_prep_slave_sg(dma->chan_rx, sg,
+ num, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc_rx) {
+ dev_err(&data->master->dev,
+ "%s:dmaengine_prep_slave_sg Failed\n", __func__);
+ return;
+ }
+ dma_sync_sg_for_device(&data->master->dev, sg, num, DMA_FROM_DEVICE);
+ desc_rx->callback = pch_dma_rx_complete;
+ desc_rx->callback_param = data;
+ dma->nent = num;
+ dma->desc_rx = desc_rx;
+
+ /* Calculate Tx parameter for DMA transmitting */
+ if (data->bpw_len > PCH_MAX_FIFO_DEPTH) {
+ head = PCH_MAX_FIFO_DEPTH - PCH_DMA_TRANS_SIZE;
+ if (data->bpw_len % PCH_DMA_TRANS_SIZE > 4) {
+ num = data->bpw_len / PCH_DMA_TRANS_SIZE + 1;
+ rem = data->bpw_len % PCH_DMA_TRANS_SIZE - head;
+ } else {
+ num = data->bpw_len / PCH_DMA_TRANS_SIZE;
+ rem = data->bpw_len % PCH_DMA_TRANS_SIZE +
+ PCH_DMA_TRANS_SIZE - head;
+ }
+ size = PCH_DMA_TRANS_SIZE;
+ } else {
+ num = 1;
+ size = data->bpw_len;
+ rem = data->bpw_len;
+ head = 0;
+ }
+
+ dma->sg_tx_p = kmalloc_array(num, sizeof(*dma->sg_tx_p), GFP_ATOMIC);
+ if (!dma->sg_tx_p)
+ return;
+
+ sg_init_table(dma->sg_tx_p, num); /* Initialize SG table */
+ /* offset, length setting */
+ sg = dma->sg_tx_p;
+ for (i = 0; i < num; i++, sg++) {
+ if (i == 0) {
+ sg->offset = 0;
+ sg_set_page(sg, virt_to_page(dma->tx_buf_virt), size + head,
+ sg->offset);
+ sg_dma_len(sg) = size + head;
+ } else if (i == (num - 1)) {
+ sg->offset = head + size * i;
+ sg->offset = sg->offset * (*bpw / 8);
+ sg_set_page(sg, virt_to_page(dma->tx_buf_virt), rem,
+ sg->offset);
+ sg_dma_len(sg) = rem;
+ } else {
+ sg->offset = head + size * i;
+ sg->offset = sg->offset * (*bpw / 8);
+ sg_set_page(sg, virt_to_page(dma->tx_buf_virt), size,
+ sg->offset);
+ sg_dma_len(sg) = size;
+ }
+ sg_dma_address(sg) = dma->tx_buf_dma + sg->offset;
+ }
+ sg = dma->sg_tx_p;
+ desc_tx = dmaengine_prep_slave_sg(dma->chan_tx,
+ sg, num, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc_tx) {
+ dev_err(&data->master->dev,
+ "%s:dmaengine_prep_slave_sg Failed\n", __func__);
+ return;
+ }
+ dma_sync_sg_for_device(&data->master->dev, sg, num, DMA_TO_DEVICE);
+ desc_tx->callback = NULL;
+ desc_tx->callback_param = data;
+ dma->nent = num;
+ dma->desc_tx = desc_tx;
+
+ dev_dbg(&data->master->dev, "%s:Pulling down SSN low - writing 0x2 to SSNXCR\n", __func__);
+
+ spin_lock_irqsave(&data->lock, flags);
+ pch_spi_writereg(data->master, PCH_SSNXCR, SSN_LOW);
+ desc_rx->tx_submit(desc_rx);
+ desc_tx->tx_submit(desc_tx);
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ /* reset transfer complete flag */
+ data->transfer_complete = false;
+}
+
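+/*
+ * Workqueue handler that drives the queue: it pops one message, selects
+ * the chip, then walks its transfers either through the DMA helpers above
+ * or through pch_spi_set_tx()/pch_spi_set_ir() for PIO, finishing with
+ * pch_spi_nomore_transfer() when the last transfer of the message is done.
+ */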
+static void pch_spi_process_messages(struct work_struct *pwork)
+{
+ struct spi_message *pmsg, *tmp;
+ struct pch_spi_data *data;
+ int bpw;
+
+ data = container_of(pwork, struct pch_spi_data, work);
+ dev_dbg(&data->master->dev, "%s data initialized\n", __func__);
+
+ spin_lock(&data->lock);
+ /* check if suspend has been initiated;if yes flush queue */
+ if (data->board_dat->suspend_sts || (data->status == STATUS_EXITING)) {
+ dev_dbg(&data->master->dev,
+ "%s suspend/remove initiated, flushing queue\n", __func__);
+ list_for_each_entry_safe(pmsg, tmp, data->queue.next, queue) {
+ pmsg->status = -EIO;
+
+ if (pmsg->complete) {
+ spin_unlock(&data->lock);
+ pmsg->complete(pmsg->context);
+ spin_lock(&data->lock);
+ }
+
+ /* delete from queue */
+ list_del_init(&pmsg->queue);
+ }
+
+ spin_unlock(&data->lock);
+ return;
+ }
+
+ data->bcurrent_msg_processing = true;
+ dev_dbg(&data->master->dev,
+ "%s Set data->bcurrent_msg_processing= true\n", __func__);
+
+ /* Get the message from the queue and delete it from there. */
+ data->current_msg = list_entry(data->queue.next, struct spi_message,
+ queue);
+
+ list_del_init(&data->current_msg->queue);
+
+ data->current_msg->status = 0;
+
+ pch_spi_select_chip(data, data->current_msg->spi);
+
+ spin_unlock(&data->lock);
+
+ if (data->use_dma)
+ pch_spi_request_dma(data,
+ data->current_msg->spi->bits_per_word);
+ pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL);
+ do {
+ int cnt;
+ /* If we are already processing a message, get the next
+ transfer structure from it; otherwise retrieve the first
+ transfer of the message. */
+ spin_lock(&data->lock);
+ if (data->cur_trans == NULL) {
+ data->cur_trans =
+ list_entry(data->current_msg->transfers.next,
+ struct spi_transfer, transfer_list);
+ dev_dbg(&data->master->dev,
+ "%s :Getting 1st transfer message\n",
+ __func__);
+ } else {
+ data->cur_trans =
+ list_entry(data->cur_trans->transfer_list.next,
+ struct spi_transfer, transfer_list);
+ dev_dbg(&data->master->dev,
+ "%s :Getting next transfer message\n",
+ __func__);
+ }
+ spin_unlock(&data->lock);
+
+ if (!data->cur_trans->len)
+ goto out;
+ cnt = (data->cur_trans->len - 1) / PCH_BUF_SIZE + 1;
+ data->save_total_len = data->cur_trans->len;
+ if (data->use_dma) {
+ int i;
+ char *save_rx_buf = data->cur_trans->rx_buf;
+
+ for (i = 0; i < cnt; i++) {
+ pch_spi_handle_dma(data, &bpw);
+ if (!pch_spi_start_transfer(data)) {
+ data->transfer_complete = true;
+ data->current_msg->status = -EIO;
+ data->current_msg->complete
+ (data->current_msg->context);
+ data->bcurrent_msg_processing = false;
+ data->current_msg = NULL;
+ data->cur_trans = NULL;
+ goto out;
+ }
+ pch_spi_copy_rx_data_for_dma(data, bpw);
+ }
+ data->cur_trans->rx_buf = save_rx_buf;
+ } else {
+ pch_spi_set_tx(data, &bpw);
+ pch_spi_set_ir(data);
+ pch_spi_copy_rx_data(data, bpw);
+ kfree(data->pkt_rx_buff);
+ data->pkt_rx_buff = NULL;
+ kfree(data->pkt_tx_buff);
+ data->pkt_tx_buff = NULL;
+ }
+ /* increment message count */
+ data->cur_trans->len = data->save_total_len;
+ data->current_msg->actual_length += data->cur_trans->len;
+
+ dev_dbg(&data->master->dev,
+ "%s:data->current_msg->actual_length=%d\n",
+ __func__, data->current_msg->actual_length);
+
+ spi_transfer_delay_exec(data->cur_trans);
+
+ spin_lock(&data->lock);
+
+ /* No more transfer in this message. */
+ if ((data->cur_trans->transfer_list.next) ==
+ &(data->current_msg->transfers)) {
+ pch_spi_nomore_transfer(data);
+ }
+
+ spin_unlock(&data->lock);
+
+ } while (data->cur_trans != NULL);
+
+out:
+ pch_spi_writereg(data->master, PCH_SSNXCR, SSN_HIGH);
+ if (data->use_dma)
+ pch_spi_release_dma(data);
+}
+
+static void pch_spi_free_resources(struct pch_spi_board_data *board_dat,
+ struct pch_spi_data *data)
+{
+ dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__);
+
+ flush_work(&data->work);
+}
+
+static int pch_spi_get_resources(struct pch_spi_board_data *board_dat,
+ struct pch_spi_data *data)
+{
+ dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__);
+
+ /* reset PCH SPI h/w */
+ pch_spi_reset(data->master);
+ dev_dbg(&board_dat->pdev->dev,
+ "%s pch_spi_reset invoked successfully\n", __func__);
+
+ dev_dbg(&board_dat->pdev->dev, "%s data->irq_reg_sts=true\n", __func__);
+
+ return 0;
+}
+
+static void pch_free_dma_buf(struct pch_spi_board_data *board_dat,
+ struct pch_spi_data *data)
+{
+ struct pch_spi_dma_ctrl *dma;
+
+ dma = &data->dma;
+ if (dma->tx_buf_dma)
+ dma_free_coherent(&board_dat->pdev->dev, PCH_BUF_SIZE,
+ dma->tx_buf_virt, dma->tx_buf_dma);
+ if (dma->rx_buf_dma)
+ dma_free_coherent(&board_dat->pdev->dev, PCH_BUF_SIZE,
+ dma->rx_buf_virt, dma->rx_buf_dma);
+}
+
+static int pch_alloc_dma_buf(struct pch_spi_board_data *board_dat,
+ struct pch_spi_data *data)
+{
+ struct pch_spi_dma_ctrl *dma;
+ int ret;
+
+ dma = &data->dma;
+ ret = 0;
+ /* Get Consistent memory for Tx DMA */
+ dma->tx_buf_virt = dma_alloc_coherent(&board_dat->pdev->dev,
+ PCH_BUF_SIZE, &dma->tx_buf_dma, GFP_KERNEL);
+ if (!dma->tx_buf_virt)
+ ret = -ENOMEM;
+
+ /* Get Consistent memory for Rx DMA */
+ dma->rx_buf_virt = dma_alloc_coherent(&board_dat->pdev->dev,
+ PCH_BUF_SIZE, &dma->rx_buf_dma, GFP_KERNEL);
+ if (!dma->rx_buf_virt)
+ ret = -ENOMEM;
+
+ return ret;
+}
+
+static int pch_spi_pd_probe(struct platform_device *plat_dev)
+{
+ int ret;
+ struct spi_master *master;
+ struct pch_spi_board_data *board_dat = dev_get_platdata(&plat_dev->dev);
+ struct pch_spi_data *data;
+
+ dev_dbg(&plat_dev->dev, "%s:debug\n", __func__);
+
+ master = spi_alloc_master(&board_dat->pdev->dev,
+ sizeof(struct pch_spi_data));
+ if (!master) {
+ dev_err(&plat_dev->dev, "spi_alloc_master[%d] failed.\n",
+ plat_dev->id);
+ return -ENOMEM;
+ }
+
+ data = spi_master_get_devdata(master);
+ data->master = master;
+
+ platform_set_drvdata(plat_dev, data);
+
+ /* base address + address offset */
+ data->io_base_addr = pci_resource_start(board_dat->pdev, 1) +
+ PCH_ADDRESS_SIZE * plat_dev->id;
+ data->io_remap_addr = pci_iomap(board_dat->pdev, 1, 0);
+ if (!data->io_remap_addr) {
+ dev_err(&plat_dev->dev, "%s pci_iomap failed\n", __func__);
+ ret = -ENOMEM;
+ goto err_pci_iomap;
+ }
+ data->io_remap_addr += PCH_ADDRESS_SIZE * plat_dev->id;
+
+ dev_dbg(&plat_dev->dev, "[ch%d] remap_addr=%p\n",
+ plat_dev->id, data->io_remap_addr);
+
+ /* initialize members of SPI master */
+ master->num_chipselect = PCH_MAX_CS;
+ master->transfer = pch_spi_transfer;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
+ master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
+ master->max_speed_hz = PCH_MAX_BAUDRATE;
+ master->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
+
+ data->board_dat = board_dat;
+ data->plat_dev = plat_dev;
+ data->n_curnt_chip = 255;
+ data->status = STATUS_RUNNING;
+ data->ch = plat_dev->id;
+ data->use_dma = use_dma;
+
+ INIT_LIST_HEAD(&data->queue);
+ spin_lock_init(&data->lock);
+ INIT_WORK(&data->work, pch_spi_process_messages);
+ init_waitqueue_head(&data->wait);
+
+ ret = pch_spi_get_resources(board_dat, data);
+ if (ret) {
+ dev_err(&plat_dev->dev, "%s fail(retval=%d)\n", __func__, ret);
+ goto err_spi_get_resources;
+ }
+
+ ret = request_irq(board_dat->pdev->irq, pch_spi_handler,
+ IRQF_SHARED, KBUILD_MODNAME, data);
+ if (ret) {
+ dev_err(&plat_dev->dev,
+ "%s request_irq failed\n", __func__);
+ goto err_request_irq;
+ }
+ data->irq_reg_sts = true;
+
+ pch_spi_set_master_mode(master);
+
+ if (use_dma) {
+ dev_info(&plat_dev->dev, "Use DMA for data transfers\n");
+ ret = pch_alloc_dma_buf(board_dat, data);
+ if (ret)
+ goto err_spi_register_master;
+ }
+
+ ret = spi_register_master(master);
+ if (ret != 0) {
+ dev_err(&plat_dev->dev,
+ "%s spi_register_master FAILED\n", __func__);
+ goto err_spi_register_master;
+ }
+
+ return 0;
+
+err_spi_register_master:
+ pch_free_dma_buf(board_dat, data);
+ free_irq(board_dat->pdev->irq, data);
+err_request_irq:
+ pch_spi_free_resources(board_dat, data);
+err_spi_get_resources:
+ pci_iounmap(board_dat->pdev, data->io_remap_addr);
+err_pci_iomap:
+ spi_master_put(master);
+
+ return ret;
+}
+
+static int pch_spi_pd_remove(struct platform_device *plat_dev)
+{
+ struct pch_spi_board_data *board_dat = dev_get_platdata(&plat_dev->dev);
+ struct pch_spi_data *data = platform_get_drvdata(plat_dev);
+ int count;
+ unsigned long flags;
+
+ dev_dbg(&plat_dev->dev, "%s:[ch%d] irq=%d\n",
+ __func__, plat_dev->id, board_dat->pdev->irq);
+
+ if (use_dma)
+ pch_free_dma_buf(board_dat, data);
+
+ /* Check for any pending messages; no action is taken if the queue
+ * is still not empty, but at least we tried. Unload anyway. */
+ count = 500;
+ spin_lock_irqsave(&data->lock, flags);
+ data->status = STATUS_EXITING;
+ while ((list_empty(&data->queue) == 0) && --count) {
+ dev_dbg(&board_dat->pdev->dev, "%s :queue not empty\n",
+ __func__);
+ spin_unlock_irqrestore(&data->lock, flags);
+ msleep(PCH_SLEEP_TIME);
+ spin_lock_irqsave(&data->lock, flags);
+ }
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ pch_spi_free_resources(board_dat, data);
+ /* disable interrupts & free IRQ */
+ if (data->irq_reg_sts) {
+ /* disable interrupts */
+ pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL);
+ data->irq_reg_sts = false;
+ free_irq(board_dat->pdev->irq, data);
+ }
+
+ pci_iounmap(board_dat->pdev, data->io_remap_addr);
+ spi_unregister_master(data->master);
+
+ return 0;
+}
+#ifdef CONFIG_PM
+static int pch_spi_pd_suspend(struct platform_device *pd_dev,
+ pm_message_t state)
+{
+ u8 count;
+ struct pch_spi_board_data *board_dat = dev_get_platdata(&pd_dev->dev);
+ struct pch_spi_data *data = platform_get_drvdata(pd_dev);
+
+ dev_dbg(&pd_dev->dev, "%s ENTRY\n", __func__);
+
+ if (!board_dat) {
+ dev_err(&pd_dev->dev,
+ "%s pci_get_drvdata returned NULL\n", __func__);
+ return -EFAULT;
+ }
+
+ /* Check whether the current message has been processed:
+ only after that is done will the transfer be suspended. */
+ count = 255;
+ while ((--count) > 0) {
+ if (!(data->bcurrent_msg_processing))
+ break;
+ msleep(PCH_SLEEP_TIME);
+ }
+
+ /* Free IRQ */
+ if (data->irq_reg_sts) {
+ /* disable all interrupts */
+ pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL);
+ pch_spi_reset(data->master);
+ free_irq(board_dat->pdev->irq, data);
+
+ data->irq_reg_sts = false;
+ dev_dbg(&pd_dev->dev,
+ "%s free_irq invoked successfully.\n", __func__);
+ }
+
+ return 0;
+}
+
+static int pch_spi_pd_resume(struct platform_device *pd_dev)
+{
+ struct pch_spi_board_data *board_dat = dev_get_platdata(&pd_dev->dev);
+ struct pch_spi_data *data = platform_get_drvdata(pd_dev);
+ int retval;
+
+ if (!board_dat) {
+ dev_err(&pd_dev->dev,
+ "%s pci_get_drvdata returned NULL\n", __func__);
+ return -EFAULT;
+ }
+
+ if (!data->irq_reg_sts) {
+ /* register IRQ */
+ retval = request_irq(board_dat->pdev->irq, pch_spi_handler,
+ IRQF_SHARED, KBUILD_MODNAME, data);
+ if (retval < 0) {
+ dev_err(&pd_dev->dev,
+ "%s request_irq failed\n", __func__);
+ return retval;
+ }
+
+ /* reset PCH SPI h/w */
+ pch_spi_reset(data->master);
+ pch_spi_set_master_mode(data->master);
+ data->irq_reg_sts = true;
+ }
+ return 0;
+}
+#else
+#define pch_spi_pd_suspend NULL
+#define pch_spi_pd_resume NULL
+#endif
+
+static struct platform_driver pch_spi_pd_driver = {
+ .driver = {
+ .name = "pch-spi",
+ },
+ .probe = pch_spi_pd_probe,
+ .remove = pch_spi_pd_remove,
+ .suspend = pch_spi_pd_suspend,
+ .resume = pch_spi_pd_resume
+};
+
+static int pch_spi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct pch_spi_board_data *board_dat;
+ struct platform_device *pd_dev = NULL;
+ int retval;
+ int i;
+ struct pch_pd_dev_save *pd_dev_save;
+
+ pd_dev_save = kzalloc(sizeof(*pd_dev_save), GFP_KERNEL);
+ if (!pd_dev_save)
+ return -ENOMEM;
+
+ board_dat = kzalloc(sizeof(*board_dat), GFP_KERNEL);
+ if (!board_dat) {
+ retval = -ENOMEM;
+ goto err_no_mem;
+ }
+
+ retval = pci_request_regions(pdev, KBUILD_MODNAME);
+ if (retval) {
+ dev_err(&pdev->dev, "%s request_region failed\n", __func__);
+ goto pci_request_regions;
+ }
+
+ board_dat->pdev = pdev;
+ board_dat->num = id->driver_data;
+ pd_dev_save->num = id->driver_data;
+ pd_dev_save->board_dat = board_dat;
+
+ retval = pci_enable_device(pdev);
+ if (retval) {
+ dev_err(&pdev->dev, "%s pci_enable_device failed\n", __func__);
+ goto pci_enable_device;
+ }
+
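+	/*
+	 * Create one "pch-spi" platform device for each SPI channel exposed
+	 * by this PCI device (id->driver_data gives the channel count).
+	 */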
+ for (i = 0; i < board_dat->num; i++) {
+ pd_dev = platform_device_alloc("pch-spi", i);
+ if (!pd_dev) {
+ dev_err(&pdev->dev, "platform_device_alloc failed\n");
+ retval = -ENOMEM;
+ goto err_platform_device;
+ }
+ pd_dev_save->pd_save[i] = pd_dev;
+ pd_dev->dev.parent = &pdev->dev;
+
+ retval = platform_device_add_data(pd_dev, board_dat,
+ sizeof(*board_dat));
+ if (retval) {
+ dev_err(&pdev->dev,
+ "platform_device_add_data failed\n");
+ platform_device_put(pd_dev);
+ goto err_platform_device;
+ }
+
+ retval = platform_device_add(pd_dev);
+ if (retval) {
+ dev_err(&pdev->dev, "platform_device_add failed\n");
+ platform_device_put(pd_dev);
+ goto err_platform_device;
+ }
+ }
+
+ pci_set_drvdata(pdev, pd_dev_save);
+
+ return 0;
+
+err_platform_device:
+ while (--i >= 0)
+ platform_device_unregister(pd_dev_save->pd_save[i]);
+ pci_disable_device(pdev);
+pci_enable_device:
+ pci_release_regions(pdev);
+pci_request_regions:
+ kfree(board_dat);
+err_no_mem:
+ kfree(pd_dev_save);
+
+ return retval;
+}
+
+static void pch_spi_remove(struct pci_dev *pdev)
+{
+ int i;
+ struct pch_pd_dev_save *pd_dev_save = pci_get_drvdata(pdev);
+
+ dev_dbg(&pdev->dev, "%s ENTRY:pdev=%p\n", __func__, pdev);
+
+ for (i = 0; i < pd_dev_save->num; i++)
+ platform_device_unregister(pd_dev_save->pd_save[i]);
+
+ pci_disable_device(pdev);
+ pci_release_regions(pdev);
+ kfree(pd_dev_save->board_dat);
+ kfree(pd_dev_save);
+}
+
+static int __maybe_unused pch_spi_suspend(struct device *dev)
+{
+ struct pch_pd_dev_save *pd_dev_save = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "%s ENTRY\n", __func__);
+
+ pd_dev_save->board_dat->suspend_sts = true;
+
+ return 0;
+}
+
+static int __maybe_unused pch_spi_resume(struct device *dev)
+{
+ struct pch_pd_dev_save *pd_dev_save = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "%s ENTRY\n", __func__);
+
+ /* set suspend status to false */
+ pd_dev_save->board_dat->suspend_sts = false;
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(pch_spi_pm_ops, pch_spi_suspend, pch_spi_resume);
+
+static struct pci_driver pch_spi_pcidev_driver = {
+ .name = "pch_spi",
+ .id_table = pch_spi_pcidev_id,
+ .probe = pch_spi_probe,
+ .remove = pch_spi_remove,
+ .driver.pm = &pch_spi_pm_ops,
+};
+
+static int __init pch_spi_init(void)
+{
+ int ret;
+ ret = platform_driver_register(&pch_spi_pd_driver);
+ if (ret)
+ return ret;
+
+ ret = pci_register_driver(&pch_spi_pcidev_driver);
+ if (ret) {
+ platform_driver_unregister(&pch_spi_pd_driver);
+ return ret;
+ }
+
+ return 0;
+}
+module_init(pch_spi_init);
+
+static void __exit pch_spi_exit(void)
+{
+ pci_unregister_driver(&pch_spi_pcidev_driver);
+ platform_driver_unregister(&pch_spi_pd_driver);
+}
+module_exit(pch_spi_exit);
+
+module_param(use_dma, int, 0644);
+MODULE_PARM_DESC(use_dma,
+ "to use DMA for data transfers pass 1 else 0; default 1");
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Intel EG20T PCH/LAPIS Semiconductor ML7xxx IOH SPI Driver");
+MODULE_DEVICE_TABLE(pci, pch_spi_pcidev_id);
+
diff --git a/drivers/spi/spi-uniphier.c b/drivers/spi/spi-uniphier.c
new file mode 100644
index 000000000..cc0da4822
--- /dev/null
+++ b/drivers/spi/spi-uniphier.c
@@ -0,0 +1,812 @@
+// SPDX-License-Identifier: GPL-2.0
+// spi-uniphier.c - Socionext UniPhier SPI controller driver
+// Copyright 2012 Panasonic Corporation
+// Copyright 2016-2018 Socionext Inc.
+
+#include <linux/kernel.h>
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+
+#include <asm/unaligned.h>
+
+#define SSI_TIMEOUT_MS 2000
+#define SSI_POLL_TIMEOUT_US 200
+#define SSI_MAX_CLK_DIVIDER 254
+#define SSI_MIN_CLK_DIVIDER 4
+
+struct uniphier_spi_priv {
+ void __iomem *base;
+ dma_addr_t base_dma_addr;
+ struct clk *clk;
+ struct spi_master *master;
+ struct completion xfer_done;
+
+ int error;
+ unsigned int tx_bytes;
+ unsigned int rx_bytes;
+ const u8 *tx_buf;
+ u8 *rx_buf;
+ atomic_t dma_busy;
+
+ bool is_save_param;
+ u8 bits_per_word;
+ u16 mode;
+ u32 speed_hz;
+};
+
+#define SSI_CTL 0x00
+#define SSI_CTL_EN BIT(0)
+
+#define SSI_CKS 0x04
+#define SSI_CKS_CKRAT_MASK GENMASK(7, 0)
+#define SSI_CKS_CKPHS BIT(14)
+#define SSI_CKS_CKINIT BIT(13)
+#define SSI_CKS_CKDLY BIT(12)
+
+#define SSI_TXWDS 0x08
+#define SSI_TXWDS_WDLEN_MASK GENMASK(13, 8)
+#define SSI_TXWDS_TDTF_MASK GENMASK(7, 6)
+#define SSI_TXWDS_DTLEN_MASK GENMASK(5, 0)
+
+#define SSI_RXWDS 0x0c
+#define SSI_RXWDS_DTLEN_MASK GENMASK(5, 0)
+
+#define SSI_FPS 0x10
+#define SSI_FPS_FSPOL BIT(15)
+#define SSI_FPS_FSTRT BIT(14)
+
+#define SSI_SR 0x14
+#define SSI_SR_BUSY BIT(7)
+#define SSI_SR_RNE BIT(0)
+
+#define SSI_IE 0x18
+#define SSI_IE_TCIE BIT(4)
+#define SSI_IE_RCIE BIT(3)
+#define SSI_IE_TXRE BIT(2)
+#define SSI_IE_RXRE BIT(1)
+#define SSI_IE_RORIE BIT(0)
+#define SSI_IE_ALL_MASK GENMASK(4, 0)
+
+#define SSI_IS 0x1c
+#define SSI_IS_RXRS BIT(9)
+#define SSI_IS_RCID BIT(3)
+#define SSI_IS_RORID BIT(0)
+
+#define SSI_IC 0x1c
+#define SSI_IC_TCIC BIT(4)
+#define SSI_IC_RCIC BIT(3)
+#define SSI_IC_RORIC BIT(0)
+
+#define SSI_FC 0x20
+#define SSI_FC_TXFFL BIT(12)
+#define SSI_FC_TXFTH_MASK GENMASK(11, 8)
+#define SSI_FC_RXFFL BIT(4)
+#define SSI_FC_RXFTH_MASK GENMASK(3, 0)
+
+#define SSI_TXDR 0x24
+#define SSI_RXDR 0x24
+
+#define SSI_FIFO_DEPTH 8U
+#define SSI_FIFO_BURST_NUM 1
+
+#define SSI_DMA_RX_BUSY BIT(1)
+#define SSI_DMA_TX_BUSY BIT(0)
+
+static inline unsigned int bytes_per_word(unsigned int bits)
+{
+ return bits <= 8 ? 1 : (bits <= 16 ? 2 : 4);
+}
+
+static inline void uniphier_spi_irq_enable(struct uniphier_spi_priv *priv,
+ u32 mask)
+{
+ u32 val;
+
+ val = readl(priv->base + SSI_IE);
+ val |= mask;
+ writel(val, priv->base + SSI_IE);
+}
+
+static inline void uniphier_spi_irq_disable(struct uniphier_spi_priv *priv,
+ u32 mask)
+{
+ u32 val;
+
+ val = readl(priv->base + SSI_IE);
+ val &= ~mask;
+ writel(val, priv->base + SSI_IE);
+}
+
+static void uniphier_spi_set_mode(struct spi_device *spi)
+{
+ struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
+ u32 val1, val2;
+
+ /*
+ * clock setting
+ * CKPHS capture timing. 0:rising edge, 1:falling edge
+ * CKINIT clock initial level. 0:low, 1:high
+ * CKDLY clock delay. 0:no delay, 1:delay depending on FSTRT
+ * (FSTRT=0: 1 clock, FSTRT=1: 0.5 clock)
+ *
+ * frame setting
+	 * FSPOL	frame signal polarity. 0: low, 1: high
+ * FSTRT start frame timing
+ * 0: rising edge of clock, 1: falling edge of clock
+ */
+ switch (spi->mode & SPI_MODE_X_MASK) {
+ case SPI_MODE_0:
+ /* CKPHS=1, CKINIT=0, CKDLY=1, FSTRT=0 */
+ val1 = SSI_CKS_CKPHS | SSI_CKS_CKDLY;
+ val2 = 0;
+ break;
+ case SPI_MODE_1:
+ /* CKPHS=0, CKINIT=0, CKDLY=0, FSTRT=1 */
+ val1 = 0;
+ val2 = SSI_FPS_FSTRT;
+ break;
+ case SPI_MODE_2:
+ /* CKPHS=0, CKINIT=1, CKDLY=1, FSTRT=1 */
+ val1 = SSI_CKS_CKINIT | SSI_CKS_CKDLY;
+ val2 = SSI_FPS_FSTRT;
+ break;
+ case SPI_MODE_3:
+ /* CKPHS=1, CKINIT=1, CKDLY=0, FSTRT=0 */
+ val1 = SSI_CKS_CKPHS | SSI_CKS_CKINIT;
+ val2 = 0;
+ break;
+ }
+
+ if (!(spi->mode & SPI_CS_HIGH))
+ val2 |= SSI_FPS_FSPOL;
+
+ writel(val1, priv->base + SSI_CKS);
+ writel(val2, priv->base + SSI_FPS);
+
+ val1 = 0;
+ if (spi->mode & SPI_LSB_FIRST)
+ val1 |= FIELD_PREP(SSI_TXWDS_TDTF_MASK, 1);
+ writel(val1, priv->base + SSI_TXWDS);
+ writel(val1, priv->base + SSI_RXWDS);
+}
+
+static void uniphier_spi_set_transfer_size(struct spi_device *spi, int size)
+{
+ struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
+ u32 val;
+
+ val = readl(priv->base + SSI_TXWDS);
+ val &= ~(SSI_TXWDS_WDLEN_MASK | SSI_TXWDS_DTLEN_MASK);
+ val |= FIELD_PREP(SSI_TXWDS_WDLEN_MASK, size);
+ val |= FIELD_PREP(SSI_TXWDS_DTLEN_MASK, size);
+ writel(val, priv->base + SSI_TXWDS);
+
+ val = readl(priv->base + SSI_RXWDS);
+ val &= ~SSI_RXWDS_DTLEN_MASK;
+ val |= FIELD_PREP(SSI_RXWDS_DTLEN_MASK, size);
+ writel(val, priv->base + SSI_RXWDS);
+}
+
+static void uniphier_spi_set_baudrate(struct spi_device *spi,
+ unsigned int speed)
+{
+ struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
+ u32 val, ckdiv;
+
+ /*
+	 * the supported clock dividers are even numbers from 4 to 254
+	 * (4, 6, 8 ... 254); round up so the speed is at most the one requested
+ */
+ ckdiv = DIV_ROUND_UP(clk_get_rate(priv->clk), speed);
+ ckdiv = round_up(ckdiv, 2);
+
+ val = readl(priv->base + SSI_CKS);
+ val &= ~SSI_CKS_CKRAT_MASK;
+ val |= ckdiv & SSI_CKS_CKRAT_MASK;
+ writel(val, priv->base + SSI_CKS);
+}
+
+static void uniphier_spi_setup_transfer(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
+ u32 val;
+
+ priv->error = 0;
+ priv->tx_buf = t->tx_buf;
+ priv->rx_buf = t->rx_buf;
+ priv->tx_bytes = priv->rx_bytes = t->len;
+
+ if (!priv->is_save_param || priv->mode != spi->mode) {
+ uniphier_spi_set_mode(spi);
+ priv->mode = spi->mode;
+ priv->is_save_param = false;
+ }
+
+ if (!priv->is_save_param || priv->bits_per_word != t->bits_per_word) {
+ uniphier_spi_set_transfer_size(spi, t->bits_per_word);
+ priv->bits_per_word = t->bits_per_word;
+ }
+
+ if (!priv->is_save_param || priv->speed_hz != t->speed_hz) {
+ uniphier_spi_set_baudrate(spi, t->speed_hz);
+ priv->speed_hz = t->speed_hz;
+ }
+
+ priv->is_save_param = true;
+
+ /* reset FIFOs */
+ val = SSI_FC_TXFFL | SSI_FC_RXFFL;
+ writel(val, priv->base + SSI_FC);
+}
+
+static void uniphier_spi_send(struct uniphier_spi_priv *priv)
+{
+ int wsize;
+ u32 val = 0;
+
+ wsize = min(bytes_per_word(priv->bits_per_word), priv->tx_bytes);
+ priv->tx_bytes -= wsize;
+
+ if (priv->tx_buf) {
+ switch (wsize) {
+ case 1:
+ val = *priv->tx_buf;
+ break;
+ case 2:
+ val = get_unaligned_le16(priv->tx_buf);
+ break;
+ case 4:
+ val = get_unaligned_le32(priv->tx_buf);
+ break;
+ }
+
+ priv->tx_buf += wsize;
+ }
+
+ writel(val, priv->base + SSI_TXDR);
+}
+
+static void uniphier_spi_recv(struct uniphier_spi_priv *priv)
+{
+ int rsize;
+ u32 val;
+
+ rsize = min(bytes_per_word(priv->bits_per_word), priv->rx_bytes);
+ priv->rx_bytes -= rsize;
+
+ val = readl(priv->base + SSI_RXDR);
+
+ if (priv->rx_buf) {
+ switch (rsize) {
+ case 1:
+ *priv->rx_buf = val;
+ break;
+ case 2:
+ put_unaligned_le16(val, priv->rx_buf);
+ break;
+ case 4:
+ put_unaligned_le32(val, priv->rx_buf);
+ break;
+ }
+
+ priv->rx_buf += rsize;
+ }
+}
+
+static void uniphier_spi_set_fifo_threshold(struct uniphier_spi_priv *priv,
+ unsigned int threshold)
+{
+ u32 val;
+
+ val = readl(priv->base + SSI_FC);
+ val &= ~(SSI_FC_TXFTH_MASK | SSI_FC_RXFTH_MASK);
+ val |= FIELD_PREP(SSI_FC_TXFTH_MASK, SSI_FIFO_DEPTH - threshold);
+ val |= FIELD_PREP(SSI_FC_RXFTH_MASK, threshold);
+ writel(val, priv->base + SSI_FC);
+}
+
+static void uniphier_spi_fill_tx_fifo(struct uniphier_spi_priv *priv)
+{
+ unsigned int fifo_threshold, fill_words;
+ unsigned int bpw = bytes_per_word(priv->bits_per_word);
+
+ fifo_threshold = DIV_ROUND_UP(priv->rx_bytes, bpw);
+ fifo_threshold = min(fifo_threshold, SSI_FIFO_DEPTH);
+
+ uniphier_spi_set_fifo_threshold(priv, fifo_threshold);
+
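+	/*
+	 * rx_bytes and tx_bytes both start at the transfer length, so their
+	 * difference is the number of words already sent but not yet read
+	 * back; only top the FIFO up to the chosen threshold.
+	 */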
+ fill_words = fifo_threshold -
+ DIV_ROUND_UP(priv->rx_bytes - priv->tx_bytes, bpw);
+
+ while (fill_words--)
+ uniphier_spi_send(priv);
+}
+
+static void uniphier_spi_set_cs(struct spi_device *spi, bool enable)
+{
+ struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
+ u32 val;
+
+ val = readl(priv->base + SSI_FPS);
+
+ if (enable)
+ val |= SSI_FPS_FSPOL;
+ else
+ val &= ~SSI_FPS_FSPOL;
+
+ writel(val, priv->base + SSI_FPS);
+}
+
+static bool uniphier_spi_can_dma(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+ unsigned int bpw = bytes_per_word(priv->bits_per_word);
+
+ if ((!master->dma_tx && !master->dma_rx)
+ || (!master->dma_tx && t->tx_buf)
+ || (!master->dma_rx && t->rx_buf))
+ return false;
+
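+	/* only worth using DMA when the transfer does not fit in the FIFO */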
+ return DIV_ROUND_UP(t->len, bpw) > SSI_FIFO_DEPTH;
+}
+
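+/*
+ * DMA completion callbacks: each direction clears its own busy flag and
+ * finalizes the transfer only once the other direction has completed too.
+ */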
+static void uniphier_spi_dma_rxcb(void *data)
+{
+ struct spi_master *master = data;
+ struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+ int state = atomic_fetch_andnot(SSI_DMA_RX_BUSY, &priv->dma_busy);
+
+ uniphier_spi_irq_disable(priv, SSI_IE_RXRE);
+
+ if (!(state & SSI_DMA_TX_BUSY))
+ spi_finalize_current_transfer(master);
+}
+
+static void uniphier_spi_dma_txcb(void *data)
+{
+ struct spi_master *master = data;
+ struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+ int state = atomic_fetch_andnot(SSI_DMA_TX_BUSY, &priv->dma_busy);
+
+ uniphier_spi_irq_disable(priv, SSI_IE_TXRE);
+
+ if (!(state & SSI_DMA_RX_BUSY))
+ spi_finalize_current_transfer(master);
+}
+
+static int uniphier_spi_transfer_one_dma(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+ struct dma_async_tx_descriptor *rxdesc = NULL, *txdesc = NULL;
+ int buswidth;
+
+ atomic_set(&priv->dma_busy, 0);
+
+ uniphier_spi_set_fifo_threshold(priv, SSI_FIFO_BURST_NUM);
+
+ if (priv->bits_per_word <= 8)
+ buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ else if (priv->bits_per_word <= 16)
+ buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ else
+ buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
+
+ if (priv->rx_buf) {
+ struct dma_slave_config rxconf = {
+ .direction = DMA_DEV_TO_MEM,
+ .src_addr = priv->base_dma_addr + SSI_RXDR,
+ .src_addr_width = buswidth,
+ .src_maxburst = SSI_FIFO_BURST_NUM,
+ };
+
+ dmaengine_slave_config(master->dma_rx, &rxconf);
+
+ rxdesc = dmaengine_prep_slave_sg(
+ master->dma_rx,
+ t->rx_sg.sgl, t->rx_sg.nents,
+ DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!rxdesc)
+ goto out_err_prep;
+
+ rxdesc->callback = uniphier_spi_dma_rxcb;
+ rxdesc->callback_param = master;
+
+ uniphier_spi_irq_enable(priv, SSI_IE_RXRE);
+ atomic_or(SSI_DMA_RX_BUSY, &priv->dma_busy);
+
+ dmaengine_submit(rxdesc);
+ dma_async_issue_pending(master->dma_rx);
+ }
+
+ if (priv->tx_buf) {
+ struct dma_slave_config txconf = {
+ .direction = DMA_MEM_TO_DEV,
+ .dst_addr = priv->base_dma_addr + SSI_TXDR,
+ .dst_addr_width = buswidth,
+ .dst_maxburst = SSI_FIFO_BURST_NUM,
+ };
+
+ dmaengine_slave_config(master->dma_tx, &txconf);
+
+ txdesc = dmaengine_prep_slave_sg(
+ master->dma_tx,
+ t->tx_sg.sgl, t->tx_sg.nents,
+ DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!txdesc)
+ goto out_err_prep;
+
+ txdesc->callback = uniphier_spi_dma_txcb;
+ txdesc->callback_param = master;
+
+ uniphier_spi_irq_enable(priv, SSI_IE_TXRE);
+ atomic_or(SSI_DMA_TX_BUSY, &priv->dma_busy);
+
+ dmaengine_submit(txdesc);
+ dma_async_issue_pending(master->dma_tx);
+ }
+
+ /* signal that we need to wait for completion */
+ return (priv->tx_buf || priv->rx_buf);
+
+out_err_prep:
+ if (rxdesc)
+ dmaengine_terminate_sync(master->dma_rx);
+
+ return -EINVAL;
+}
+
+static int uniphier_spi_transfer_one_irq(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+ struct device *dev = master->dev.parent;
+ unsigned long time_left;
+
+ reinit_completion(&priv->xfer_done);
+
+ uniphier_spi_fill_tx_fifo(priv);
+
+ uniphier_spi_irq_enable(priv, SSI_IE_RCIE | SSI_IE_RORIE);
+
+ time_left = wait_for_completion_timeout(&priv->xfer_done,
+ msecs_to_jiffies(SSI_TIMEOUT_MS));
+
+ uniphier_spi_irq_disable(priv, SSI_IE_RCIE | SSI_IE_RORIE);
+
+ if (!time_left) {
+ dev_err(dev, "transfer timeout.\n");
+ return -ETIMEDOUT;
+ }
+
+ return priv->error;
+}
+
+static int uniphier_spi_transfer_one_poll(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+ int loop = SSI_POLL_TIMEOUT_US * 10;
+
+ while (priv->tx_bytes) {
+ uniphier_spi_fill_tx_fifo(priv);
+
+ while ((priv->rx_bytes - priv->tx_bytes) > 0) {
+ while (!(readl(priv->base + SSI_SR) & SSI_SR_RNE)
+ && loop--)
+ ndelay(100);
+
+ if (loop == -1)
+ goto irq_transfer;
+
+ uniphier_spi_recv(priv);
+ }
+ }
+
+ return 0;
+
+irq_transfer:
+ return uniphier_spi_transfer_one_irq(master, spi, t);
+}
+
+static int uniphier_spi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+ unsigned long threshold;
+ bool use_dma;
+
+ /* Terminate and return success for 0 byte length transfer */
+ if (!t->len)
+ return 0;
+
+ uniphier_spi_setup_transfer(spi, t);
+
+ use_dma = master->can_dma ? master->can_dma(master, spi, t) : false;
+ if (use_dma)
+ return uniphier_spi_transfer_one_dma(master, spi, t);
+
+ /*
+ * If the transfer operation will take longer than
+ * SSI_POLL_TIMEOUT_US, it should use irq.
+ */
+ threshold = DIV_ROUND_UP(SSI_POLL_TIMEOUT_US * priv->speed_hz,
+ USEC_PER_SEC * BITS_PER_BYTE);
+ if (t->len > threshold)
+ return uniphier_spi_transfer_one_irq(master, spi, t);
+ else
+ return uniphier_spi_transfer_one_poll(master, spi, t);
+}
+
+static int uniphier_spi_prepare_transfer_hardware(struct spi_master *master)
+{
+ struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+
+ writel(SSI_CTL_EN, priv->base + SSI_CTL);
+
+ return 0;
+}
+
+static int uniphier_spi_unprepare_transfer_hardware(struct spi_master *master)
+{
+ struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+
+ writel(0, priv->base + SSI_CTL);
+
+ return 0;
+}
+
+static void uniphier_spi_handle_err(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+ u32 val;
+
+ /* stop running spi transfer */
+ writel(0, priv->base + SSI_CTL);
+
+ /* reset FIFOs */
+ val = SSI_FC_TXFFL | SSI_FC_RXFFL;
+ writel(val, priv->base + SSI_FC);
+
+ uniphier_spi_irq_disable(priv, SSI_IE_ALL_MASK);
+
+ if (atomic_read(&priv->dma_busy) & SSI_DMA_TX_BUSY) {
+ dmaengine_terminate_async(master->dma_tx);
+ atomic_andnot(SSI_DMA_TX_BUSY, &priv->dma_busy);
+ }
+
+ if (atomic_read(&priv->dma_busy) & SSI_DMA_RX_BUSY) {
+ dmaengine_terminate_async(master->dma_rx);
+ atomic_andnot(SSI_DMA_RX_BUSY, &priv->dma_busy);
+ }
+}
+
+static irqreturn_t uniphier_spi_handler(int irq, void *dev_id)
+{
+ struct uniphier_spi_priv *priv = dev_id;
+ u32 val, stat;
+
+ stat = readl(priv->base + SSI_IS);
+ val = SSI_IC_TCIC | SSI_IC_RCIC | SSI_IC_RORIC;
+ writel(val, priv->base + SSI_IC);
+
+ /* rx fifo overrun */
+ if (stat & SSI_IS_RORID) {
+ priv->error = -EIO;
+ goto done;
+ }
+
+ /* rx complete */
+ if ((stat & SSI_IS_RCID) && (stat & SSI_IS_RXRS)) {
+ while ((readl(priv->base + SSI_SR) & SSI_SR_RNE) &&
+ (priv->rx_bytes - priv->tx_bytes) > 0)
+ uniphier_spi_recv(priv);
+
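+		/*
+		 * After draining, leftover data in the RX FIFO or a TX/RX
+		 * byte-count mismatch indicates an error; rx_bytes reaching
+		 * zero means the whole transfer has completed.
+		 */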
+ if ((readl(priv->base + SSI_SR) & SSI_SR_RNE) ||
+ (priv->rx_bytes != priv->tx_bytes)) {
+ priv->error = -EIO;
+ goto done;
+ } else if (priv->rx_bytes == 0)
+ goto done;
+
+ /* next tx transfer */
+ uniphier_spi_fill_tx_fifo(priv);
+
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+
+done:
+ complete(&priv->xfer_done);
+ return IRQ_HANDLED;
+}
+
+static int uniphier_spi_probe(struct platform_device *pdev)
+{
+ struct uniphier_spi_priv *priv;
+ struct spi_master *master;
+ struct resource *res;
+ struct dma_slave_caps caps;
+ u32 dma_tx_burst = 0, dma_rx_burst = 0;
+ unsigned long clk_rate;
+ int irq;
+ int ret;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*priv));
+ if (!master)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, master);
+
+ priv = spi_master_get_devdata(master);
+ priv->master = master;
+ priv->is_save_param = false;
+
+ priv->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(priv->base)) {
+ ret = PTR_ERR(priv->base);
+ goto out_master_put;
+ }
+ priv->base_dma_addr = res->start;
+
+ priv->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(&pdev->dev, "failed to get clock\n");
+ ret = PTR_ERR(priv->clk);
+ goto out_master_put;
+ }
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ goto out_master_put;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto out_disable_clk;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, uniphier_spi_handler,
+ 0, "uniphier-spi", priv);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request IRQ\n");
+ goto out_disable_clk;
+ }
+
+ init_completion(&priv->xfer_done);
+
+ clk_rate = clk_get_rate(priv->clk);
+
+ master->max_speed_hz = DIV_ROUND_UP(clk_rate, SSI_MIN_CLK_DIVIDER);
+ master->min_speed_hz = DIV_ROUND_UP(clk_rate, SSI_MAX_CLK_DIVIDER);
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST;
+ master->dev.of_node = pdev->dev.of_node;
+ master->bus_num = pdev->id;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
+
+ master->set_cs = uniphier_spi_set_cs;
+ master->transfer_one = uniphier_spi_transfer_one;
+ master->prepare_transfer_hardware
+ = uniphier_spi_prepare_transfer_hardware;
+ master->unprepare_transfer_hardware
+ = uniphier_spi_unprepare_transfer_hardware;
+ master->handle_err = uniphier_spi_handle_err;
+ master->can_dma = uniphier_spi_can_dma;
+
+ master->num_chipselect = 1;
+ master->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
+
+ master->dma_tx = dma_request_chan(&pdev->dev, "tx");
+ if (IS_ERR_OR_NULL(master->dma_tx)) {
+ if (PTR_ERR(master->dma_tx) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto out_disable_clk;
+ }
+ master->dma_tx = NULL;
+ dma_tx_burst = INT_MAX;
+ } else {
+ ret = dma_get_slave_caps(master->dma_tx, &caps);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get TX DMA capacities: %d\n",
+ ret);
+ goto out_release_dma;
+ }
+ dma_tx_burst = caps.max_burst;
+ }
+
+ master->dma_rx = dma_request_chan(&pdev->dev, "rx");
+ if (IS_ERR_OR_NULL(master->dma_rx)) {
+ if (PTR_ERR(master->dma_rx) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto out_release_dma;
+ }
+ master->dma_rx = NULL;
+ dma_rx_burst = INT_MAX;
+ } else {
+ ret = dma_get_slave_caps(master->dma_rx, &caps);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get RX DMA capacities: %d\n",
+ ret);
+ goto out_release_dma;
+ }
+ dma_rx_burst = caps.max_burst;
+ }
+
+ master->max_dma_len = min(dma_tx_burst, dma_rx_burst);
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret)
+ goto out_release_dma;
+
+ return 0;
+
+out_release_dma:
+ if (!IS_ERR_OR_NULL(master->dma_rx)) {
+ dma_release_channel(master->dma_rx);
+ master->dma_rx = NULL;
+ }
+ if (!IS_ERR_OR_NULL(master->dma_tx)) {
+ dma_release_channel(master->dma_tx);
+ master->dma_tx = NULL;
+ }
+
+out_disable_clk:
+ clk_disable_unprepare(priv->clk);
+
+out_master_put:
+ spi_master_put(master);
+ return ret;
+}
+
+static int uniphier_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+
+ if (master->dma_tx)
+ dma_release_channel(master->dma_tx);
+ if (master->dma_rx)
+ dma_release_channel(master->dma_rx);
+
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+static const struct of_device_id uniphier_spi_match[] = {
+ { .compatible = "socionext,uniphier-scssi" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, uniphier_spi_match);
+
+static struct platform_driver uniphier_spi_driver = {
+ .probe = uniphier_spi_probe,
+ .remove = uniphier_spi_remove,
+ .driver = {
+ .name = "uniphier-spi",
+ .of_match_table = uniphier_spi_match,
+ },
+};
+module_platform_driver(uniphier_spi_driver);
+
+MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>");
+MODULE_AUTHOR("Keiji Hayashibara <hayashibara.keiji@socionext.com>");
+MODULE_DESCRIPTION("Socionext UniPhier SPI controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-xcomm.c b/drivers/spi/spi-xcomm.c
new file mode 100644
index 000000000..1d9b3f03d
--- /dev/null
+++ b/drivers/spi/spi-xcomm.c
@@ -0,0 +1,251 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Analog Devices AD-FMCOMMS1-EBZ board I2C-SPI bridge driver
+ *
+ * Copyright 2012 Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/spi/spi.h>
+#include <asm/unaligned.h>
+
+#define SPI_XCOMM_SETTINGS_LEN_OFFSET 10
+#define SPI_XCOMM_SETTINGS_3WIRE BIT(6)
+#define SPI_XCOMM_SETTINGS_CS_HIGH BIT(5)
+#define SPI_XCOMM_SETTINGS_SAMPLE_END BIT(4)
+#define SPI_XCOMM_SETTINGS_CPHA BIT(3)
+#define SPI_XCOMM_SETTINGS_CPOL BIT(2)
+#define SPI_XCOMM_SETTINGS_CLOCK_DIV_MASK 0x3
+#define SPI_XCOMM_SETTINGS_CLOCK_DIV_64 0x2
+#define SPI_XCOMM_SETTINGS_CLOCK_DIV_16 0x1
+#define SPI_XCOMM_SETTINGS_CLOCK_DIV_4 0x0
+
+#define SPI_XCOMM_CMD_UPDATE_CONFIG 0x03
+#define SPI_XCOMM_CMD_WRITE 0x04
+
+#define SPI_XCOMM_CLOCK 48000000
+
+struct spi_xcomm {
+ struct i2c_client *i2c;
+
+ uint16_t settings;
+ uint16_t chipselect;
+
+ unsigned int current_speed;
+
+ uint8_t buf[63];
+};
+
+static int spi_xcomm_sync_config(struct spi_xcomm *spi_xcomm, unsigned int len)
+{
+ uint16_t settings;
+ uint8_t *buf = spi_xcomm->buf;
+
+ settings = spi_xcomm->settings;
+ settings |= len << SPI_XCOMM_SETTINGS_LEN_OFFSET;
+
+ buf[0] = SPI_XCOMM_CMD_UPDATE_CONFIG;
+ put_unaligned_be16(settings, &buf[1]);
+ put_unaligned_be16(spi_xcomm->chipselect, &buf[3]);
+
+ return i2c_master_send(spi_xcomm->i2c, buf, 5);
+}
+
+static void spi_xcomm_chipselect(struct spi_xcomm *spi_xcomm,
+ struct spi_device *spi, int is_active)
+{
+ unsigned long cs = spi->chip_select;
+ uint16_t chipselect = spi_xcomm->chipselect;
+
+ if (is_active)
+ chipselect |= BIT(cs);
+ else
+ chipselect &= ~BIT(cs);
+
+ spi_xcomm->chipselect = chipselect;
+}
+
+static int spi_xcomm_setup_transfer(struct spi_xcomm *spi_xcomm,
+ struct spi_device *spi, struct spi_transfer *t, unsigned int *settings)
+{
+ if (t->len > 62)
+ return -EINVAL;
+
+ if (t->speed_hz != spi_xcomm->current_speed) {
+ unsigned int divider;
+
+ divider = DIV_ROUND_UP(SPI_XCOMM_CLOCK, t->speed_hz);
+ if (divider >= 64)
+ *settings |= SPI_XCOMM_SETTINGS_CLOCK_DIV_64;
+ else if (divider >= 16)
+ *settings |= SPI_XCOMM_SETTINGS_CLOCK_DIV_16;
+ else
+ *settings |= SPI_XCOMM_SETTINGS_CLOCK_DIV_4;
+
+ spi_xcomm->current_speed = t->speed_hz;
+ }
+
+ if (spi->mode & SPI_CPOL)
+ *settings |= SPI_XCOMM_SETTINGS_CPOL;
+ else
+ *settings &= ~SPI_XCOMM_SETTINGS_CPOL;
+
+ if (spi->mode & SPI_CPHA)
+ *settings &= ~SPI_XCOMM_SETTINGS_CPHA;
+ else
+ *settings |= SPI_XCOMM_SETTINGS_CPHA;
+
+ if (spi->mode & SPI_3WIRE)
+ *settings |= SPI_XCOMM_SETTINGS_3WIRE;
+ else
+ *settings &= ~SPI_XCOMM_SETTINGS_3WIRE;
+
+ return 0;
+}
+
+static int spi_xcomm_txrx_bufs(struct spi_xcomm *spi_xcomm,
+ struct spi_device *spi, struct spi_transfer *t)
+{
+ int ret;
+
+ if (t->tx_buf) {
+ spi_xcomm->buf[0] = SPI_XCOMM_CMD_WRITE;
+ memcpy(spi_xcomm->buf + 1, t->tx_buf, t->len);
+
+ ret = i2c_master_send(spi_xcomm->i2c, spi_xcomm->buf, t->len + 1);
+ if (ret < 0)
+ return ret;
+ else if (ret != t->len + 1)
+ return -EIO;
+ } else if (t->rx_buf) {
+ ret = i2c_master_recv(spi_xcomm->i2c, t->rx_buf, t->len);
+ if (ret < 0)
+ return ret;
+ else if (ret != t->len)
+ return -EIO;
+ }
+
+ return t->len;
+}
+
+static int spi_xcomm_transfer_one(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct spi_xcomm *spi_xcomm = spi_master_get_devdata(master);
+ unsigned int settings = spi_xcomm->settings;
+ struct spi_device *spi = msg->spi;
+ unsigned cs_change = 0;
+ struct spi_transfer *t;
+ bool is_first = true;
+ int status = 0;
+ bool is_last;
+
+ spi_xcomm_chipselect(spi_xcomm, spi, true);
+
+ list_for_each_entry(t, &msg->transfers, transfer_list) {
+
+ if (!t->tx_buf && !t->rx_buf && t->len) {
+ status = -EINVAL;
+ break;
+ }
+
+ status = spi_xcomm_setup_transfer(spi_xcomm, spi, t, &settings);
+ if (status < 0)
+ break;
+
+ is_last = list_is_last(&t->transfer_list, &msg->transfers);
+ cs_change = t->cs_change;
+
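+		/*
+		 * Toggle the CS_HIGH setting when cs_change and end-of-message
+		 * disagree, so the chip select is released at the right point.
+		 */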
+		if (cs_change ^ is_last)
+			settings |= SPI_XCOMM_SETTINGS_CS_HIGH;
+		else
+			settings &= ~SPI_XCOMM_SETTINGS_CS_HIGH;
+
+ if (t->rx_buf) {
+ spi_xcomm->settings = settings;
+ status = spi_xcomm_sync_config(spi_xcomm, t->len);
+ if (status < 0)
+ break;
+ } else if (settings != spi_xcomm->settings || is_first) {
+ spi_xcomm->settings = settings;
+ status = spi_xcomm_sync_config(spi_xcomm, 0);
+ if (status < 0)
+ break;
+ }
+
+ if (t->len) {
+ status = spi_xcomm_txrx_bufs(spi_xcomm, spi, t);
+
+ if (status < 0)
+ break;
+
+ if (status > 0)
+ msg->actual_length += status;
+ }
+ status = 0;
+
+ spi_transfer_delay_exec(t);
+
+ is_first = false;
+ }
+
+ if (status != 0 || !cs_change)
+ spi_xcomm_chipselect(spi_xcomm, spi, false);
+
+ msg->status = status;
+ spi_finalize_current_message(master);
+
+ return status;
+}
+
+static int spi_xcomm_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct spi_xcomm *spi_xcomm;
+ struct spi_master *master;
+ int ret;
+
+ master = spi_alloc_master(&i2c->dev, sizeof(*spi_xcomm));
+ if (!master)
+ return -ENOMEM;
+
+ spi_xcomm = spi_master_get_devdata(master);
+ spi_xcomm->i2c = i2c;
+
+ master->num_chipselect = 16;
+ master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_3WIRE;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->flags = SPI_MASTER_HALF_DUPLEX;
+ master->transfer_one_message = spi_xcomm_transfer_one;
+ master->dev.of_node = i2c->dev.of_node;
+ i2c_set_clientdata(i2c, master);
+
+ ret = devm_spi_register_master(&i2c->dev, master);
+ if (ret < 0)
+ spi_master_put(master);
+
+ return ret;
+}
+
+static const struct i2c_device_id spi_xcomm_ids[] = {
+ { "spi-xcomm" },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, spi_xcomm_ids);
+
+static struct i2c_driver spi_xcomm_driver = {
+ .driver = {
+ .name = "spi-xcomm",
+ },
+ .id_table = spi_xcomm_ids,
+ .probe = spi_xcomm_probe,
+};
+module_i2c_driver(spi_xcomm_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("Analog Devices AD-FMCOMMS1-EBZ board I2C-SPI bridge driver");
diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c
new file mode 100644
index 000000000..7377d3b81
--- /dev/null
+++ b/drivers/spi/spi-xilinx.c
@@ -0,0 +1,535 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Xilinx SPI controller driver (master mode only)
+ *
+ * Author: MontaVista Software, Inc.
+ * source@mvista.com
+ *
+ * Copyright (c) 2010 Secret Lab Technologies, Ltd.
+ * Copyright (c) 2009 Intel Corporation
+ * 2002-2007 (c) MontaVista Software, Inc.
+
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+#include <linux/spi/xilinx_spi.h>
+#include <linux/io.h>
+
+#define XILINX_SPI_MAX_CS 32
+
+#define XILINX_SPI_NAME "xilinx_spi"
+
+/* Register definitions as per "OPB Serial Peripheral Interface (SPI) (v1.00e)
+ * Product Specification", DS464
+ */
+#define XSPI_CR_OFFSET 0x60 /* Control Register */
+
+#define XSPI_CR_LOOP 0x01
+#define XSPI_CR_ENABLE 0x02
+#define XSPI_CR_MASTER_MODE 0x04
+#define XSPI_CR_CPOL 0x08
+#define XSPI_CR_CPHA 0x10
+#define XSPI_CR_MODE_MASK (XSPI_CR_CPHA | XSPI_CR_CPOL | \
+ XSPI_CR_LSB_FIRST | XSPI_CR_LOOP)
+#define XSPI_CR_TXFIFO_RESET 0x20
+#define XSPI_CR_RXFIFO_RESET 0x40
+#define XSPI_CR_MANUAL_SSELECT 0x80
+#define XSPI_CR_TRANS_INHIBIT 0x100
+#define XSPI_CR_LSB_FIRST 0x200
+
+#define XSPI_SR_OFFSET 0x64 /* Status Register */
+
+#define XSPI_SR_RX_EMPTY_MASK 0x01 /* Receive FIFO is empty */
+#define XSPI_SR_RX_FULL_MASK 0x02 /* Receive FIFO is full */
+#define XSPI_SR_TX_EMPTY_MASK 0x04 /* Transmit FIFO is empty */
+#define XSPI_SR_TX_FULL_MASK 0x08 /* Transmit FIFO is full */
+#define XSPI_SR_MODE_FAULT_MASK 0x10 /* Mode fault error */
+
+#define XSPI_TXD_OFFSET 0x68 /* Data Transmit Register */
+#define XSPI_RXD_OFFSET 0x6c /* Data Receive Register */
+
+#define XSPI_SSR_OFFSET 0x70 /* 32-bit Slave Select Register */
+
+/* Register definitions as per "OPB IPIF (v3.01c) Product Specification", DS414
+ * IPIF registers are 32 bit
+ */
+#define XIPIF_V123B_DGIER_OFFSET 0x1c /* IPIF global int enable reg */
+#define XIPIF_V123B_GINTR_ENABLE 0x80000000
+
+#define XIPIF_V123B_IISR_OFFSET 0x20 /* IPIF interrupt status reg */
+#define XIPIF_V123B_IIER_OFFSET 0x28 /* IPIF interrupt enable reg */
+
+#define XSPI_INTR_MODE_FAULT 0x01 /* Mode fault error */
+#define XSPI_INTR_SLAVE_MODE_FAULT 0x02 /* Selected as slave while
+ * disabled */
+#define XSPI_INTR_TX_EMPTY 0x04 /* TxFIFO is empty */
+#define XSPI_INTR_TX_UNDERRUN 0x08 /* TxFIFO was underrun */
+#define XSPI_INTR_RX_FULL 0x10 /* RxFIFO is full */
+#define XSPI_INTR_RX_OVERRUN 0x20 /* RxFIFO was overrun */
+#define XSPI_INTR_TX_HALF_EMPTY 0x40 /* TxFIFO is half empty */
+
+#define XIPIF_V123B_RESETR_OFFSET 0x40 /* IPIF reset register */
+#define XIPIF_V123B_RESET_MASK 0x0a /* the value to write */
+
+struct xilinx_spi {
+ /* bitbang has to be first */
+ struct spi_bitbang bitbang;
+ struct completion done;
+ void __iomem *regs; /* virt. address of the control registers */
+
+ int irq;
+
+	u8 *rx_ptr;		/* pointer in the Rx buffer */
+	const u8 *tx_ptr;	/* pointer in the Tx buffer */
+ u8 bytes_per_word;
+ int buffer_size; /* buffer size in words */
+ u32 cs_inactive; /* Level of the CS pins when inactive*/
+ unsigned int (*read_fn)(void __iomem *);
+ void (*write_fn)(u32, void __iomem *);
+};
+
+static void xspi_write32(u32 val, void __iomem *addr)
+{
+ iowrite32(val, addr);
+}
+
+static unsigned int xspi_read32(void __iomem *addr)
+{
+ return ioread32(addr);
+}
+
+static void xspi_write32_be(u32 val, void __iomem *addr)
+{
+ iowrite32be(val, addr);
+}
+
+static unsigned int xspi_read32_be(void __iomem *addr)
+{
+ return ioread32be(addr);
+}
+
+static void xilinx_spi_tx(struct xilinx_spi *xspi)
+{
+ u32 data = 0;
+
+ if (!xspi->tx_ptr) {
+ xspi->write_fn(0, xspi->regs + XSPI_TXD_OFFSET);
+ return;
+ }
+
+ switch (xspi->bytes_per_word) {
+ case 1:
+ data = *(u8 *)(xspi->tx_ptr);
+ break;
+ case 2:
+ data = *(u16 *)(xspi->tx_ptr);
+ break;
+ case 4:
+ data = *(u32 *)(xspi->tx_ptr);
+ break;
+ }
+
+ xspi->write_fn(data, xspi->regs + XSPI_TXD_OFFSET);
+ xspi->tx_ptr += xspi->bytes_per_word;
+}
+
+static void xilinx_spi_rx(struct xilinx_spi *xspi)
+{
+ u32 data = xspi->read_fn(xspi->regs + XSPI_RXD_OFFSET);
+
+ if (!xspi->rx_ptr)
+ return;
+
+ switch (xspi->bytes_per_word) {
+ case 1:
+ *(u8 *)(xspi->rx_ptr) = data;
+ break;
+ case 2:
+ *(u16 *)(xspi->rx_ptr) = data;
+ break;
+ case 4:
+ *(u32 *)(xspi->rx_ptr) = data;
+ break;
+ }
+
+ xspi->rx_ptr += xspi->bytes_per_word;
+}
+
+static void xspi_init_hw(struct xilinx_spi *xspi)
+{
+ void __iomem *regs_base = xspi->regs;
+
+ /* Reset the SPI device */
+ xspi->write_fn(XIPIF_V123B_RESET_MASK,
+ regs_base + XIPIF_V123B_RESETR_OFFSET);
+ /* Enable the transmit empty interrupt, which we use to determine
+ * progress on the transmission.
+ */
+ xspi->write_fn(XSPI_INTR_TX_EMPTY,
+ regs_base + XIPIF_V123B_IIER_OFFSET);
+ /* Disable the global IPIF interrupt */
+ xspi->write_fn(0, regs_base + XIPIF_V123B_DGIER_OFFSET);
+ /* Deselect the slave on the SPI bus */
+ xspi->write_fn(0xffff, regs_base + XSPI_SSR_OFFSET);
+ /* Disable the transmitter, enable Manual Slave Select Assertion,
+ * put SPI controller into master mode, and enable it */
+ xspi->write_fn(XSPI_CR_MANUAL_SSELECT | XSPI_CR_MASTER_MODE |
+ XSPI_CR_ENABLE | XSPI_CR_TXFIFO_RESET | XSPI_CR_RXFIFO_RESET,
+ regs_base + XSPI_CR_OFFSET);
+}
+
+static void xilinx_spi_chipselect(struct spi_device *spi, int is_on)
+{
+ struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
+ u16 cr;
+ u32 cs;
+
+ if (is_on == BITBANG_CS_INACTIVE) {
+ /* Deselect the slave on the SPI bus */
+ xspi->write_fn(xspi->cs_inactive, xspi->regs + XSPI_SSR_OFFSET);
+ return;
+ }
+
+ /* Set the SPI clock phase and polarity */
+ cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET) & ~XSPI_CR_MODE_MASK;
+ if (spi->mode & SPI_CPHA)
+ cr |= XSPI_CR_CPHA;
+ if (spi->mode & SPI_CPOL)
+ cr |= XSPI_CR_CPOL;
+ if (spi->mode & SPI_LSB_FIRST)
+ cr |= XSPI_CR_LSB_FIRST;
+ if (spi->mode & SPI_LOOP)
+ cr |= XSPI_CR_LOOP;
+ xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
+
+ /* We do not check spi->max_speed_hz here as the SPI clock
+ * frequency is not software programmable (the IP block design
+ * parameter)
+ */
+
+ cs = xspi->cs_inactive;
+ cs ^= BIT(spi->chip_select);
+
+ /* Activate the chip select */
+ xspi->write_fn(cs, xspi->regs + XSPI_SSR_OFFSET);
+}
+
+/* spi_bitbang requires custom setup_transfer() to be defined if there is a
+ * custom txrx_bufs().
+ */
+static int xilinx_spi_setup_transfer(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
+
+ if (spi->mode & SPI_CS_HIGH)
+ xspi->cs_inactive &= ~BIT(spi->chip_select);
+ else
+ xspi->cs_inactive |= BIT(spi->chip_select);
+
+ return 0;
+}
+
+static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
+ int remaining_words; /* the number of words left to transfer */
+ bool use_irq = false;
+ u16 cr = 0;
+
+ /* We get here with transmitter inhibited */
+
+ xspi->tx_ptr = t->tx_buf;
+ xspi->rx_ptr = t->rx_buf;
+ remaining_words = t->len / xspi->bytes_per_word;
+
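+	/*
+	 * Use interrupt-driven completion only when the transfer is larger
+	 * than the detected FIFO size; shorter transfers are simply polled.
+	 */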
+ if (xspi->irq >= 0 && remaining_words > xspi->buffer_size) {
+ u32 isr;
+ use_irq = true;
+		/* Inhibit irq to avoid spurious irqs on tx_empty */
+ cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET);
+ xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT,
+ xspi->regs + XSPI_CR_OFFSET);
+ /* ACK old irqs (if any) */
+ isr = xspi->read_fn(xspi->regs + XIPIF_V123B_IISR_OFFSET);
+ if (isr)
+ xspi->write_fn(isr,
+ xspi->regs + XIPIF_V123B_IISR_OFFSET);
+ /* Enable the global IPIF interrupt */
+ xspi->write_fn(XIPIF_V123B_GINTR_ENABLE,
+ xspi->regs + XIPIF_V123B_DGIER_OFFSET);
+ reinit_completion(&xspi->done);
+ }
+
+ while (remaining_words) {
+ int n_words, tx_words, rx_words;
+ u32 sr;
+ int stalled;
+
+ n_words = min(remaining_words, xspi->buffer_size);
+
+ tx_words = n_words;
+ while (tx_words--)
+ xilinx_spi_tx(xspi);
+
+ /* Start the transfer by not inhibiting the transmitter any
+ * longer
+ */
+
+ if (use_irq) {
+ xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
+ wait_for_completion(&xspi->done);
+ /* A transmit has just completed. Process received data
+ * and check for more data to transmit. Always inhibit
+ * the transmitter while the Isr refills the transmit
+ * register/FIFO, or make sure it is stopped if we're
+ * done.
+ */
+ xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT,
+ xspi->regs + XSPI_CR_OFFSET);
+ sr = XSPI_SR_TX_EMPTY_MASK;
+ } else
+ sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
+
+ /* Read out all the data from the Rx FIFO */
+ rx_words = n_words;
+ stalled = 10;
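+		/* allow a few status reads before declaring the core stalled */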
+ while (rx_words) {
+ if (rx_words == n_words && !(stalled--) &&
+ !(sr & XSPI_SR_TX_EMPTY_MASK) &&
+ (sr & XSPI_SR_RX_EMPTY_MASK)) {
+ dev_err(&spi->dev,
+ "Detected stall. Check C_SPI_MODE and C_SPI_MEMORY\n");
+ xspi_init_hw(xspi);
+ return -EIO;
+ }
+
+ if ((sr & XSPI_SR_TX_EMPTY_MASK) && (rx_words > 1)) {
+ xilinx_spi_rx(xspi);
+ rx_words--;
+ continue;
+ }
+
+ sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
+ if (!(sr & XSPI_SR_RX_EMPTY_MASK)) {
+ xilinx_spi_rx(xspi);
+ rx_words--;
+ }
+ }
+
+ remaining_words -= n_words;
+ }
+
+ if (use_irq) {
+ xspi->write_fn(0, xspi->regs + XIPIF_V123B_DGIER_OFFSET);
+ xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
+ }
+
+ return t->len;
+}
+
+
+/* This driver supports single master mode only. Hence Tx FIFO Empty
+ * is the only interrupt we care about.
+ * Receive FIFO Overrun, Transmit FIFO Underrun, Mode Fault, and Slave Mode
+ * Fault should not happen.
+ */
+static irqreturn_t xilinx_spi_irq(int irq, void *dev_id)
+{
+ struct xilinx_spi *xspi = dev_id;
+ u32 ipif_isr;
+
+ /* Get the IPIF interrupts, and clear them immediately */
+ ipif_isr = xspi->read_fn(xspi->regs + XIPIF_V123B_IISR_OFFSET);
+ xspi->write_fn(ipif_isr, xspi->regs + XIPIF_V123B_IISR_OFFSET);
+
+ if (ipif_isr & XSPI_INTR_TX_EMPTY) { /* Transmission completed */
+ complete(&xspi->done);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int xilinx_spi_find_buffer_size(struct xilinx_spi *xspi)
+{
+ u8 sr;
+ int n_words = 0;
+
+ /*
+ * Before the buffer_size detection we reset the core
+ * to make sure we start with a clean state.
+ */
+ xspi->write_fn(XIPIF_V123B_RESET_MASK,
+ xspi->regs + XIPIF_V123B_RESETR_OFFSET);
+
+ /* Fill the Tx FIFO with as many words as possible */
+ do {
+ xspi->write_fn(0, xspi->regs + XSPI_TXD_OFFSET);
+ sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
+ n_words++;
+ } while (!(sr & XSPI_SR_TX_FULL_MASK));
+
+ return n_words;
+}
+
+static const struct of_device_id xilinx_spi_of_match[] = {
+ { .compatible = "xlnx,axi-quad-spi-1.00.a", },
+ { .compatible = "xlnx,xps-spi-2.00.a", },
+ { .compatible = "xlnx,xps-spi-2.00.b", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, xilinx_spi_of_match);
+
+static int xilinx_spi_probe(struct platform_device *pdev)
+{
+ struct xilinx_spi *xspi;
+ struct xspi_platform_data *pdata;
+ struct resource *res;
+ int ret, num_cs = 0, bits_per_word;
+ struct spi_master *master;
+ u32 tmp;
+ u8 i;
+
+ pdata = dev_get_platdata(&pdev->dev);
+ if (pdata) {
+ num_cs = pdata->num_chipselect;
+ bits_per_word = pdata->bits_per_word;
+ } else {
+ of_property_read_u32(pdev->dev.of_node, "xlnx,num-ss-bits",
+ &num_cs);
+ ret = of_property_read_u32(pdev->dev.of_node,
+ "xlnx,num-transfer-bits",
+ &bits_per_word);
+ if (ret)
+ bits_per_word = 8;
+ }
+
+ if (!num_cs) {
+ dev_err(&pdev->dev,
+ "Missing slave select configuration data\n");
+ return -EINVAL;
+ }
+
+ if (num_cs > XILINX_SPI_MAX_CS) {
+ dev_err(&pdev->dev, "Invalid number of spi slaves\n");
+ return -EINVAL;
+ }
+
+ master = devm_spi_alloc_master(&pdev->dev, sizeof(struct xilinx_spi));
+ if (!master)
+ return -ENODEV;
+
+ /* the spi->mode bits understood by this driver: */
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_LOOP |
+ SPI_CS_HIGH;
+
+ xspi = spi_master_get_devdata(master);
+ xspi->cs_inactive = 0xffffffff;
+ xspi->bitbang.master = master;
+ xspi->bitbang.chipselect = xilinx_spi_chipselect;
+ xspi->bitbang.setup_transfer = xilinx_spi_setup_transfer;
+ xspi->bitbang.txrx_bufs = xilinx_spi_txrx_bufs;
+ init_completion(&xspi->done);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xspi->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(xspi->regs))
+ return PTR_ERR(xspi->regs);
+
+ master->bus_num = pdev->id;
+ master->num_chipselect = num_cs;
+ master->dev.of_node = pdev->dev.of_node;
+
+ /*
+	 * Detect endianness of the IP via the loop bit in CR. Detection
+	 * must be done before reset is sent because an incorrect reset
+	 * value generates an error interrupt.
+	 * Set up the little-endian helpers first, write the loop bit through
+	 * them, and check whether it reads back as expected.
+ */
+ xspi->read_fn = xspi_read32;
+ xspi->write_fn = xspi_write32;
+
+ xspi->write_fn(XSPI_CR_LOOP, xspi->regs + XSPI_CR_OFFSET);
+ tmp = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET);
+ tmp &= XSPI_CR_LOOP;
+ if (tmp != XSPI_CR_LOOP) {
+ xspi->read_fn = xspi_read32_be;
+ xspi->write_fn = xspi_write32_be;
+ }
+
+ master->bits_per_word_mask = SPI_BPW_MASK(bits_per_word);
+ xspi->bytes_per_word = bits_per_word / 8;
+ xspi->buffer_size = xilinx_spi_find_buffer_size(xspi);
+
+ xspi->irq = platform_get_irq(pdev, 0);
+ if (xspi->irq < 0 && xspi->irq != -ENXIO) {
+ return xspi->irq;
+ } else if (xspi->irq >= 0) {
+ /* Register for SPI Interrupt */
+ ret = devm_request_irq(&pdev->dev, xspi->irq, xilinx_spi_irq, 0,
+ dev_name(&pdev->dev), xspi);
+ if (ret)
+ return ret;
+ }
+
+ /* SPI controller initializations */
+ xspi_init_hw(xspi);
+
+ ret = spi_bitbang_start(&xspi->bitbang);
+ if (ret) {
+ dev_err(&pdev->dev, "spi_bitbang_start FAILED\n");
+ return ret;
+ }
+
+ dev_info(&pdev->dev, "at %pR, irq=%d\n", res, xspi->irq);
+
+ if (pdata) {
+ for (i = 0; i < pdata->num_devices; i++)
+ spi_new_device(master, pdata->devices + i);
+ }
+
+ platform_set_drvdata(pdev, master);
+ return 0;
+}
+
+static int xilinx_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct xilinx_spi *xspi = spi_master_get_devdata(master);
+ void __iomem *regs_base = xspi->regs;
+
+ spi_bitbang_stop(&xspi->bitbang);
+
+ /* Disable all the interrupts just in case */
+ xspi->write_fn(0, regs_base + XIPIF_V123B_IIER_OFFSET);
+ /* Disable the global IPIF interrupt */
+ xspi->write_fn(0, regs_base + XIPIF_V123B_DGIER_OFFSET);
+
+ spi_master_put(xspi->bitbang.master);
+
+ return 0;
+}
+
+/* work with hotplug and coldplug */
+MODULE_ALIAS("platform:" XILINX_SPI_NAME);
+
+static struct platform_driver xilinx_spi_driver = {
+ .probe = xilinx_spi_probe,
+ .remove = xilinx_spi_remove,
+ .driver = {
+ .name = XILINX_SPI_NAME,
+ .of_match_table = xilinx_spi_of_match,
+ },
+};
+module_platform_driver(xilinx_spi_driver);
+
+MODULE_AUTHOR("MontaVista Software, Inc. <source@mvista.com>");
+MODULE_DESCRIPTION("Xilinx SPI driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-xlp.c b/drivers/spi/spi-xlp.c
new file mode 100644
index 000000000..e5707fe5c
--- /dev/null
+++ b/drivers/spi/spi-xlp.c
@@ -0,0 +1,449 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2003-2015 Broadcom Corporation
+ * All Rights Reserved
+ */
+#include <linux/acpi.h>
+#include <linux/clk.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/interrupt.h>
+
+/* SPI Configuration Register */
+#define XLP_SPI_CONFIG 0x00
+#define XLP_SPI_CPHA BIT(0)
+#define XLP_SPI_CPOL BIT(1)
+#define XLP_SPI_CS_POL BIT(2)
+#define XLP_SPI_TXMISO_EN BIT(3)
+#define XLP_SPI_TXMOSI_EN BIT(4)
+#define XLP_SPI_RXMISO_EN BIT(5)
+#define XLP_SPI_CS_LSBFE BIT(10)
+#define XLP_SPI_RXCAP_EN BIT(11)
+
+/* SPI Frequency Divider Register */
+#define XLP_SPI_FDIV 0x04
+
+/* SPI Command Register */
+#define XLP_SPI_CMD 0x08
+#define XLP_SPI_CMD_IDLE_MASK 0x0
+#define XLP_SPI_CMD_TX_MASK 0x1
+#define XLP_SPI_CMD_RX_MASK 0x2
+#define XLP_SPI_CMD_TXRX_MASK 0x3
+#define XLP_SPI_CMD_CONT BIT(4)
+#define XLP_SPI_XFR_BITCNT_SHIFT 16
+
+/* SPI Status Register */
+#define XLP_SPI_STATUS 0x0c
+#define XLP_SPI_XFR_PENDING BIT(0)
+#define XLP_SPI_XFR_DONE BIT(1)
+#define XLP_SPI_TX_INT BIT(2)
+#define XLP_SPI_RX_INT BIT(3)
+#define XLP_SPI_TX_UF BIT(4)
+#define XLP_SPI_RX_OF BIT(5)
+#define XLP_SPI_STAT_MASK 0x3f
+
+/* SPI Interrupt Enable Register */
+#define XLP_SPI_INTR_EN 0x10
+#define XLP_SPI_INTR_DONE BIT(0)
+#define XLP_SPI_INTR_TXTH BIT(1)
+#define XLP_SPI_INTR_RXTH BIT(2)
+#define XLP_SPI_INTR_TXUF BIT(3)
+#define XLP_SPI_INTR_RXOF BIT(4)
+
+/* SPI FIFO Threshold Register */
+#define XLP_SPI_FIFO_THRESH 0x14
+
+/* SPI FIFO Word Count Register */
+#define XLP_SPI_FIFO_WCNT 0x18
+#define XLP_SPI_RXFIFO_WCNT_MASK 0xf
+#define XLP_SPI_TXFIFO_WCNT_MASK 0xf0
+#define XLP_SPI_TXFIFO_WCNT_SHIFT 4
+
+/* SPI Transmit Data FIFO Register */
+#define XLP_SPI_TXDATA_FIFO 0x1c
+
+/* SPI Receive Data FIFO Register */
+#define XLP_SPI_RXDATA_FIFO 0x20
+
+/* SPI System Control Register */
+#define XLP_SPI_SYSCTRL 0x100
+#define XLP_SPI_SYS_RESET BIT(0)
+#define XLP_SPI_SYS_CLKDIS BIT(1)
+#define XLP_SPI_SYS_PMEN BIT(8)
+
+#define SPI_CS_OFFSET 0x40
+#define XLP_SPI_TXRXTH 0x80
+#define XLP_SPI_FIFO_SIZE 8
+#define XLP_SPI_MAX_CS 4
+#define XLP_SPI_DEFAULT_FREQ 133333333
+#define XLP_SPI_FDIV_MIN 4
+#define XLP_SPI_FDIV_MAX 65535
+/*
+ * The SPI controller can transfer only 28 bytes reliably at a time, so
+ * larger transfers are split into 28-byte chunks.
+ */
+#define XLP_SPI_XFER_SIZE 28
+
+struct xlp_spi_priv {
+ struct device dev; /* device structure */
+ void __iomem *base; /* spi registers base address */
+ const u8 *tx_buf; /* tx data buffer */
+ u8 *rx_buf; /* rx data buffer */
+ int tx_len; /* tx xfer length */
+ int rx_len; /* rx xfer length */
+ int txerrors; /* TXFIFO underflow count */
+ int rxerrors; /* RXFIFO overflow count */
+ int cs; /* slave device chip select */
+ u32 spi_clk; /* spi clock frequency */
+ bool cmd_cont; /* cs active */
+ struct completion done; /* completion notification */
+};
+
+static inline u32 xlp_spi_reg_read(struct xlp_spi_priv *priv,
+ int cs, int regoff)
+{
+ return readl(priv->base + regoff + cs * SPI_CS_OFFSET);
+}
+
+static inline void xlp_spi_reg_write(struct xlp_spi_priv *priv, int cs,
+ int regoff, u32 val)
+{
+ writel(val, priv->base + regoff + cs * SPI_CS_OFFSET);
+}
+
+static inline void xlp_spi_sysctl_write(struct xlp_spi_priv *priv,
+ int regoff, u32 val)
+{
+ writel(val, priv->base + regoff);
+}
+
+/*
+ * Setup global SPI_SYSCTRL register for all SPI channels.
+ */
+static void xlp_spi_sysctl_setup(struct xlp_spi_priv *xspi)
+{
+ int cs;
+
+ for (cs = 0; cs < XLP_SPI_MAX_CS; cs++)
+ xlp_spi_sysctl_write(xspi, XLP_SPI_SYSCTRL,
+ XLP_SPI_SYS_RESET << cs);
+ xlp_spi_sysctl_write(xspi, XLP_SPI_SYSCTRL, XLP_SPI_SYS_PMEN);
+}
+
+static int xlp_spi_setup(struct spi_device *spi)
+{
+ struct xlp_spi_priv *xspi;
+ u32 fdiv, cfg;
+ int cs;
+
+ xspi = spi_master_get_devdata(spi->master);
+ cs = spi->chip_select;
+ /*
+ * The value of fdiv must be between 4 and 65535.
+ */
+ fdiv = DIV_ROUND_UP(xspi->spi_clk, spi->max_speed_hz);
+ if (fdiv > XLP_SPI_FDIV_MAX)
+ fdiv = XLP_SPI_FDIV_MAX;
+ else if (fdiv < XLP_SPI_FDIV_MIN)
+ fdiv = XLP_SPI_FDIV_MIN;
+
+ xlp_spi_reg_write(xspi, cs, XLP_SPI_FDIV, fdiv);
+ xlp_spi_reg_write(xspi, cs, XLP_SPI_FIFO_THRESH, XLP_SPI_TXRXTH);
+ cfg = xlp_spi_reg_read(xspi, cs, XLP_SPI_CONFIG);
+ if (spi->mode & SPI_CPHA)
+ cfg |= XLP_SPI_CPHA;
+ else
+ cfg &= ~XLP_SPI_CPHA;
+ if (spi->mode & SPI_CPOL)
+ cfg |= XLP_SPI_CPOL;
+ else
+ cfg &= ~XLP_SPI_CPOL;
+ if (!(spi->mode & SPI_CS_HIGH))
+ cfg |= XLP_SPI_CS_POL;
+ else
+ cfg &= ~XLP_SPI_CS_POL;
+ if (spi->mode & SPI_LSB_FIRST)
+ cfg |= XLP_SPI_CS_LSBFE;
+ else
+ cfg &= ~XLP_SPI_CS_LSBFE;
+
+ cfg |= XLP_SPI_TXMOSI_EN | XLP_SPI_RXMISO_EN;
+ if (fdiv == 4)
+ cfg |= XLP_SPI_RXCAP_EN;
+ xlp_spi_reg_write(xspi, cs, XLP_SPI_CONFIG, cfg);
+
+ return 0;
+}
+
+static void xlp_spi_read_rxfifo(struct xlp_spi_priv *xspi)
+{
+ u32 rx_data, rxfifo_cnt;
+ int i, j, nbytes;
+
+ rxfifo_cnt = xlp_spi_reg_read(xspi, xspi->cs, XLP_SPI_FIFO_WCNT);
+ rxfifo_cnt &= XLP_SPI_RXFIFO_WCNT_MASK;
+ while (rxfifo_cnt) {
+ rx_data = xlp_spi_reg_read(xspi, xspi->cs, XLP_SPI_RXDATA_FIFO);
+ j = 0;
+ nbytes = min(xspi->rx_len, 4);
+ for (i = nbytes - 1; i >= 0; i--, j++)
+ xspi->rx_buf[i] = (rx_data >> (j * 8)) & 0xff;
+
+ xspi->rx_len -= nbytes;
+ xspi->rx_buf += nbytes;
+ rxfifo_cnt--;
+ }
+}
+
+static void xlp_spi_fill_txfifo(struct xlp_spi_priv *xspi)
+{
+ u32 tx_data, txfifo_cnt;
+ int i, j, nbytes;
+
+ txfifo_cnt = xlp_spi_reg_read(xspi, xspi->cs, XLP_SPI_FIFO_WCNT);
+ txfifo_cnt &= XLP_SPI_TXFIFO_WCNT_MASK;
+ txfifo_cnt >>= XLP_SPI_TXFIFO_WCNT_SHIFT;
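+	/*
+	 * Pack up to four buffer bytes into each FIFO word, with the first
+	 * buffer byte placed in the most significant of the bytes used.
+	 */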
+ while (xspi->tx_len && (txfifo_cnt < XLP_SPI_FIFO_SIZE)) {
+ j = 0;
+ tx_data = 0;
+ nbytes = min(xspi->tx_len, 4);
+ for (i = nbytes - 1; i >= 0; i--, j++)
+ tx_data |= xspi->tx_buf[i] << (j * 8);
+
+ xlp_spi_reg_write(xspi, xspi->cs, XLP_SPI_TXDATA_FIFO, tx_data);
+ xspi->tx_len -= nbytes;
+ xspi->tx_buf += nbytes;
+ txfifo_cnt++;
+ }
+}
+
+static irqreturn_t xlp_spi_interrupt(int irq, void *dev_id)
+{
+ struct xlp_spi_priv *xspi = dev_id;
+ u32 stat;
+
+ stat = xlp_spi_reg_read(xspi, xspi->cs, XLP_SPI_STATUS) &
+ XLP_SPI_STAT_MASK;
+ if (!stat)
+ return IRQ_NONE;
+
+ if (stat & XLP_SPI_TX_INT) {
+ if (xspi->tx_len)
+ xlp_spi_fill_txfifo(xspi);
+ if (stat & XLP_SPI_TX_UF)
+ xspi->txerrors++;
+ }
+
+ if (stat & XLP_SPI_RX_INT) {
+ if (xspi->rx_len)
+ xlp_spi_read_rxfifo(xspi);
+ if (stat & XLP_SPI_RX_OF)
+ xspi->rxerrors++;
+ }
+
+ /* write status back to clear interrupts */
+ xlp_spi_reg_write(xspi, xspi->cs, XLP_SPI_STATUS, stat);
+ if (stat & XLP_SPI_XFR_DONE)
+ complete(&xspi->done);
+
+ return IRQ_HANDLED;
+}
+
+static void xlp_spi_send_cmd(struct xlp_spi_priv *xspi, int xfer_len,
+ int cmd_cont)
+{
+ u32 cmd = 0;
+
+ if (xspi->tx_buf)
+ cmd |= XLP_SPI_CMD_TX_MASK;
+ if (xspi->rx_buf)
+ cmd |= XLP_SPI_CMD_RX_MASK;
+ if (cmd_cont)
+ cmd |= XLP_SPI_CMD_CONT;
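+	/* the transfer bit-count field is programmed as (number of bits - 1) */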
+ cmd |= ((xfer_len * 8 - 1) << XLP_SPI_XFR_BITCNT_SHIFT);
+ xlp_spi_reg_write(xspi, xspi->cs, XLP_SPI_CMD, cmd);
+}
+
+static int xlp_spi_xfer_block(struct xlp_spi_priv *xs,
+ const unsigned char *tx_buf,
+ unsigned char *rx_buf, int xfer_len, int cmd_cont)
+{
+ int timeout;
+ u32 intr_mask = 0;
+
+ xs->tx_buf = tx_buf;
+ xs->rx_buf = rx_buf;
+ xs->tx_len = (xs->tx_buf == NULL) ? 0 : xfer_len;
+ xs->rx_len = (xs->rx_buf == NULL) ? 0 : xfer_len;
+ xs->txerrors = xs->rxerrors = 0;
+
+ /* fill TXDATA_FIFO, then send the CMD */
+ if (xs->tx_len)
+ xlp_spi_fill_txfifo(xs);
+
+ xlp_spi_send_cmd(xs, xfer_len, cmd_cont);
+
+ /*
+	 * Spurious TX interrupts have been observed, so enable the TX
+	 * interrupts only when there is data to transmit; for RX-only
+	 * transfers enable just the RX interrupts.
+ */
+ if (xs->tx_len)
+ intr_mask |= XLP_SPI_INTR_TXTH | XLP_SPI_INTR_TXUF |
+ XLP_SPI_INTR_RXTH | XLP_SPI_INTR_RXOF;
+ else
+ intr_mask |= XLP_SPI_INTR_RXTH | XLP_SPI_INTR_RXOF;
+
+ intr_mask |= XLP_SPI_INTR_DONE;
+ xlp_spi_reg_write(xs, xs->cs, XLP_SPI_INTR_EN, intr_mask);
+
+ timeout = wait_for_completion_timeout(&xs->done,
+ msecs_to_jiffies(1000));
+ /* Disable interrupts */
+ xlp_spi_reg_write(xs, xs->cs, XLP_SPI_INTR_EN, 0x0);
+ if (!timeout) {
+		dev_err(&xs->dev, "xfer timed out!\n");
+ goto out;
+ }
+ if (xs->txerrors || xs->rxerrors)
+ dev_err(&xs->dev, "Over/Underflow rx %d tx %d xfer %d!\n",
+ xs->rxerrors, xs->txerrors, xfer_len);
+
+ return xfer_len;
+out:
+ return -ETIMEDOUT;
+}
+
+static int xlp_spi_txrx_bufs(struct xlp_spi_priv *xs, struct spi_transfer *t)
+{
+ int bytesleft, sz;
+ unsigned char *rx_buf;
+ const unsigned char *tx_buf;
+
+ tx_buf = t->tx_buf;
+ rx_buf = t->rx_buf;
+ bytesleft = t->len;
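+	/*
+	 * Split the transfer into 28-byte blocks; keep the chip select
+	 * asserted (cmd_cont) for every block except the final one of the
+	 * last transfer in the message.
+	 */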
+ while (bytesleft) {
+ if (bytesleft > XLP_SPI_XFER_SIZE)
+ sz = xlp_spi_xfer_block(xs, tx_buf, rx_buf,
+ XLP_SPI_XFER_SIZE, 1);
+ else
+ sz = xlp_spi_xfer_block(xs, tx_buf, rx_buf,
+ bytesleft, xs->cmd_cont);
+ if (sz < 0)
+ return sz;
+ bytesleft -= sz;
+ if (tx_buf)
+ tx_buf += sz;
+ if (rx_buf)
+ rx_buf += sz;
+ }
+ return bytesleft;
+}
+
+static int xlp_spi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct xlp_spi_priv *xspi = spi_master_get_devdata(master);
+ int ret = 0;
+
+ xspi->cs = spi->chip_select;
+ xspi->dev = spi->dev;
+
+ if (spi_transfer_is_last(master, t))
+ xspi->cmd_cont = 0;
+ else
+ xspi->cmd_cont = 1;
+
+ if (xlp_spi_txrx_bufs(xspi, t))
+ ret = -EIO;
+
+ spi_finalize_current_transfer(master);
+ return ret;
+}
+
+static int xlp_spi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct xlp_spi_priv *xspi;
+ struct clk *clk;
+ int irq, err;
+
+ xspi = devm_kzalloc(&pdev->dev, sizeof(*xspi), GFP_KERNEL);
+ if (!xspi)
+ return -ENOMEM;
+
+ xspi->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(xspi->base))
+ return PTR_ERR(xspi->base);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+ err = devm_request_irq(&pdev->dev, irq, xlp_spi_interrupt, 0,
+ pdev->name, xspi);
+ if (err) {
+ dev_err(&pdev->dev, "unable to request irq %d\n", irq);
+ return err;
+ }
+
+ clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "could not get spi clock\n");
+ return PTR_ERR(clk);
+ }
+
+ xspi->spi_clk = clk_get_rate(clk);
+
+ master = spi_alloc_master(&pdev->dev, 0);
+ if (!master) {
+ dev_err(&pdev->dev, "could not alloc master\n");
+ return -ENOMEM;
+ }
+
+ master->bus_num = 0;
+ master->num_chipselect = XLP_SPI_MAX_CS;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ master->setup = xlp_spi_setup;
+ master->transfer_one = xlp_spi_transfer_one;
+ master->dev.of_node = pdev->dev.of_node;
+
+ init_completion(&xspi->done);
+ spi_master_set_devdata(master, xspi);
+ xlp_spi_sysctl_setup(xspi);
+
+ /* register spi controller */
+ err = devm_spi_register_master(&pdev->dev, master);
+ if (err) {
+ dev_err(&pdev->dev, "spi register master failed!\n");
+ spi_master_put(master);
+ return err;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id xlp_spi_acpi_match[] = {
+ { "BRCM900D", 0 },
+ { "CAV900D", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, xlp_spi_acpi_match);
+#endif
+
+static struct platform_driver xlp_spi_driver = {
+ .probe = xlp_spi_probe,
+ .driver = {
+ .name = "xlp-spi",
+ .acpi_match_table = ACPI_PTR(xlp_spi_acpi_match),
+ },
+};
+module_platform_driver(xlp_spi_driver);
+
+MODULE_AUTHOR("Kamlakant Patel <kamlakant.patel@broadcom.com>");
+MODULE_DESCRIPTION("Netlogic XLP SPI controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-xtensa-xtfpga.c b/drivers/spi/spi-xtensa-xtfpga.c
new file mode 100644
index 000000000..2fa7608f9
--- /dev/null
+++ b/drivers/spi/spi-xtensa-xtfpga.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Xtensa xtfpga SPI controller driver
+ *
+ * Copyright (c) 2014 Cadence Design Systems Inc.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+
+#define XTFPGA_SPI_NAME "xtfpga_spi"
+
+#define XTFPGA_SPI_START 0x0
+#define XTFPGA_SPI_BUSY 0x4
+#define XTFPGA_SPI_DATA 0x8
+
+#define BUSY_WAIT_US 100
+
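+/**
+ * struct xtfpga_spi - xtfpga SPI controller state
+ * @bitbang:	spi_bitbang framework state
+ * @regs:	mapped controller registers
+ * @data:	shift accumulator; bits are collected here until a full
+ *		16-bit word can be pushed to the DATA register
+ * @data_sz:	number of bits currently held in @data
+ */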
+struct xtfpga_spi {
+ struct spi_bitbang bitbang;
+ void __iomem *regs;
+ u32 data;
+ unsigned data_sz;
+};
+
+static inline void xtfpga_spi_write32(const struct xtfpga_spi *spi,
+ unsigned addr, u32 val)
+{
+ __raw_writel(val, spi->regs + addr);
+}
+
+static inline unsigned int xtfpga_spi_read32(const struct xtfpga_spi *spi,
+ unsigned addr)
+{
+ return __raw_readl(spi->regs + addr);
+}
+
+static inline void xtfpga_spi_wait_busy(struct xtfpga_spi *xspi)
+{
+ unsigned i;
+
+ for (i = 0; xtfpga_spi_read32(xspi, XTFPGA_SPI_BUSY) &&
+ i < BUSY_WAIT_US; ++i)
+ udelay(1);
+ WARN_ON_ONCE(i == BUSY_WAIT_US);
+}
+
+static u32 xtfpga_spi_txrx_word(struct spi_device *spi, unsigned nsecs,
+ u32 v, u8 bits, unsigned flags)
+{
+ struct xtfpga_spi *xspi = spi_master_get_devdata(spi->master);
+
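+	/*
+	 * Collect bits until a 16-bit word is complete, then write it to the
+	 * DATA register and pulse START; xtfpga_spi_wait_busy() polls the
+	 * BUSY register until the word has been shifted out.
+	 */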
+ xspi->data = (xspi->data << bits) | (v & GENMASK(bits - 1, 0));
+ xspi->data_sz += bits;
+ if (xspi->data_sz >= 16) {
+ xtfpga_spi_write32(xspi, XTFPGA_SPI_DATA,
+ xspi->data >> (xspi->data_sz - 16));
+ xspi->data_sz -= 16;
+ xtfpga_spi_write32(xspi, XTFPGA_SPI_START, 1);
+ xtfpga_spi_wait_busy(xspi);
+ xtfpga_spi_write32(xspi, XTFPGA_SPI_START, 0);
+ }
+
+ return 0;
+}
+
+static void xtfpga_spi_chipselect(struct spi_device *spi, int is_on)
+{
+ struct xtfpga_spi *xspi = spi_master_get_devdata(spi->master);
+
+ WARN_ON(xspi->data_sz != 0);
+ xspi->data_sz = 0;
+}
+
+static int xtfpga_spi_probe(struct platform_device *pdev)
+{
+ struct xtfpga_spi *xspi;
+ int ret;
+ struct spi_master *master;
+
+ master = devm_spi_alloc_master(&pdev->dev, sizeof(struct xtfpga_spi));
+ if (!master)
+ return -ENOMEM;
+
+ master->flags = SPI_MASTER_NO_RX;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 16);
+ master->bus_num = pdev->dev.id;
+ master->dev.of_node = pdev->dev.of_node;
+
+ xspi = spi_master_get_devdata(master);
+ xspi->bitbang.master = master;
+ xspi->bitbang.chipselect = xtfpga_spi_chipselect;
+ xspi->bitbang.txrx_word[SPI_MODE_0] = xtfpga_spi_txrx_word;
+ xspi->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(xspi->regs))
+ return PTR_ERR(xspi->regs);
+
+ xtfpga_spi_write32(xspi, XTFPGA_SPI_START, 0);
+ usleep_range(1000, 2000);
+ if (xtfpga_spi_read32(xspi, XTFPGA_SPI_BUSY)) {
+ dev_err(&pdev->dev, "Device stuck in busy state\n");
+ return -EBUSY;
+ }
+
+ ret = spi_bitbang_start(&xspi->bitbang);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "spi_bitbang_start failed\n");
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, master);
+ return 0;
+}
+
+static int xtfpga_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct xtfpga_spi *xspi = spi_master_get_devdata(master);
+
+ spi_bitbang_stop(&xspi->bitbang);
+ spi_master_put(master);
+
+ return 0;
+}
+
+MODULE_ALIAS("platform:" XTFPGA_SPI_NAME);
+
+#ifdef CONFIG_OF
+static const struct of_device_id xtfpga_spi_of_match[] = {
+ { .compatible = "cdns,xtfpga-spi", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, xtfpga_spi_of_match);
+#endif
+
+static struct platform_driver xtfpga_spi_driver = {
+ .probe = xtfpga_spi_probe,
+ .remove = xtfpga_spi_remove,
+ .driver = {
+ .name = XTFPGA_SPI_NAME,
+ .of_match_table = of_match_ptr(xtfpga_spi_of_match),
+ },
+};
+module_platform_driver(xtfpga_spi_driver);
+
+MODULE_AUTHOR("Max Filippov <jcmvbkbc@gmail.com>");
+MODULE_DESCRIPTION("xtensa xtfpga SPI driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-zynq-qspi.c b/drivers/spi/spi-zynq-qspi.c
new file mode 100644
index 000000000..78f31b61a
--- /dev/null
+++ b/drivers/spi/spi-zynq-qspi.c
@@ -0,0 +1,779 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019 Xilinx, Inc.
+ *
+ * Author: Naga Sureshkumar Relli <nagasure@xilinx.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/workqueue.h>
+#include <linux/spi/spi-mem.h>
+
+/* Register offset definitions */
+#define ZYNQ_QSPI_CONFIG_OFFSET 0x00 /* Configuration Register, RW */
+#define ZYNQ_QSPI_STATUS_OFFSET 0x04 /* Interrupt Status Register, RO */
+#define ZYNQ_QSPI_IEN_OFFSET 0x08 /* Interrupt Enable Register, WO */
+#define ZYNQ_QSPI_IDIS_OFFSET 0x0C /* Interrupt Disable Reg, WO */
+#define ZYNQ_QSPI_IMASK_OFFSET 0x10 /* Interrupt Enabled Mask Reg,RO */
+#define ZYNQ_QSPI_ENABLE_OFFSET 0x14 /* Enable/Disable Register, RW */
+#define ZYNQ_QSPI_DELAY_OFFSET 0x18 /* Delay Register, RW */
+#define ZYNQ_QSPI_TXD_00_00_OFFSET 0x1C /* Transmit 4-byte inst, WO */
+#define ZYNQ_QSPI_TXD_00_01_OFFSET 0x80 /* Transmit 1-byte inst, WO */
+#define ZYNQ_QSPI_TXD_00_10_OFFSET 0x84 /* Transmit 2-byte inst, WO */
+#define ZYNQ_QSPI_TXD_00_11_OFFSET 0x88 /* Transmit 3-byte inst, WO */
+#define ZYNQ_QSPI_RXD_OFFSET 0x20 /* Data Receive Register, RO */
+#define ZYNQ_QSPI_SIC_OFFSET 0x24 /* Slave Idle Count Register, RW */
+#define ZYNQ_QSPI_TX_THRESH_OFFSET 0x28 /* TX FIFO Watermark Reg, RW */
+#define ZYNQ_QSPI_RX_THRESH_OFFSET 0x2C /* RX FIFO Watermark Reg, RW */
+#define ZYNQ_QSPI_GPIO_OFFSET 0x30 /* GPIO Register, RW */
+#define ZYNQ_QSPI_LINEAR_CFG_OFFSET	0xA0 /* Linear Adapter Config Reg, RW */
+#define ZYNQ_QSPI_MOD_ID_OFFSET 0xFC /* Module ID Register, RO */
+
+/*
+ * QSPI Configuration Register bit Masks
+ *
+ * This register contains various control bits that affect the operation
+ * of the QSPI controller
+ */
+#define ZYNQ_QSPI_CONFIG_IFMODE_MASK BIT(31) /* Flash Memory Interface */
+#define ZYNQ_QSPI_CONFIG_MANSRT_MASK BIT(16) /* Manual TX Start */
+#define ZYNQ_QSPI_CONFIG_MANSRTEN_MASK BIT(15) /* Enable Manual TX Mode */
+#define ZYNQ_QSPI_CONFIG_SSFORCE_MASK BIT(14) /* Manual Chip Select */
+#define ZYNQ_QSPI_CONFIG_BDRATE_MASK GENMASK(5, 3) /* Baud Rate Mask */
+#define ZYNQ_QSPI_CONFIG_CPHA_MASK BIT(2) /* Clock Phase Control */
+#define ZYNQ_QSPI_CONFIG_CPOL_MASK BIT(1) /* Clock Polarity Control */
+#define ZYNQ_QSPI_CONFIG_FWIDTH_MASK GENMASK(7, 6) /* FIFO width */
+#define ZYNQ_QSPI_CONFIG_MSTREN_MASK BIT(0) /* Master Mode */
+
+/*
+ * QSPI Configuration Register - Baud rate and slave select
+ *
+ * These are the values used in the calculation of baud rate divisor and
+ * setting the slave select.
+ */
+#define ZYNQ_QSPI_CONFIG_BAUD_DIV_MAX GENMASK(2, 0) /* Baud rate maximum */
+#define ZYNQ_QSPI_CONFIG_BAUD_DIV_SHIFT 3 /* Baud rate divisor shift */
+#define ZYNQ_QSPI_CONFIG_PCS BIT(10) /* Peripheral Chip Select */
+
+/*
+ * QSPI Interrupt Registers bit Masks
+ *
+ * All the four interrupt registers (Status/Mask/Enable/Disable) have the same
+ * bit definitions.
+ */
+#define ZYNQ_QSPI_IXR_RX_OVERFLOW_MASK BIT(0) /* QSPI RX FIFO Overflow */
+#define ZYNQ_QSPI_IXR_TXNFULL_MASK	BIT(2) /* QSPI TX FIFO not full */
+#define ZYNQ_QSPI_IXR_TXFULL_MASK BIT(3) /* QSPI TX FIFO is full */
+#define ZYNQ_QSPI_IXR_RXNEMTY_MASK BIT(4) /* QSPI RX FIFO Not Empty */
+#define ZYNQ_QSPI_IXR_RXF_FULL_MASK BIT(5) /* QSPI RX FIFO is full */
+#define ZYNQ_QSPI_IXR_TXF_UNDRFLOW_MASK BIT(6) /* QSPI TX FIFO Underflow */
+#define ZYNQ_QSPI_IXR_ALL_MASK (ZYNQ_QSPI_IXR_RX_OVERFLOW_MASK | \
+ ZYNQ_QSPI_IXR_TXNFULL_MASK | \
+ ZYNQ_QSPI_IXR_TXFULL_MASK | \
+ ZYNQ_QSPI_IXR_RXNEMTY_MASK | \
+ ZYNQ_QSPI_IXR_RXF_FULL_MASK | \
+ ZYNQ_QSPI_IXR_TXF_UNDRFLOW_MASK)
+#define ZYNQ_QSPI_IXR_RXTX_MASK (ZYNQ_QSPI_IXR_TXNFULL_MASK | \
+ ZYNQ_QSPI_IXR_RXNEMTY_MASK)
+
+/*
+ * QSPI Enable Register bit Masks
+ *
+ * This register is used to enable or disable the QSPI controller
+ */
+#define ZYNQ_QSPI_ENABLE_ENABLE_MASK BIT(0) /* QSPI Enable Bit Mask */
+
+/*
+ * QSPI Linear Configuration Register
+ *
+ * Despite its name, this register also controls the other modes when the
+ * controller is not operating in linear mode.
+ */
+#define ZYNQ_QSPI_LCFG_TWO_MEM BIT(30) /* LQSPI Two memories */
+#define ZYNQ_QSPI_LCFG_SEP_BUS BIT(29) /* LQSPI Separate bus */
+#define ZYNQ_QSPI_LCFG_U_PAGE BIT(28) /* LQSPI Upper Page */
+
+#define ZYNQ_QSPI_LCFG_DUMMY_SHIFT 8
+
+#define ZYNQ_QSPI_FAST_READ_QOUT_CODE 0x6B /* read instruction code */
+#define ZYNQ_QSPI_FIFO_DEPTH 63 /* FIFO depth in words */
+#define ZYNQ_QSPI_RX_THRESHOLD 32 /* Rx FIFO threshold level */
+#define ZYNQ_QSPI_TX_THRESHOLD 1 /* Tx FIFO threshold level */
+
+/*
+ * The modebits configurable by the driver to make the SPI support different
+ * data formats
+ */
+#define ZYNQ_QSPI_MODEBITS (SPI_CPOL | SPI_CPHA)
+
+/* Maximum number of chip selects */
+#define ZYNQ_QSPI_MAX_NUM_CS 2
+
+/**
+ * struct zynq_qspi - Defines qspi driver instance
+ * @dev: Pointer to this device's information
+ * @regs: Virtual address of the QSPI controller registers
+ * @refclk: Pointer to the peripheral clock
+ * @pclk: Pointer to the APB clock
+ * @irq: IRQ number
+ * @txbuf: Pointer to the TX buffer
+ * @rxbuf: Pointer to the RX buffer
+ * @tx_bytes: Number of bytes left to transfer
+ * @rx_bytes: Number of bytes left to receive
+ * @data_completion: completion structure
+ */
+struct zynq_qspi {
+ struct device *dev;
+ void __iomem *regs;
+ struct clk *refclk;
+ struct clk *pclk;
+ int irq;
+ u8 *txbuf;
+ u8 *rxbuf;
+ int tx_bytes;
+ int rx_bytes;
+ struct completion data_completion;
+};
+
+/*
+ * Inline functions for the QSPI controller read/write
+ */
+static inline u32 zynq_qspi_read(struct zynq_qspi *xqspi, u32 offset)
+{
+ return readl_relaxed(xqspi->regs + offset);
+}
+
+static inline void zynq_qspi_write(struct zynq_qspi *xqspi, u32 offset,
+ u32 val)
+{
+ writel_relaxed(val, xqspi->regs + offset);
+}
+
+/**
+ * zynq_qspi_init_hw - Initialize the hardware
+ * @xqspi: Pointer to the zynq_qspi structure
+ * @num_cs: Number of connected CS (to enable dual memories if needed)
+ *
+ * The default settings of the QSPI controller's configurable parameters on
+ * reset are
+ * - Master mode
+ * - Baud rate divisor is set to 2
+ * - Tx threshold set to 1, Rx threshold set to 32
+ * - Flash memory interface mode enabled
+ * - Size of the word to be transferred as 8 bit
+ * This function performs the following actions
+ * - Disable and clear all the interrupts
+ * - Enable manual slave select
+ * - Enable manual start
+ * - Deselect all the chip select lines
+ * - Set the size of the word to be transferred as 32 bit
+ * - Set the little endian mode of the TX FIFO
+ * - Enable the QSPI controller
+ */
+static void zynq_qspi_init_hw(struct zynq_qspi *xqspi, unsigned int num_cs)
+{
+ u32 config_reg;
+
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_ENABLE_OFFSET, 0);
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_IDIS_OFFSET, ZYNQ_QSPI_IXR_ALL_MASK);
+
+ /* Disable linear mode as the boot loader may have used it */
+ config_reg = 0;
+ /* At the same time, enable dual mode if more than 1 CS is available */
+ if (num_cs > 1)
+ config_reg |= ZYNQ_QSPI_LCFG_TWO_MEM;
+
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_LINEAR_CFG_OFFSET, config_reg);
+
+ /* Clear the RX FIFO */
+ while (zynq_qspi_read(xqspi, ZYNQ_QSPI_STATUS_OFFSET) &
+ ZYNQ_QSPI_IXR_RXNEMTY_MASK)
+ zynq_qspi_read(xqspi, ZYNQ_QSPI_RXD_OFFSET);
+
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_STATUS_OFFSET, ZYNQ_QSPI_IXR_ALL_MASK);
+ config_reg = zynq_qspi_read(xqspi, ZYNQ_QSPI_CONFIG_OFFSET);
+ config_reg &= ~(ZYNQ_QSPI_CONFIG_MSTREN_MASK |
+ ZYNQ_QSPI_CONFIG_CPOL_MASK |
+ ZYNQ_QSPI_CONFIG_CPHA_MASK |
+ ZYNQ_QSPI_CONFIG_BDRATE_MASK |
+ ZYNQ_QSPI_CONFIG_SSFORCE_MASK |
+ ZYNQ_QSPI_CONFIG_MANSRTEN_MASK |
+ ZYNQ_QSPI_CONFIG_MANSRT_MASK);
+ config_reg |= (ZYNQ_QSPI_CONFIG_MSTREN_MASK |
+ ZYNQ_QSPI_CONFIG_SSFORCE_MASK |
+ ZYNQ_QSPI_CONFIG_FWIDTH_MASK |
+ ZYNQ_QSPI_CONFIG_IFMODE_MASK);
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_CONFIG_OFFSET, config_reg);
+
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_RX_THRESH_OFFSET,
+ ZYNQ_QSPI_RX_THRESHOLD);
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_TX_THRESH_OFFSET,
+ ZYNQ_QSPI_TX_THRESHOLD);
+
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_ENABLE_OFFSET,
+ ZYNQ_QSPI_ENABLE_ENABLE_MASK);
+}
+
+static bool zynq_qspi_supports_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ if (!spi_mem_default_supports_op(mem, op))
+ return false;
+
+	/* The controller supports at most 3 address bytes. */
+ if (op->addr.nbytes > 3)
+ return false;
+
+ return true;
+}
+
+/**
+ * zynq_qspi_rxfifo_op - Read 1..4 bytes from RxFIFO to RX buffer
+ * @xqspi: Pointer to the zynq_qspi structure
+ * @size: Number of bytes to be read (1..4)
+ */
+static void zynq_qspi_rxfifo_op(struct zynq_qspi *xqspi, unsigned int size)
+{
+ u32 data;
+
+ data = zynq_qspi_read(xqspi, ZYNQ_QSPI_RXD_OFFSET);
+
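+	/*
+	 * The valid bytes of a short read sit in the upper part of the
+	 * 32-bit RXD value, hence the offset into &data below.
+	 */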
+ if (xqspi->rxbuf) {
+ memcpy(xqspi->rxbuf, ((u8 *)&data) + 4 - size, size);
+ xqspi->rxbuf += size;
+ }
+
+ xqspi->rx_bytes -= size;
+ if (xqspi->rx_bytes < 0)
+ xqspi->rx_bytes = 0;
+}
+
+/**
+ * zynq_qspi_txfifo_op - Write 1..4 bytes from TX buffer to TxFIFO
+ * @xqspi: Pointer to the zynq_qspi structure
+ * @size: Number of bytes to be written (1..4)
+ */
+static void zynq_qspi_txfifo_op(struct zynq_qspi *xqspi, unsigned int size)
+{
+ static const unsigned int offset[4] = {
+ ZYNQ_QSPI_TXD_00_01_OFFSET, ZYNQ_QSPI_TXD_00_10_OFFSET,
+ ZYNQ_QSPI_TXD_00_11_OFFSET, ZYNQ_QSPI_TXD_00_00_OFFSET };
+ u32 data;
+
+ if (xqspi->txbuf) {
+ data = 0xffffffff;
+ memcpy(&data, xqspi->txbuf, size);
+ xqspi->txbuf += size;
+ } else {
+ data = 0;
+ }
+
+ xqspi->tx_bytes -= size;
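+	/* offset[size - 1] selects the 1, 2, 3 or 4 byte TXD register */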
+ zynq_qspi_write(xqspi, offset[size - 1], data);
+}
+
+/**
+ * zynq_qspi_chipselect - Select or deselect the chip select line
+ * @spi: Pointer to the spi_device structure
+ * @assert: true to select (assert) the chip select line, false to deselect it
+ */
+static void zynq_qspi_chipselect(struct spi_device *spi, bool assert)
+{
+ struct spi_controller *ctlr = spi->master;
+ struct zynq_qspi *xqspi = spi_controller_get_devdata(ctlr);
+ u32 config_reg;
+
+ /* Select the lower (CS0) or upper (CS1) memory */
+ if (ctlr->num_chipselect > 1) {
+ config_reg = zynq_qspi_read(xqspi, ZYNQ_QSPI_LINEAR_CFG_OFFSET);
+ if (!spi->chip_select)
+ config_reg &= ~ZYNQ_QSPI_LCFG_U_PAGE;
+ else
+ config_reg |= ZYNQ_QSPI_LCFG_U_PAGE;
+
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_LINEAR_CFG_OFFSET, config_reg);
+ }
+
+ /* Ground the line to assert the CS */
+ config_reg = zynq_qspi_read(xqspi, ZYNQ_QSPI_CONFIG_OFFSET);
+ if (assert)
+ config_reg &= ~ZYNQ_QSPI_CONFIG_PCS;
+ else
+ config_reg |= ZYNQ_QSPI_CONFIG_PCS;
+
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_CONFIG_OFFSET, config_reg);
+}
+
+/**
+ * zynq_qspi_config_op - Configure QSPI controller for specified transfer
+ * @xqspi: Pointer to the zynq_qspi structure
+ * @spi: Pointer to the spi_device structure
+ *
+ * Sets the operational mode of QSPI controller for the next QSPI transfer and
+ * sets the requested clock frequency.
+ *
+ * Return: 0 on success and -EINVAL on invalid input parameter
+ *
+ * Note: If the requested frequency cannot be matched exactly with a prescaler
+ * value, the driver selects the highest supported frequency that does not
+ * exceed the requested one. If the requested frequency is above or below the
+ * range supported by the QSPI controller, the driver uses the highest or
+ * lowest frequency the controller supports.
+ */
+static int zynq_qspi_config_op(struct zynq_qspi *xqspi, struct spi_device *spi)
+{
+ u32 config_reg, baud_rate_val = 0;
+
+ /*
+ * Set the clock frequency
+ * The baud rate divisor is not a direct mapping to the value written
+ * into the configuration register (config_reg[5:3])
+ * i.e. 000 - divide by 2
+ * 001 - divide by 4
+ * ----------------
+ * 111 - divide by 256
+ */
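+	/*
+	 * For example (purely illustrative): with a 100 MHz ref_clk and a
+	 * device limited to 25 MHz, the loop below stops at baud_rate_val = 1,
+	 * i.e. divide by 4 for a 25 MHz SCLK.
+	 */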
+ while ((baud_rate_val < ZYNQ_QSPI_CONFIG_BAUD_DIV_MAX) &&
+ (clk_get_rate(xqspi->refclk) / (2 << baud_rate_val)) >
+ spi->max_speed_hz)
+ baud_rate_val++;
+
+ config_reg = zynq_qspi_read(xqspi, ZYNQ_QSPI_CONFIG_OFFSET);
+
+ /* Set the QSPI clock phase and clock polarity */
+ config_reg &= (~ZYNQ_QSPI_CONFIG_CPHA_MASK) &
+ (~ZYNQ_QSPI_CONFIG_CPOL_MASK);
+ if (spi->mode & SPI_CPHA)
+ config_reg |= ZYNQ_QSPI_CONFIG_CPHA_MASK;
+ if (spi->mode & SPI_CPOL)
+ config_reg |= ZYNQ_QSPI_CONFIG_CPOL_MASK;
+
+ config_reg &= ~ZYNQ_QSPI_CONFIG_BDRATE_MASK;
+ config_reg |= (baud_rate_val << ZYNQ_QSPI_CONFIG_BAUD_DIV_SHIFT);
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_CONFIG_OFFSET, config_reg);
+
+ return 0;
+}
+
+/**
+ * zynq_qspi_setup_op - Configure the QSPI controller
+ * @spi: Pointer to the spi_device structure
+ *
+ * Sets the operational mode of QSPI controller for the next QSPI transfer, baud
+ * rate and divisor value to setup the requested qspi clock.
+ *
+ * Return: 0 on success and error value on failure
+ */
+static int zynq_qspi_setup_op(struct spi_device *spi)
+{
+ struct spi_controller *ctlr = spi->master;
+ struct zynq_qspi *qspi = spi_controller_get_devdata(ctlr);
+
+ if (ctlr->busy)
+ return -EBUSY;
+
+ clk_enable(qspi->refclk);
+ clk_enable(qspi->pclk);
+ zynq_qspi_write(qspi, ZYNQ_QSPI_ENABLE_OFFSET,
+ ZYNQ_QSPI_ENABLE_ENABLE_MASK);
+
+ return 0;
+}
+
+/**
+ * zynq_qspi_write_op - Fills the TX FIFO with as many bytes as possible
+ * @xqspi: Pointer to the zynq_qspi structure
+ * @txcount: Maximum number of words to write
+ * @txempty: Indicates that TxFIFO is empty
+ */
+static void zynq_qspi_write_op(struct zynq_qspi *xqspi, int txcount,
+ bool txempty)
+{
+ int count, len, k;
+
+ len = xqspi->tx_bytes;
+ if (len && len < 4) {
+ /*
+ * We must empty the TxFIFO between accesses to TXD0,
+ * TXD1, TXD2, TXD3.
+ */
+ if (txempty)
+ zynq_qspi_txfifo_op(xqspi, len);
+
+ return;
+ }
+
+ count = len / 4;
+ if (count > txcount)
+ count = txcount;
+
+ if (xqspi->txbuf) {
+ iowrite32_rep(xqspi->regs + ZYNQ_QSPI_TXD_00_00_OFFSET,
+ xqspi->txbuf, count);
+ xqspi->txbuf += count * 4;
+ } else {
+ for (k = 0; k < count; k++)
+ writel_relaxed(0, xqspi->regs +
+ ZYNQ_QSPI_TXD_00_00_OFFSET);
+ }
+
+ xqspi->tx_bytes -= count * 4;
+}
+
+/**
+ * zynq_qspi_read_op - Drains the RX FIFO by as many bytes as possible
+ * @xqspi: Pointer to the zynq_qspi structure
+ * @rxcount: Maximum number of words to read
+ */
+static void zynq_qspi_read_op(struct zynq_qspi *xqspi, int rxcount)
+{
+ int count, len, k;
+
+ len = xqspi->rx_bytes - xqspi->tx_bytes;
+ count = len / 4;
+ if (count > rxcount)
+ count = rxcount;
+ if (xqspi->rxbuf) {
+ ioread32_rep(xqspi->regs + ZYNQ_QSPI_RXD_OFFSET,
+ xqspi->rxbuf, count);
+ xqspi->rxbuf += count * 4;
+ } else {
+ for (k = 0; k < count; k++)
+ readl_relaxed(xqspi->regs + ZYNQ_QSPI_RXD_OFFSET);
+ }
+ xqspi->rx_bytes -= count * 4;
+ len -= count * 4;
+
+ if (len && len < 4 && count < rxcount)
+ zynq_qspi_rxfifo_op(xqspi, len);
+}
+
+/**
+ * zynq_qspi_irq - Interrupt service routine of the QSPI controller
+ * @irq: IRQ number
+ * @dev_id: Pointer to the xqspi structure
+ *
+ * This function handles TX empty only.
+ * On TX empty interrupt this function reads the received data from RX FIFO and
+ * fills the TX FIFO if there is any data remaining to be transferred.
+ *
+ * Return: IRQ_HANDLED when interrupt is handled; IRQ_NONE otherwise.
+ */
+static irqreturn_t zynq_qspi_irq(int irq, void *dev_id)
+{
+ u32 intr_status;
+ bool txempty;
+ struct zynq_qspi *xqspi = (struct zynq_qspi *)dev_id;
+
+ intr_status = zynq_qspi_read(xqspi, ZYNQ_QSPI_STATUS_OFFSET);
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_STATUS_OFFSET, intr_status);
+
+ if ((intr_status & ZYNQ_QSPI_IXR_TXNFULL_MASK) ||
+ (intr_status & ZYNQ_QSPI_IXR_RXNEMTY_MASK)) {
+ /*
+ * This bit is set when Tx FIFO has < THRESHOLD entries.
+ * We have the THRESHOLD value set to 1,
+ * so this bit indicates Tx FIFO is empty.
+ */
+ txempty = !!(intr_status & ZYNQ_QSPI_IXR_TXNFULL_MASK);
+ /* Read out the data from the RX FIFO */
+ zynq_qspi_read_op(xqspi, ZYNQ_QSPI_RX_THRESHOLD);
+ if (xqspi->tx_bytes) {
+ /* There is more data to send */
+ zynq_qspi_write_op(xqspi, ZYNQ_QSPI_RX_THRESHOLD,
+ txempty);
+ } else {
+ /*
+			 * Signal completion only once both transmit and
+			 * receive have finished.
+			 */
+ if (!xqspi->rx_bytes) {
+ zynq_qspi_write(xqspi,
+ ZYNQ_QSPI_IDIS_OFFSET,
+ ZYNQ_QSPI_IXR_RXTX_MASK);
+ complete(&xqspi->data_completion);
+ }
+ }
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+/**
+ * zynq_qspi_exec_mem_op() - Initiates the QSPI transfer
+ * @mem: the SPI memory
+ * @op: the memory operation to execute
+ *
+ * Executes a memory operation.
+ *
+ * This function first selects the chip and starts the memory operation.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ struct zynq_qspi *xqspi = spi_controller_get_devdata(mem->spi->master);
+ int err = 0, i;
+ u8 *tmpbuf;
+
+ dev_dbg(xqspi->dev, "cmd:%#x mode:%d.%d.%d.%d\n",
+ op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
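+	/*
+	 * The receive DMA requires a 4-byte aligned buffer and at least
+	 * 8 bytes of data; otherwise fall back to IO mode and drain the
+	 * RX FIFO under interrupt.
+	 */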
+ op->dummy.buswidth, op->data.buswidth);
+
+ zynq_qspi_chipselect(mem->spi, true);
+ zynq_qspi_config_op(xqspi, mem->spi);
+
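+	/*
+	 * The operation is pushed out phase by phase (command, address,
+	 * dummy and data). Each phase fills the TX FIFO, enables the RX/TX
+	 * interrupts and waits for the interrupt handler to signal
+	 * completion before moving on.
+	 */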
+ if (op->cmd.opcode) {
+ reinit_completion(&xqspi->data_completion);
+ xqspi->txbuf = (u8 *)&op->cmd.opcode;
+ xqspi->rxbuf = NULL;
+ xqspi->tx_bytes = op->cmd.nbytes;
+ xqspi->rx_bytes = op->cmd.nbytes;
+ zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
+ ZYNQ_QSPI_IXR_RXTX_MASK);
+ if (!wait_for_completion_timeout(&xqspi->data_completion,
+ msecs_to_jiffies(1000)))
+ err = -ETIMEDOUT;
+ }
+
+ if (op->addr.nbytes) {
+ for (i = 0; i < op->addr.nbytes; i++) {
+ xqspi->txbuf[i] = op->addr.val >>
+ (8 * (op->addr.nbytes - i - 1));
+ }
+
+ reinit_completion(&xqspi->data_completion);
+ xqspi->rxbuf = NULL;
+ xqspi->tx_bytes = op->addr.nbytes;
+ xqspi->rx_bytes = op->addr.nbytes;
+ zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
+ ZYNQ_QSPI_IXR_RXTX_MASK);
+ if (!wait_for_completion_timeout(&xqspi->data_completion,
+ msecs_to_jiffies(1000)))
+ err = -ETIMEDOUT;
+ }
+
+ if (op->dummy.nbytes) {
+ tmpbuf = kzalloc(op->dummy.nbytes, GFP_KERNEL);
+ if (!tmpbuf)
+ return -ENOMEM;
+
+ memset(tmpbuf, 0xff, op->dummy.nbytes);
+ reinit_completion(&xqspi->data_completion);
+ xqspi->txbuf = tmpbuf;
+ xqspi->rxbuf = NULL;
+ xqspi->tx_bytes = op->dummy.nbytes;
+ xqspi->rx_bytes = op->dummy.nbytes;
+ zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
+ ZYNQ_QSPI_IXR_RXTX_MASK);
+ if (!wait_for_completion_timeout(&xqspi->data_completion,
+ msecs_to_jiffies(1000)))
+ err = -ETIMEDOUT;
+
+ kfree(tmpbuf);
+ }
+
+ if (op->data.nbytes) {
+ reinit_completion(&xqspi->data_completion);
+ if (op->data.dir == SPI_MEM_DATA_OUT) {
+ xqspi->txbuf = (u8 *)op->data.buf.out;
+ xqspi->tx_bytes = op->data.nbytes;
+ xqspi->rxbuf = NULL;
+ xqspi->rx_bytes = op->data.nbytes;
+ } else {
+ xqspi->txbuf = NULL;
+ xqspi->rxbuf = (u8 *)op->data.buf.in;
+ xqspi->rx_bytes = op->data.nbytes;
+ xqspi->tx_bytes = op->data.nbytes;
+ }
+
+ zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
+ ZYNQ_QSPI_IXR_RXTX_MASK);
+ if (!wait_for_completion_timeout(&xqspi->data_completion,
+ msecs_to_jiffies(1000)))
+ err = -ETIMEDOUT;
+ }
+ zynq_qspi_chipselect(mem->spi, false);
+
+ return err;
+}
+
+static const struct spi_controller_mem_ops zynq_qspi_mem_ops = {
+ .supports_op = zynq_qspi_supports_op,
+ .exec_op = zynq_qspi_exec_mem_op,
+};
+
+/**
+ * zynq_qspi_probe - Probe method for the QSPI driver
+ * @pdev: Pointer to the platform_device structure
+ *
+ * This function initializes the driver data structures and the hardware.
+ *
+ * Return: 0 on success and error value on failure
+ */
+static int zynq_qspi_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct spi_controller *ctlr;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct zynq_qspi *xqspi;
+ u32 num_cs;
+
+ ctlr = spi_alloc_master(&pdev->dev, sizeof(*xqspi));
+ if (!ctlr)
+ return -ENOMEM;
+
+ xqspi = spi_controller_get_devdata(ctlr);
+ xqspi->dev = dev;
+ platform_set_drvdata(pdev, xqspi);
+ xqspi->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(xqspi->regs)) {
+ ret = PTR_ERR(xqspi->regs);
+ goto remove_master;
+ }
+
+ xqspi->pclk = devm_clk_get(&pdev->dev, "pclk");
+ if (IS_ERR(xqspi->pclk)) {
+ dev_err(&pdev->dev, "pclk clock not found.\n");
+ ret = PTR_ERR(xqspi->pclk);
+ goto remove_master;
+ }
+
+ init_completion(&xqspi->data_completion);
+
+ xqspi->refclk = devm_clk_get(&pdev->dev, "ref_clk");
+ if (IS_ERR(xqspi->refclk)) {
+ dev_err(&pdev->dev, "ref_clk clock not found.\n");
+ ret = PTR_ERR(xqspi->refclk);
+ goto remove_master;
+ }
+
+ ret = clk_prepare_enable(xqspi->pclk);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable APB clock.\n");
+ goto remove_master;
+ }
+
+ ret = clk_prepare_enable(xqspi->refclk);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable device clock.\n");
+ goto clk_dis_pclk;
+ }
+
+ xqspi->irq = platform_get_irq(pdev, 0);
+ if (xqspi->irq <= 0) {
+ ret = -ENXIO;
+ goto clk_dis_all;
+ }
+ ret = devm_request_irq(&pdev->dev, xqspi->irq, zynq_qspi_irq,
+ 0, pdev->name, xqspi);
+ if (ret != 0) {
+ ret = -ENXIO;
+ dev_err(&pdev->dev, "request_irq failed\n");
+ goto clk_dis_all;
+ }
+
+ ret = of_property_read_u32(np, "num-cs",
+ &num_cs);
+ if (ret < 0) {
+ ctlr->num_chipselect = 1;
+ } else if (num_cs > ZYNQ_QSPI_MAX_NUM_CS) {
+ ret = -EINVAL;
+ dev_err(&pdev->dev, "only 2 chip selects are available\n");
+ goto clk_dis_all;
+ } else {
+ ctlr->num_chipselect = num_cs;
+ }
+
+ ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD |
+ SPI_TX_DUAL | SPI_TX_QUAD;
+ ctlr->mem_ops = &zynq_qspi_mem_ops;
+ ctlr->setup = zynq_qspi_setup_op;
+ ctlr->max_speed_hz = clk_get_rate(xqspi->refclk) / 2;
+ ctlr->dev.of_node = np;
+
+ /* QSPI controller initializations */
+ zynq_qspi_init_hw(xqspi, ctlr->num_chipselect);
+
+ ret = devm_spi_register_controller(&pdev->dev, ctlr);
+ if (ret) {
+ dev_err(&pdev->dev, "spi_register_master failed\n");
+ goto clk_dis_all;
+ }
+
+ return ret;
+
+clk_dis_all:
+ clk_disable_unprepare(xqspi->refclk);
+clk_dis_pclk:
+ clk_disable_unprepare(xqspi->pclk);
+remove_master:
+ spi_controller_put(ctlr);
+
+ return ret;
+}
+
+/**
+ * zynq_qspi_remove - Remove method for the QSPI driver
+ * @pdev: Pointer to the platform_device structure
+ *
+ * This function is called if a device is physically removed from the system or
+ * if the driver module is being unloaded. It frees all resources allocated to
+ * the device.
+ *
+ * Return: 0 on success and error value on failure
+ */
+static int zynq_qspi_remove(struct platform_device *pdev)
+{
+ struct zynq_qspi *xqspi = platform_get_drvdata(pdev);
+
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_ENABLE_OFFSET, 0);
+
+ clk_disable_unprepare(xqspi->refclk);
+ clk_disable_unprepare(xqspi->pclk);
+
+ return 0;
+}
+
+static const struct of_device_id zynq_qspi_of_match[] = {
+ { .compatible = "xlnx,zynq-qspi-1.0", },
+ { /* end of table */ }
+};
+
+MODULE_DEVICE_TABLE(of, zynq_qspi_of_match);
+
+/*
+ * zynq_qspi_driver - This structure defines the QSPI platform driver
+ */
+static struct platform_driver zynq_qspi_driver = {
+ .probe = zynq_qspi_probe,
+ .remove = zynq_qspi_remove,
+ .driver = {
+ .name = "zynq-qspi",
+ .of_match_table = zynq_qspi_of_match,
+ },
+};
+
+module_platform_driver(zynq_qspi_driver);
+
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_DESCRIPTION("Xilinx Zynq QSPI driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c
new file mode 100644
index 000000000..3b56d5e70
--- /dev/null
+++ b/drivers/spi/spi-zynqmp-gqspi.c
@@ -0,0 +1,1281 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Xilinx Zynq UltraScale+ MPSoC Quad-SPI (QSPI) controller driver
+ * (master mode only)
+ *
+ * Copyright (C) 2009 - 2015 Xilinx, Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/spi/spi.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/spi/spi-mem.h>
+
+/* Generic QSPI register offsets */
+#define GQSPI_CONFIG_OFST 0x00000100
+#define GQSPI_ISR_OFST 0x00000104
+#define GQSPI_IDR_OFST 0x0000010C
+#define GQSPI_IER_OFST 0x00000108
+#define GQSPI_IMASK_OFST 0x00000110
+#define GQSPI_EN_OFST 0x00000114
+#define GQSPI_TXD_OFST 0x0000011C
+#define GQSPI_RXD_OFST 0x00000120
+#define GQSPI_TX_THRESHOLD_OFST 0x00000128
+#define GQSPI_RX_THRESHOLD_OFST 0x0000012C
+#define GQSPI_LPBK_DLY_ADJ_OFST 0x00000138
+#define GQSPI_GEN_FIFO_OFST 0x00000140
+#define GQSPI_SEL_OFST 0x00000144
+#define GQSPI_GF_THRESHOLD_OFST 0x00000150
+#define GQSPI_FIFO_CTRL_OFST 0x0000014C
+#define GQSPI_QSPIDMA_DST_CTRL_OFST 0x0000080C
+#define GQSPI_QSPIDMA_DST_SIZE_OFST 0x00000804
+#define GQSPI_QSPIDMA_DST_STS_OFST 0x00000808
+#define GQSPI_QSPIDMA_DST_I_STS_OFST 0x00000814
+#define GQSPI_QSPIDMA_DST_I_EN_OFST 0x00000818
+#define GQSPI_QSPIDMA_DST_I_DIS_OFST 0x0000081C
+#define GQSPI_QSPIDMA_DST_I_MASK_OFST 0x00000820
+#define GQSPI_QSPIDMA_DST_ADDR_OFST 0x00000800
+#define GQSPI_QSPIDMA_DST_ADDR_MSB_OFST 0x00000828
+
+/* GQSPI register bit masks */
+#define GQSPI_SEL_MASK 0x00000001
+#define GQSPI_EN_MASK 0x00000001
+#define GQSPI_LPBK_DLY_ADJ_USE_LPBK_MASK 0x00000020
+#define GQSPI_ISR_WR_TO_CLR_MASK 0x00000002
+#define GQSPI_IDR_ALL_MASK 0x00000FBE
+#define GQSPI_CFG_MODE_EN_MASK 0xC0000000
+#define GQSPI_CFG_GEN_FIFO_START_MODE_MASK 0x20000000
+#define GQSPI_CFG_ENDIAN_MASK 0x04000000
+#define GQSPI_CFG_EN_POLL_TO_MASK 0x00100000
+#define GQSPI_CFG_WP_HOLD_MASK 0x00080000
+#define GQSPI_CFG_BAUD_RATE_DIV_MASK 0x00000038
+#define GQSPI_CFG_CLK_PHA_MASK 0x00000004
+#define GQSPI_CFG_CLK_POL_MASK 0x00000002
+#define GQSPI_CFG_START_GEN_FIFO_MASK 0x10000000
+#define GQSPI_GENFIFO_IMM_DATA_MASK 0x000000FF
+#define GQSPI_GENFIFO_DATA_XFER 0x00000100
+#define GQSPI_GENFIFO_EXP 0x00000200
+#define GQSPI_GENFIFO_MODE_SPI 0x00000400
+#define GQSPI_GENFIFO_MODE_DUALSPI 0x00000800
+#define GQSPI_GENFIFO_MODE_QUADSPI 0x00000C00
+#define GQSPI_GENFIFO_MODE_MASK 0x00000C00
+#define GQSPI_GENFIFO_CS_LOWER 0x00001000
+#define GQSPI_GENFIFO_CS_UPPER 0x00002000
+#define GQSPI_GENFIFO_BUS_LOWER 0x00004000
+#define GQSPI_GENFIFO_BUS_UPPER 0x00008000
+#define GQSPI_GENFIFO_BUS_BOTH 0x0000C000
+#define GQSPI_GENFIFO_BUS_MASK 0x0000C000
+#define GQSPI_GENFIFO_TX 0x00010000
+#define GQSPI_GENFIFO_RX 0x00020000
+#define GQSPI_GENFIFO_STRIPE 0x00040000
+#define GQSPI_GENFIFO_POLL 0x00080000
+#define GQSPI_GENFIFO_EXP_START 0x00000100
+#define GQSPI_FIFO_CTRL_RST_RX_FIFO_MASK 0x00000004
+#define GQSPI_FIFO_CTRL_RST_TX_FIFO_MASK 0x00000002
+#define GQSPI_FIFO_CTRL_RST_GEN_FIFO_MASK 0x00000001
+#define GQSPI_ISR_RXEMPTY_MASK 0x00000800
+#define GQSPI_ISR_GENFIFOFULL_MASK 0x00000400
+#define GQSPI_ISR_GENFIFONOT_FULL_MASK 0x00000200
+#define GQSPI_ISR_TXEMPTY_MASK 0x00000100
+#define GQSPI_ISR_GENFIFOEMPTY_MASK 0x00000080
+#define GQSPI_ISR_RXFULL_MASK 0x00000020
+#define GQSPI_ISR_RXNEMPTY_MASK 0x00000010
+#define GQSPI_ISR_TXFULL_MASK 0x00000008
+#define GQSPI_ISR_TXNOT_FULL_MASK 0x00000004
+#define GQSPI_ISR_POLL_TIME_EXPIRE_MASK 0x00000002
+#define GQSPI_IER_TXNOT_FULL_MASK 0x00000004
+#define GQSPI_IER_RXEMPTY_MASK 0x00000800
+#define GQSPI_IER_POLL_TIME_EXPIRE_MASK 0x00000002
+#define GQSPI_IER_RXNEMPTY_MASK 0x00000010
+#define GQSPI_IER_GENFIFOEMPTY_MASK 0x00000080
+#define GQSPI_IER_TXEMPTY_MASK 0x00000100
+#define GQSPI_QSPIDMA_DST_INTR_ALL_MASK 0x000000FE
+#define GQSPI_QSPIDMA_DST_STS_WTC 0x0000E000
+#define GQSPI_CFG_MODE_EN_DMA_MASK 0x80000000
+#define GQSPI_ISR_IDR_MASK 0x00000994
+#define GQSPI_QSPIDMA_DST_I_EN_DONE_MASK 0x00000002
+#define GQSPI_QSPIDMA_DST_I_STS_DONE_MASK 0x00000002
+#define GQSPI_IRQ_MASK 0x00000980
+
+#define GQSPI_CFG_BAUD_RATE_DIV_SHIFT 3
+#define GQSPI_GENFIFO_CS_SETUP 0x4
+#define GQSPI_GENFIFO_CS_HOLD 0x3
+#define GQSPI_TXD_DEPTH 64
+#define GQSPI_RX_FIFO_THRESHOLD 32
+#define GQSPI_RX_FIFO_FILL (GQSPI_RX_FIFO_THRESHOLD * 4)
+#define GQSPI_TX_FIFO_THRESHOLD_RESET_VAL 32
+#define GQSPI_TX_FIFO_FILL (GQSPI_TXD_DEPTH -\
+ GQSPI_TX_FIFO_THRESHOLD_RESET_VAL)
+#define GQSPI_GEN_FIFO_THRESHOLD_RESET_VAL 0X10
+#define GQSPI_QSPIDMA_DST_CTRL_RESET_VAL 0x803FFA00
+#define GQSPI_SELECT_FLASH_CS_LOWER 0x1
+#define GQSPI_SELECT_FLASH_CS_UPPER 0x2
+#define GQSPI_SELECT_FLASH_CS_BOTH 0x3
+#define GQSPI_SELECT_FLASH_BUS_LOWER 0x1
+#define GQSPI_SELECT_FLASH_BUS_UPPER 0x2
+#define GQSPI_SELECT_FLASH_BUS_BOTH 0x3
+#define GQSPI_BAUD_DIV_MAX 7 /* Baud rate divisor maximum */
+#define GQSPI_BAUD_DIV_SHIFT 2 /* Baud rate divisor shift */
+#define GQSPI_SELECT_MODE_SPI 0x1
+#define GQSPI_SELECT_MODE_DUALSPI 0x2
+#define GQSPI_SELECT_MODE_QUADSPI 0x4
+#define GQSPI_DMA_UNALIGN 0x3
+#define GQSPI_DEFAULT_NUM_CS 1 /* Default number of chip selects */
+
+#define GQSPI_MAX_NUM_CS 2 /* Maximum number of chip selects */
+
+#define SPI_AUTOSUSPEND_TIMEOUT 3000
+enum mode_type {GQSPI_MODE_IO, GQSPI_MODE_DMA};
+
+/**
+ * struct zynqmp_qspi - Defines qspi driver instance
+ * @ctlr: Pointer to the spi controller
+ * @regs: Virtual address of the QSPI controller registers
+ * @refclk: Pointer to the peripheral clock
+ * @pclk: Pointer to the APB clock
+ * @irq: IRQ number
+ * @dev: Pointer to struct device
+ * @txbuf: Pointer to the TX buffer
+ * @rxbuf: Pointer to the RX buffer
+ * @bytes_to_transfer: Number of bytes left to transfer
+ * @bytes_to_receive: Number of bytes left to receive
+ * @genfifocs: Used for chip select
+ * @genfifobus: Used to select the upper or lower bus
+ * @dma_rx_bytes: Remaining bytes to receive by DMA mode
+ * @dma_addr: DMA address after mapping the kernel buffer
+ * @genfifoentry: Used for storing the genfifoentry instruction.
+ * @mode: Defines the mode in which QSPI is operating
+ * @data_completion: completion structure
+ * @op_lock: Mutex serializing memory operations on the controller
+ */
+struct zynqmp_qspi {
+ struct spi_controller *ctlr;
+ void __iomem *regs;
+ struct clk *refclk;
+ struct clk *pclk;
+ int irq;
+ struct device *dev;
+ const void *txbuf;
+ void *rxbuf;
+ int bytes_to_transfer;
+ int bytes_to_receive;
+ u32 genfifocs;
+ u32 genfifobus;
+ u32 dma_rx_bytes;
+ dma_addr_t dma_addr;
+ u32 genfifoentry;
+ enum mode_type mode;
+ struct completion data_completion;
+ struct mutex op_lock;
+};
+
+/**
+ * zynqmp_gqspi_read - For GQSPI controller read operation
+ * @xqspi: Pointer to the zynqmp_qspi structure
+ * @offset: Offset from where to read
+ * Return: Value at the offset
+ */
+static u32 zynqmp_gqspi_read(struct zynqmp_qspi *xqspi, u32 offset)
+{
+ return readl_relaxed(xqspi->regs + offset);
+}
+
+/**
+ * zynqmp_gqspi_write - For GQSPI controller write operation
+ * @xqspi: Pointer to the zynqmp_qspi structure
+ * @offset: Offset where to write
+ * @val: Value to be written
+ */
+static inline void zynqmp_gqspi_write(struct zynqmp_qspi *xqspi, u32 offset,
+ u32 val)
+{
+ writel_relaxed(val, (xqspi->regs + offset));
+}
+
+/**
+ * zynqmp_gqspi_selectslave - For selection of slave device
+ * @instanceptr: Pointer to the zynqmp_qspi structure
+ * @slavecs: Chip select line(s) to use - lower, upper or both
+ * @slavebus: Bus to use - lower, upper or both
+ */
+static void zynqmp_gqspi_selectslave(struct zynqmp_qspi *instanceptr,
+ u8 slavecs, u8 slavebus)
+{
+ /*
+ * Bus and CS lines selected here will be updated in the instance and
+ * used for subsequent GENFIFO entries during transfer.
+ */
+
+ /* Choose slave select line */
+ switch (slavecs) {
+ case GQSPI_SELECT_FLASH_CS_BOTH:
+ instanceptr->genfifocs = GQSPI_GENFIFO_CS_LOWER |
+ GQSPI_GENFIFO_CS_UPPER;
+ break;
+ case GQSPI_SELECT_FLASH_CS_UPPER:
+ instanceptr->genfifocs = GQSPI_GENFIFO_CS_UPPER;
+ break;
+ case GQSPI_SELECT_FLASH_CS_LOWER:
+ instanceptr->genfifocs = GQSPI_GENFIFO_CS_LOWER;
+ break;
+ default:
+ dev_warn(instanceptr->dev, "Invalid slave select\n");
+ }
+
+ /* Choose the bus */
+ switch (slavebus) {
+ case GQSPI_SELECT_FLASH_BUS_BOTH:
+ instanceptr->genfifobus = GQSPI_GENFIFO_BUS_LOWER |
+ GQSPI_GENFIFO_BUS_UPPER;
+ break;
+ case GQSPI_SELECT_FLASH_BUS_UPPER:
+ instanceptr->genfifobus = GQSPI_GENFIFO_BUS_UPPER;
+ break;
+ case GQSPI_SELECT_FLASH_BUS_LOWER:
+ instanceptr->genfifobus = GQSPI_GENFIFO_BUS_LOWER;
+ break;
+ default:
+ dev_warn(instanceptr->dev, "Invalid slave bus\n");
+ }
+}
+
+/**
+ * zynqmp_qspi_init_hw - Initialize the hardware
+ * @xqspi: Pointer to the zynqmp_qspi structure
+ *
+ * The default settings of the QSPI controller's configurable parameters on
+ * reset are
+ * - Master mode
+ * - TX threshold set to 1
+ * - RX threshold set to 1
+ * - Flash memory interface mode enabled
+ * This function performs the following actions
+ * - Disable and clear all the interrupts
+ * - Enable manual slave select
+ * - Enable manual start
+ * - Deselect all the chip select lines
+ * - Set the little endian mode of the TX FIFO
+ * - Enable the QSPI controller
+ */
+static void zynqmp_qspi_init_hw(struct zynqmp_qspi *xqspi)
+{
+ u32 config_reg;
+
+ /* Select the GQSPI mode */
+ zynqmp_gqspi_write(xqspi, GQSPI_SEL_OFST, GQSPI_SEL_MASK);
+ /* Clear and disable interrupts */
+ zynqmp_gqspi_write(xqspi, GQSPI_ISR_OFST,
+ zynqmp_gqspi_read(xqspi, GQSPI_ISR_OFST) |
+ GQSPI_ISR_WR_TO_CLR_MASK);
+ /* Clear the DMA STS */
+ zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_I_STS_OFST,
+ zynqmp_gqspi_read(xqspi,
+ GQSPI_QSPIDMA_DST_I_STS_OFST));
+ zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_STS_OFST,
+ zynqmp_gqspi_read(xqspi,
+ GQSPI_QSPIDMA_DST_STS_OFST) |
+ GQSPI_QSPIDMA_DST_STS_WTC);
+ zynqmp_gqspi_write(xqspi, GQSPI_IDR_OFST, GQSPI_IDR_ALL_MASK);
+ zynqmp_gqspi_write(xqspi,
+ GQSPI_QSPIDMA_DST_I_DIS_OFST,
+ GQSPI_QSPIDMA_DST_INTR_ALL_MASK);
+ /* Disable the GQSPI */
+ zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, 0x0);
+ config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST);
+ config_reg &= ~GQSPI_CFG_MODE_EN_MASK;
+ /* Manual start */
+ config_reg |= GQSPI_CFG_GEN_FIFO_START_MODE_MASK;
+ /* Little endian by default */
+ config_reg &= ~GQSPI_CFG_ENDIAN_MASK;
+ /* Disable poll time out */
+ config_reg &= ~GQSPI_CFG_EN_POLL_TO_MASK;
+ /* Set hold bit */
+ config_reg |= GQSPI_CFG_WP_HOLD_MASK;
+ /* Clear pre-scalar by default */
+ config_reg &= ~GQSPI_CFG_BAUD_RATE_DIV_MASK;
+ /* CPHA 0 */
+ config_reg &= ~GQSPI_CFG_CLK_PHA_MASK;
+ /* CPOL 0 */
+ config_reg &= ~GQSPI_CFG_CLK_POL_MASK;
+ zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg);
+
+ /* Clear the TX and RX FIFO */
+ zynqmp_gqspi_write(xqspi, GQSPI_FIFO_CTRL_OFST,
+ GQSPI_FIFO_CTRL_RST_RX_FIFO_MASK |
+ GQSPI_FIFO_CTRL_RST_TX_FIFO_MASK |
+ GQSPI_FIFO_CTRL_RST_GEN_FIFO_MASK);
+ /* Set by default to allow for high frequencies */
+ zynqmp_gqspi_write(xqspi, GQSPI_LPBK_DLY_ADJ_OFST,
+ zynqmp_gqspi_read(xqspi, GQSPI_LPBK_DLY_ADJ_OFST) |
+ GQSPI_LPBK_DLY_ADJ_USE_LPBK_MASK);
+ /* Reset thresholds */
+ zynqmp_gqspi_write(xqspi, GQSPI_TX_THRESHOLD_OFST,
+ GQSPI_TX_FIFO_THRESHOLD_RESET_VAL);
+ zynqmp_gqspi_write(xqspi, GQSPI_RX_THRESHOLD_OFST,
+ GQSPI_RX_FIFO_THRESHOLD);
+ zynqmp_gqspi_write(xqspi, GQSPI_GF_THRESHOLD_OFST,
+ GQSPI_GEN_FIFO_THRESHOLD_RESET_VAL);
+ zynqmp_gqspi_selectslave(xqspi,
+ GQSPI_SELECT_FLASH_CS_LOWER,
+ GQSPI_SELECT_FLASH_BUS_LOWER);
+ /* Initialize DMA */
+ zynqmp_gqspi_write(xqspi,
+ GQSPI_QSPIDMA_DST_CTRL_OFST,
+ GQSPI_QSPIDMA_DST_CTRL_RESET_VAL);
+
+ /* Enable the GQSPI */
+ zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, GQSPI_EN_MASK);
+}
+
+/**
+ * zynqmp_qspi_copy_read_data - Copy data to RX buffer
+ * @xqspi: Pointer to the zynqmp_qspi structure
+ * @data: The variable where data is stored
+ * @size: Number of bytes to be copied from data to RX buffer
+ */
+static void zynqmp_qspi_copy_read_data(struct zynqmp_qspi *xqspi,
+ ulong data, u8 size)
+{
+ memcpy(xqspi->rxbuf, &data, size);
+ xqspi->rxbuf += size;
+ xqspi->bytes_to_receive -= size;
+}
+
+/**
+ * zynqmp_qspi_chipselect - Select or deselect the chip select line
+ * @qspi: Pointer to the spi_device structure
+ * @is_high: Select (0) or deselect (1) the chip select line
+ */
+static void zynqmp_qspi_chipselect(struct spi_device *qspi, bool is_high)
+{
+ struct zynqmp_qspi *xqspi = spi_master_get_devdata(qspi->master);
+ ulong timeout;
+ u32 genfifoentry = 0, statusreg;
+
+ genfifoentry |= GQSPI_GENFIFO_MODE_SPI;
+
+ if (!is_high) {
+ if (!qspi->chip_select) {
+ xqspi->genfifobus = GQSPI_GENFIFO_BUS_LOWER;
+ xqspi->genfifocs = GQSPI_GENFIFO_CS_LOWER;
+ } else {
+ xqspi->genfifobus = GQSPI_GENFIFO_BUS_UPPER;
+ xqspi->genfifocs = GQSPI_GENFIFO_CS_UPPER;
+ }
+ genfifoentry |= xqspi->genfifobus;
+ genfifoentry |= xqspi->genfifocs;
+ genfifoentry |= GQSPI_GENFIFO_CS_SETUP;
+ } else {
+ genfifoentry |= GQSPI_GENFIFO_CS_HOLD;
+ }
+
+ zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, genfifoentry);
+
+ /* Manually start the generic FIFO command */
+ zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST,
+ zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST) |
+ GQSPI_CFG_START_GEN_FIFO_MASK);
+
+ timeout = jiffies + msecs_to_jiffies(1000);
+
+ /* Wait until the generic FIFO command is empty */
+ do {
+ statusreg = zynqmp_gqspi_read(xqspi, GQSPI_ISR_OFST);
+
+ if ((statusreg & GQSPI_ISR_GENFIFOEMPTY_MASK) &&
+ (statusreg & GQSPI_ISR_TXEMPTY_MASK))
+ break;
+ cpu_relax();
+ } while (!time_after_eq(jiffies, timeout));
+
+ if (time_after_eq(jiffies, timeout))
+ dev_err(xqspi->dev, "Chip select timed out\n");
+}
+
+/**
+ * zynqmp_qspi_selectspimode - Selects SPI mode - x1 or x2 or x4.
+ * @xqspi: xqspi is a pointer to the GQSPI instance
+ * @spimode: spimode - SPI or DUAL or QUAD.
+ * Return: Mask to set desired SPI mode in GENFIFO entry.
+ */
+static inline u32 zynqmp_qspi_selectspimode(struct zynqmp_qspi *xqspi,
+ u8 spimode)
+{
+ u32 mask = 0;
+
+ switch (spimode) {
+ case GQSPI_SELECT_MODE_DUALSPI:
+ mask = GQSPI_GENFIFO_MODE_DUALSPI;
+ break;
+ case GQSPI_SELECT_MODE_QUADSPI:
+ mask = GQSPI_GENFIFO_MODE_QUADSPI;
+ break;
+ case GQSPI_SELECT_MODE_SPI:
+ mask = GQSPI_GENFIFO_MODE_SPI;
+ break;
+ default:
+ dev_warn(xqspi->dev, "Invalid SPI mode\n");
+ }
+
+ return mask;
+}
+
+/**
+ * zynqmp_qspi_config_op - Configure QSPI controller for specified
+ * transfer
+ * @xqspi: Pointer to the zynqmp_qspi structure
+ * @qspi: Pointer to the spi_device structure
+ *
+ * Sets the operational mode of QSPI controller for the next QSPI transfer and
+ * sets the requested clock frequency.
+ *
+ * Return: Always 0
+ *
+ * Note:
+ *	If the requested frequency cannot be matched exactly with a
+ *	pre-scaler value, the driver selects the highest supported frequency
+ *	that does not exceed the requested one.
+ *
+ *	If the requested frequency is above or below the range supported by
+ *	the QSPI controller, the driver uses the highest or lowest frequency
+ *	the controller supports.
+ */
+static int zynqmp_qspi_config_op(struct zynqmp_qspi *xqspi,
+ struct spi_device *qspi)
+{
+ ulong clk_rate;
+ u32 config_reg, baud_rate_val = 0;
+
+ /* Set the clock frequency */
+ /* If req_hz == 0, default to lowest speed */
+ clk_rate = clk_get_rate(xqspi->refclk);
+
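+	/*
+	 * The loop below assumes an SCLK divisor of 2 << baud_rate_val;
+	 * e.g. (purely illustrative) a 300 MHz ref_clk with a device limited
+	 * to 100 MHz ends up with baud_rate_val = 1, i.e. divide by 4 for
+	 * 75 MHz on the bus.
+	 */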
+ while ((baud_rate_val < GQSPI_BAUD_DIV_MAX) &&
+ (clk_rate /
+ (GQSPI_BAUD_DIV_SHIFT << baud_rate_val)) > qspi->max_speed_hz)
+ baud_rate_val++;
+
+ config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST);
+
+ /* Set the QSPI clock phase and clock polarity */
+ config_reg &= (~GQSPI_CFG_CLK_PHA_MASK) & (~GQSPI_CFG_CLK_POL_MASK);
+
+ if (qspi->mode & SPI_CPHA)
+ config_reg |= GQSPI_CFG_CLK_PHA_MASK;
+ if (qspi->mode & SPI_CPOL)
+ config_reg |= GQSPI_CFG_CLK_POL_MASK;
+
+ config_reg &= ~GQSPI_CFG_BAUD_RATE_DIV_MASK;
+ config_reg |= (baud_rate_val << GQSPI_CFG_BAUD_RATE_DIV_SHIFT);
+ zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg);
+ return 0;
+}
+
+/**
+ * zynqmp_qspi_setup_op - Configure the QSPI controller
+ * @qspi: Pointer to the spi_device structure
+ *
+ * Sets the operational mode of QSPI controller for the next QSPI transfer,
+ * baud rate and divisor value to setup the requested qspi clock.
+ *
+ * Return: 0 on success; error value otherwise.
+ */
+static int zynqmp_qspi_setup_op(struct spi_device *qspi)
+{
+ struct spi_controller *ctlr = qspi->master;
+ struct zynqmp_qspi *xqspi = spi_controller_get_devdata(ctlr);
+
+ if (ctlr->busy)
+ return -EBUSY;
+
+ zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, GQSPI_EN_MASK);
+
+ return 0;
+}
+
+/**
+ * zynqmp_qspi_filltxfifo - Fills the TX FIFO with data from the TX buffer
+ *				as long as there is room in the FIFO and
+ *				bytes remain to be transmitted.
+ * @xqspi: Pointer to the zynqmp_qspi structure
+ * @size: Number of bytes to be copied from TX buffer to TX FIFO
+ */
+static void zynqmp_qspi_filltxfifo(struct zynqmp_qspi *xqspi, int size)
+{
+ u32 count = 0, intermediate;
+
+ while ((xqspi->bytes_to_transfer > 0) && (count < size) && (xqspi->txbuf)) {
+ if (xqspi->bytes_to_transfer >= 4) {
+ memcpy(&intermediate, xqspi->txbuf, 4);
+ xqspi->txbuf += 4;
+ xqspi->bytes_to_transfer -= 4;
+ count += 4;
+ } else {
+ memcpy(&intermediate, xqspi->txbuf,
+ xqspi->bytes_to_transfer);
+ xqspi->txbuf += xqspi->bytes_to_transfer;
+ xqspi->bytes_to_transfer = 0;
+ count += xqspi->bytes_to_transfer;
+ }
+ zynqmp_gqspi_write(xqspi, GQSPI_TXD_OFST, intermediate);
+ }
+}
+
+/**
+ * zynqmp_qspi_readrxfifo - Drains the RX FIFO into the RX buffer as long as
+ *				data is available and bytes remain to be
+ *				received.
+ * @xqspi:	Pointer to the zynqmp_qspi structure
+ * @size:	Maximum number of bytes to be copied from the RX FIFO to the
+ *		RX buffer
+ */
+static void zynqmp_qspi_readrxfifo(struct zynqmp_qspi *xqspi, u32 size)
+{
+ ulong data;
+ int count = 0;
+
+ while ((count < size) && (xqspi->bytes_to_receive > 0)) {
+ if (xqspi->bytes_to_receive >= 4) {
+ (*(u32 *)xqspi->rxbuf) =
+ zynqmp_gqspi_read(xqspi, GQSPI_RXD_OFST);
+ xqspi->rxbuf += 4;
+ xqspi->bytes_to_receive -= 4;
+ count += 4;
+ } else {
+ data = zynqmp_gqspi_read(xqspi, GQSPI_RXD_OFST);
+ count += xqspi->bytes_to_receive;
+ zynqmp_qspi_copy_read_data(xqspi, data,
+ xqspi->bytes_to_receive);
+ xqspi->bytes_to_receive = 0;
+ }
+ }
+}
+
+/**
+ * zynqmp_qspi_fillgenfifo - Fills the GENFIFO.
+ * @xqspi: Pointer to the zynqmp_qspi structure
+ * @nbits: Transfer/Receive buswidth.
+ * @genfifoentry: Variable in which GENFIFO mask is saved
+ */
+static void zynqmp_qspi_fillgenfifo(struct zynqmp_qspi *xqspi, u8 nbits,
+ u32 genfifoentry)
+{
+ u32 transfer_len = 0;
+
+ if (xqspi->txbuf) {
+ genfifoentry &= ~GQSPI_GENFIFO_RX;
+ genfifoentry |= GQSPI_GENFIFO_DATA_XFER;
+ genfifoentry |= GQSPI_GENFIFO_TX;
+ transfer_len = xqspi->bytes_to_transfer;
+ } else if (xqspi->rxbuf) {
+ genfifoentry &= ~GQSPI_GENFIFO_TX;
+ genfifoentry |= GQSPI_GENFIFO_DATA_XFER;
+ genfifoentry |= GQSPI_GENFIFO_RX;
+ if (xqspi->mode == GQSPI_MODE_DMA)
+ transfer_len = xqspi->dma_rx_bytes;
+ else
+ transfer_len = xqspi->bytes_to_receive;
+ } else {
+		/* Sending dummy cycles here */
+ genfifoentry &= ~(GQSPI_GENFIFO_TX | GQSPI_GENFIFO_RX);
+ genfifoentry |= GQSPI_GENFIFO_DATA_XFER;
+ transfer_len = xqspi->bytes_to_transfer;
+ }
+ genfifoentry |= zynqmp_qspi_selectspimode(xqspi, nbits);
+ xqspi->genfifoentry = genfifoentry;
+
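+	/*
+	 * Short transfers fit in the GENFIFO immediate data field. Longer
+	 * transfers are split into exponent entries (one per set bit above
+	 * bit 7, each covering 2^exponent bytes) plus a final immediate
+	 * entry for the low byte; e.g. 4160 bytes becomes one 4096-byte
+	 * (2^12) exponent entry plus a 64-byte immediate entry.
+	 */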
+ if ((transfer_len) < GQSPI_GENFIFO_IMM_DATA_MASK) {
+ genfifoentry &= ~GQSPI_GENFIFO_IMM_DATA_MASK;
+ genfifoentry |= transfer_len;
+ zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, genfifoentry);
+ } else {
+ int tempcount = transfer_len;
+ u32 exponent = 8; /* 2^8 = 256 */
+ u8 imm_data = tempcount & 0xFF;
+
+ tempcount &= ~(tempcount & 0xFF);
+ /* Immediate entry */
+ if (tempcount != 0) {
+ /* Exponent entries */
+ genfifoentry |= GQSPI_GENFIFO_EXP;
+ while (tempcount != 0) {
+ if (tempcount & GQSPI_GENFIFO_EXP_START) {
+ genfifoentry &=
+ ~GQSPI_GENFIFO_IMM_DATA_MASK;
+ genfifoentry |= exponent;
+ zynqmp_gqspi_write(xqspi,
+ GQSPI_GEN_FIFO_OFST,
+ genfifoentry);
+ }
+ tempcount = tempcount >> 1;
+ exponent++;
+ }
+ }
+ if (imm_data != 0) {
+ genfifoentry &= ~GQSPI_GENFIFO_EXP;
+ genfifoentry &= ~GQSPI_GENFIFO_IMM_DATA_MASK;
+ genfifoentry |= (u8)(imm_data & 0xFF);
+ zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST,
+ genfifoentry);
+ }
+ }
+ if (xqspi->mode == GQSPI_MODE_IO && xqspi->rxbuf) {
+ /* Dummy generic FIFO entry */
+ zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, 0x0);
+ }
+}
+
+/**
+ * zynqmp_process_dma_irq - Handler for DMA done interrupt of QSPI
+ * controller
+ * @xqspi: zynqmp_qspi instance pointer
+ *
+ * This function handles DMA interrupt only.
+ */
+static void zynqmp_process_dma_irq(struct zynqmp_qspi *xqspi)
+{
+ u32 config_reg, genfifoentry;
+
+ dma_unmap_single(xqspi->dev, xqspi->dma_addr,
+ xqspi->dma_rx_bytes, DMA_FROM_DEVICE);
+ xqspi->rxbuf += xqspi->dma_rx_bytes;
+ xqspi->bytes_to_receive -= xqspi->dma_rx_bytes;
+ xqspi->dma_rx_bytes = 0;
+
+ /* Disabling the DMA interrupts */
+ zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_I_DIS_OFST,
+ GQSPI_QSPIDMA_DST_I_EN_DONE_MASK);
+
+ if (xqspi->bytes_to_receive > 0) {
+		/* Switch to IO mode for the remaining bytes to receive */
+ config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST);
+ config_reg &= ~GQSPI_CFG_MODE_EN_MASK;
+ zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg);
+
+ /* Initiate the transfer of remaining bytes */
+ genfifoentry = xqspi->genfifoentry;
+ genfifoentry |= xqspi->bytes_to_receive;
+ zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, genfifoentry);
+
+ /* Dummy generic FIFO entry */
+ zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, 0x0);
+
+ /* Manual start */
+ zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST,
+ (zynqmp_gqspi_read(xqspi,
+ GQSPI_CONFIG_OFST) |
+ GQSPI_CFG_START_GEN_FIFO_MASK));
+
+ /* Enable the RX interrupts for IO mode */
+ zynqmp_gqspi_write(xqspi, GQSPI_IER_OFST,
+ GQSPI_IER_GENFIFOEMPTY_MASK |
+ GQSPI_IER_RXNEMPTY_MASK |
+ GQSPI_IER_RXEMPTY_MASK);
+ }
+}
+
+/**
+ * zynqmp_qspi_irq - Interrupt service routine of the QSPI controller
+ * @irq: IRQ number
+ * @dev_id: Pointer to the xqspi structure
+ *
+ * This function handles TX empty only.
+ * On TX empty interrupt this function reads the received data from RX FIFO
+ * and fills the TX FIFO if there is any data remaining to be transferred.
+ *
+ * Return: IRQ_HANDLED when interrupt is handled
+ * IRQ_NONE otherwise.
+ */
+static irqreturn_t zynqmp_qspi_irq(int irq, void *dev_id)
+{
+ struct zynqmp_qspi *xqspi = (struct zynqmp_qspi *)dev_id;
+ irqreturn_t ret = IRQ_NONE;
+ u32 status, mask, dma_status = 0;
+
+ status = zynqmp_gqspi_read(xqspi, GQSPI_ISR_OFST);
+ zynqmp_gqspi_write(xqspi, GQSPI_ISR_OFST, status);
+ mask = (status & ~(zynqmp_gqspi_read(xqspi, GQSPI_IMASK_OFST)));
+
+ /* Read and clear DMA status */
+ if (xqspi->mode == GQSPI_MODE_DMA) {
+ dma_status =
+ zynqmp_gqspi_read(xqspi, GQSPI_QSPIDMA_DST_I_STS_OFST);
+ zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_I_STS_OFST,
+ dma_status);
+ }
+
+ if (mask & GQSPI_ISR_TXNOT_FULL_MASK) {
+ zynqmp_qspi_filltxfifo(xqspi, GQSPI_TX_FIFO_FILL);
+ ret = IRQ_HANDLED;
+ }
+
+ if (dma_status & GQSPI_QSPIDMA_DST_I_STS_DONE_MASK) {
+ zynqmp_process_dma_irq(xqspi);
+ ret = IRQ_HANDLED;
+ } else if (!(mask & GQSPI_IER_RXEMPTY_MASK) &&
+ (mask & GQSPI_IER_GENFIFOEMPTY_MASK)) {
+ zynqmp_qspi_readrxfifo(xqspi, GQSPI_RX_FIFO_FILL);
+ ret = IRQ_HANDLED;
+ }
+
+ if (xqspi->bytes_to_receive == 0 && xqspi->bytes_to_transfer == 0 &&
+ ((status & GQSPI_IRQ_MASK) == GQSPI_IRQ_MASK)) {
+ zynqmp_gqspi_write(xqspi, GQSPI_IDR_OFST, GQSPI_ISR_IDR_MASK);
+ complete(&xqspi->data_completion);
+ ret = IRQ_HANDLED;
+ }
+ return ret;
+}
+
+/**
+ * zynqmp_qspi_setuprxdma - This function sets up the RX DMA operation
+ * @xqspi:	Pointer to the GQSPI instance.
+ *
+ * Return: 0 on success; -ENOMEM if the receive buffer cannot be DMA mapped.
+ */
+static int zynqmp_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
+{
+ u32 rx_bytes, rx_rem, config_reg;
+ dma_addr_t addr;
+ u64 dma_align = (u64)(uintptr_t)xqspi->rxbuf;
+
+ if (xqspi->bytes_to_receive < 8 ||
+ ((dma_align & GQSPI_DMA_UNALIGN) != 0x0)) {
+ /* Setting to IO mode */
+ config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST);
+ config_reg &= ~GQSPI_CFG_MODE_EN_MASK;
+ zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg);
+ xqspi->mode = GQSPI_MODE_IO;
+ xqspi->dma_rx_bytes = 0;
+ return 0;
+ }
+
+ rx_rem = xqspi->bytes_to_receive % 4;
+ rx_bytes = (xqspi->bytes_to_receive - rx_rem);
+
+ addr = dma_map_single(xqspi->dev, (void *)xqspi->rxbuf,
+ rx_bytes, DMA_FROM_DEVICE);
+ if (dma_mapping_error(xqspi->dev, addr)) {
+ dev_err(xqspi->dev, "ERR:rxdma:memory not mapped\n");
+ return -ENOMEM;
+ }
+
+ xqspi->dma_rx_bytes = rx_bytes;
+ xqspi->dma_addr = addr;
+ zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_ADDR_OFST,
+ (u32)(addr & 0xffffffff));
+ addr = ((addr >> 16) >> 16);
+ zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_ADDR_MSB_OFST,
+ ((u32)addr) & 0xfff);
+
+ /* Enabling the DMA mode */
+ config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST);
+ config_reg &= ~GQSPI_CFG_MODE_EN_MASK;
+ config_reg |= GQSPI_CFG_MODE_EN_DMA_MASK;
+ zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg);
+
+ /* Switch to DMA mode */
+ xqspi->mode = GQSPI_MODE_DMA;
+
+ /* Write the number of bytes to transfer */
+ zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_SIZE_OFST, rx_bytes);
+
+ return 0;
+}
+
+/**
+ * zynqmp_qspi_write_op - This function sets up the GENFIFO entries and
+ *			  fills the TX FIFO with as many bytes as possible.
+ * @xqspi:	Pointer to the GQSPI instance.
+ * @tx_nbits:	Transfer buswidth.
+ * @genfifoentry:	GENFIFO entry mask used for this transfer
+ */
+static void zynqmp_qspi_write_op(struct zynqmp_qspi *xqspi, u8 tx_nbits,
+ u32 genfifoentry)
+{
+ u32 config_reg;
+
+ zynqmp_qspi_fillgenfifo(xqspi, tx_nbits, genfifoentry);
+ zynqmp_qspi_filltxfifo(xqspi, GQSPI_TXD_DEPTH);
+ if (xqspi->mode == GQSPI_MODE_DMA) {
+ config_reg = zynqmp_gqspi_read(xqspi,
+ GQSPI_CONFIG_OFST);
+ config_reg &= ~GQSPI_CFG_MODE_EN_MASK;
+ zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST,
+ config_reg);
+ xqspi->mode = GQSPI_MODE_IO;
+ }
+}
+
+/**
+ * zynqmp_qspi_read_op - This function sets up the GENFIFO entries and the
+ *			 RX DMA operation.
+ * @xqspi:	Pointer to the GQSPI instance.
+ * @rx_nbits:	Receive buswidth.
+ * @genfifoentry:	GENFIFO entry mask used for this transfer
+ *
+ * Return: 0 on success; error value otherwise.
+ */
+static int zynqmp_qspi_read_op(struct zynqmp_qspi *xqspi, u8 rx_nbits,
+ u32 genfifoentry)
+{
+ int ret;
+
+ ret = zynqmp_qspi_setuprxdma(xqspi);
+ if (ret)
+ return ret;
+ zynqmp_qspi_fillgenfifo(xqspi, rx_nbits, genfifoentry);
+
+ return 0;
+}
+
+/**
+ * zynqmp_qspi_suspend - Suspend method for the QSPI driver
+ * @dev: Address of the platform_device structure
+ *
+ * This function stops the QSPI driver queue and disables the QSPI controller
+ *
+ * Return: Always 0
+ */
+static int __maybe_unused zynqmp_qspi_suspend(struct device *dev)
+{
+ struct zynqmp_qspi *xqspi = dev_get_drvdata(dev);
+ struct spi_controller *ctlr = xqspi->ctlr;
+ int ret;
+
+ ret = spi_controller_suspend(ctlr);
+ if (ret)
+ return ret;
+
+ zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, 0x0);
+
+ return 0;
+}
+
+/**
+ * zynqmp_qspi_resume - Resume method for the QSPI driver
+ * @dev: Address of the platform_device structure
+ *
+ * The function starts the QSPI driver queue and initializes the QSPI
+ * controller
+ *
+ * Return: 0 on success; error value otherwise
+ */
+static int __maybe_unused zynqmp_qspi_resume(struct device *dev)
+{
+ struct zynqmp_qspi *xqspi = dev_get_drvdata(dev);
+ struct spi_controller *ctlr = xqspi->ctlr;
+
+ zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, GQSPI_EN_MASK);
+
+ spi_controller_resume(ctlr);
+
+ return 0;
+}
+
+/**
+ * zynqmp_runtime_suspend - Runtime suspend method for the SPI driver
+ * @dev: Address of the platform_device structure
+ *
+ * This function disables the clocks
+ *
+ * Return: Always 0
+ */
+static int __maybe_unused zynqmp_runtime_suspend(struct device *dev)
+{
+ struct zynqmp_qspi *xqspi = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(xqspi->refclk);
+ clk_disable_unprepare(xqspi->pclk);
+
+ return 0;
+}
+
+/**
+ * zynqmp_runtime_resume - Runtime resume method for the SPI driver
+ * @dev: Address of the device structure
+ *
+ * This function enables the clocks
+ *
+ * Return: 0 on success; error value otherwise
+ */
+static int __maybe_unused zynqmp_runtime_resume(struct device *dev)
+{
+ struct zynqmp_qspi *xqspi = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(xqspi->pclk);
+ if (ret) {
+ dev_err(dev, "Cannot enable APB clock.\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(xqspi->refclk);
+ if (ret) {
+ dev_err(dev, "Cannot enable device clock.\n");
+ clk_disable_unprepare(xqspi->pclk);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * zynqmp_qspi_exec_op() - Initiates the QSPI transfer
+ * @mem: The SPI memory
+ * @op: The memory operation to execute
+ *
+ * Executes a memory operation.
+ *
+ * This function first selects the chip and starts the memory operation.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+static int zynqmp_qspi_exec_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ struct zynqmp_qspi *xqspi = spi_controller_get_devdata
+ (mem->spi->master);
+ int err = 0, i;
+ u32 genfifoentry = 0;
+ u16 opcode = op->cmd.opcode;
+ u64 opaddr;
+
+ dev_dbg(xqspi->dev, "cmd:%#x mode:%d.%d.%d.%d\n",
+ op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
+ op->dummy.buswidth, op->data.buswidth);
+
+ mutex_lock(&xqspi->op_lock);
+ zynqmp_qspi_config_op(xqspi, mem->spi);
+ zynqmp_qspi_chipselect(mem->spi, false);
+ genfifoentry |= xqspi->genfifocs;
+ genfifoentry |= xqspi->genfifobus;
+
+ if (op->cmd.opcode) {
+ reinit_completion(&xqspi->data_completion);
+ xqspi->txbuf = &opcode;
+ xqspi->rxbuf = NULL;
+ xqspi->bytes_to_transfer = op->cmd.nbytes;
+ xqspi->bytes_to_receive = 0;
+ zynqmp_qspi_write_op(xqspi, op->cmd.buswidth, genfifoentry);
+ zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST,
+ zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST) |
+ GQSPI_CFG_START_GEN_FIFO_MASK);
+ zynqmp_gqspi_write(xqspi, GQSPI_IER_OFST,
+ GQSPI_IER_GENFIFOEMPTY_MASK |
+ GQSPI_IER_TXNOT_FULL_MASK);
+ if (!wait_for_completion_timeout
+ (&xqspi->data_completion, msecs_to_jiffies(1000))) {
+ err = -ETIMEDOUT;
+ goto return_err;
+ }
+ }
+
+ if (op->addr.nbytes) {
+ xqspi->txbuf = &opaddr;
+ for (i = 0; i < op->addr.nbytes; i++) {
+ *(((u8 *)xqspi->txbuf) + i) = op->addr.val >>
+ (8 * (op->addr.nbytes - i - 1));
+ }
+
+ reinit_completion(&xqspi->data_completion);
+ xqspi->rxbuf = NULL;
+ xqspi->bytes_to_transfer = op->addr.nbytes;
+ xqspi->bytes_to_receive = 0;
+ zynqmp_qspi_write_op(xqspi, op->addr.buswidth, genfifoentry);
+ zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST,
+ zynqmp_gqspi_read(xqspi,
+ GQSPI_CONFIG_OFST) |
+ GQSPI_CFG_START_GEN_FIFO_MASK);
+ zynqmp_gqspi_write(xqspi, GQSPI_IER_OFST,
+ GQSPI_IER_TXEMPTY_MASK |
+ GQSPI_IER_GENFIFOEMPTY_MASK |
+ GQSPI_IER_TXNOT_FULL_MASK);
+ if (!wait_for_completion_timeout
+ (&xqspi->data_completion, msecs_to_jiffies(1000))) {
+ err = -ETIMEDOUT;
+ goto return_err;
+ }
+ }
+
+ if (op->dummy.nbytes) {
+ xqspi->txbuf = NULL;
+ xqspi->rxbuf = NULL;
+ /*
+ * xqspi->bytes_to_transfer here represents the dummy cycles
+ * that need to be sent.
+ */
+ xqspi->bytes_to_transfer = op->dummy.nbytes * 8 / op->dummy.buswidth;
+ xqspi->bytes_to_receive = 0;
+ /*
+ * Use op->data.buswidth instead of op->dummy.buswidth here because
+ * it is the one needed to configure the correct SPI mode.
+ */
+ zynqmp_qspi_write_op(xqspi, op->data.buswidth,
+ genfifoentry);
+ zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST,
+ zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST) |
+ GQSPI_CFG_START_GEN_FIFO_MASK);
+ }
+
+ if (op->data.nbytes) {
+ reinit_completion(&xqspi->data_completion);
+ if (op->data.dir == SPI_MEM_DATA_OUT) {
+ xqspi->txbuf = (u8 *)op->data.buf.out;
+ xqspi->rxbuf = NULL;
+ xqspi->bytes_to_transfer = op->data.nbytes;
+ xqspi->bytes_to_receive = 0;
+ zynqmp_qspi_write_op(xqspi, op->data.buswidth,
+ genfifoentry);
+ zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST,
+ zynqmp_gqspi_read
+ (xqspi, GQSPI_CONFIG_OFST) |
+ GQSPI_CFG_START_GEN_FIFO_MASK);
+ zynqmp_gqspi_write(xqspi, GQSPI_IER_OFST,
+ GQSPI_IER_TXEMPTY_MASK |
+ GQSPI_IER_GENFIFOEMPTY_MASK |
+ GQSPI_IER_TXNOT_FULL_MASK);
+ } else {
+ xqspi->txbuf = NULL;
+ xqspi->rxbuf = (u8 *)op->data.buf.in;
+ xqspi->bytes_to_receive = op->data.nbytes;
+ xqspi->bytes_to_transfer = 0;
+ err = zynqmp_qspi_read_op(xqspi, op->data.buswidth,
+ genfifoentry);
+ if (err)
+ goto return_err;
+
+ zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST,
+ zynqmp_gqspi_read
+ (xqspi, GQSPI_CONFIG_OFST) |
+ GQSPI_CFG_START_GEN_FIFO_MASK);
+ if (xqspi->mode == GQSPI_MODE_DMA) {
+ zynqmp_gqspi_write
+ (xqspi, GQSPI_QSPIDMA_DST_I_EN_OFST,
+ GQSPI_QSPIDMA_DST_I_EN_DONE_MASK);
+ } else {
+ zynqmp_gqspi_write(xqspi, GQSPI_IER_OFST,
+ GQSPI_IER_GENFIFOEMPTY_MASK |
+ GQSPI_IER_RXNEMPTY_MASK |
+ GQSPI_IER_RXEMPTY_MASK);
+ }
+ }
+ if (!wait_for_completion_timeout
+ (&xqspi->data_completion, msecs_to_jiffies(1000)))
+ err = -ETIMEDOUT;
+ }
+
+return_err:
+
+ zynqmp_qspi_chipselect(mem->spi, true);
+ mutex_unlock(&xqspi->op_lock);
+
+ return err;
+}
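+
+/*
+ * Illustrative sketch (not part of this driver): a spi-mem client such as
+ * spi-nor would describe a 1-1-4 quad-output fast read roughly as below;
+ * the cmd, addr, dummy and data phases map one-to-one onto the four blocks
+ * handled in zynqmp_qspi_exec_op() above. Opcode and sizes are example
+ * values only.
+ *
+ *   struct spi_mem_op op =
+ *           SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1),
+ *                      SPI_MEM_OP_ADDR(3, 0x0, 1),
+ *                      SPI_MEM_OP_DUMMY(1, 1),
+ *                      SPI_MEM_OP_DATA_IN(256, buf, 4));
+ *   ret = spi_mem_exec_op(mem, &op);
+ */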
+
+static const struct dev_pm_ops zynqmp_qspi_dev_pm_ops = {
+ SET_RUNTIME_PM_OPS(zynqmp_runtime_suspend,
+ zynqmp_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(zynqmp_qspi_suspend, zynqmp_qspi_resume)
+};
+
+static const struct spi_controller_mem_ops zynqmp_qspi_mem_ops = {
+ .exec_op = zynqmp_qspi_exec_op,
+};
+
+/**
+ * zynqmp_qspi_probe - Probe method for the QSPI driver
+ * @pdev: Pointer to the platform_device structure
+ *
+ * This function initializes the driver data structures and the hardware.
+ *
+ * Return: 0 on success; error value otherwise
+ */
+static int zynqmp_qspi_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct spi_controller *ctlr;
+ struct zynqmp_qspi *xqspi;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ u32 num_cs;
+
+ ctlr = spi_alloc_master(&pdev->dev, sizeof(*xqspi));
+ if (!ctlr)
+ return -ENOMEM;
+
+ xqspi = spi_controller_get_devdata(ctlr);
+ xqspi->dev = dev;
+ xqspi->ctlr = ctlr;
+ platform_set_drvdata(pdev, xqspi);
+
+ xqspi->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(xqspi->regs)) {
+ ret = PTR_ERR(xqspi->regs);
+ goto remove_master;
+ }
+
+ xqspi->pclk = devm_clk_get(&pdev->dev, "pclk");
+ if (IS_ERR(xqspi->pclk)) {
+ dev_err(dev, "pclk clock not found.\n");
+ ret = PTR_ERR(xqspi->pclk);
+ goto remove_master;
+ }
+
+ xqspi->refclk = devm_clk_get(&pdev->dev, "ref_clk");
+ if (IS_ERR(xqspi->refclk)) {
+ dev_err(dev, "ref_clk clock not found.\n");
+ ret = PTR_ERR(xqspi->refclk);
+ goto remove_master;
+ }
+
+ ret = clk_prepare_enable(xqspi->pclk);
+ if (ret) {
+ dev_err(dev, "Unable to enable APB clock.\n");
+ goto remove_master;
+ }
+
+ ret = clk_prepare_enable(xqspi->refclk);
+ if (ret) {
+ dev_err(dev, "Unable to enable device clock.\n");
+ goto clk_dis_pclk;
+ }
+
+ init_completion(&xqspi->data_completion);
+
+ mutex_init(&xqspi->op_lock);
+
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to pm_runtime_get_sync: %d\n", ret);
+ goto clk_dis_all;
+ }
+
+ /* QSPI controller initializations */
+ zynqmp_qspi_init_hw(xqspi);
+
+ xqspi->irq = platform_get_irq(pdev, 0);
+ if (xqspi->irq <= 0) {
+ ret = -ENXIO;
+ goto clk_dis_all;
+ }
+ ret = devm_request_irq(&pdev->dev, xqspi->irq, zynqmp_qspi_irq,
+ 0, pdev->name, xqspi);
+ if (ret != 0) {
+ ret = -ENXIO;
+ dev_err(dev, "request_irq failed\n");
+ goto clk_dis_all;
+ }
+
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
+ if (ret)
+ goto clk_dis_all;
+
+ ret = of_property_read_u32(np, "num-cs", &num_cs);
+ if (ret < 0) {
+ ctlr->num_chipselect = GQSPI_DEFAULT_NUM_CS;
+ } else if (num_cs > GQSPI_MAX_NUM_CS) {
+ ret = -EINVAL;
+ dev_err(&pdev->dev, "only %d chip selects are available\n",
+ GQSPI_MAX_NUM_CS);
+ goto clk_dis_all;
+ } else {
+ ctlr->num_chipselect = num_cs;
+ }
+
+ ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
+ ctlr->mem_ops = &zynqmp_qspi_mem_ops;
+ ctlr->setup = zynqmp_qspi_setup_op;
+ ctlr->max_speed_hz = clk_get_rate(xqspi->refclk) / 2;
+ ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD |
+ SPI_TX_DUAL | SPI_TX_QUAD;
+ ctlr->dev.of_node = np;
+ ctlr->auto_runtime_pm = true;
+
+ ret = devm_spi_register_controller(&pdev->dev, ctlr);
+ if (ret) {
+ dev_err(&pdev->dev, "spi_register_controller failed\n");
+ goto clk_dis_all;
+ }
+
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
+
+ return 0;
+
+clk_dis_all:
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ clk_disable_unprepare(xqspi->refclk);
+clk_dis_pclk:
+ clk_disable_unprepare(xqspi->pclk);
+remove_master:
+ spi_controller_put(ctlr);
+
+ return ret;
+}
+
+/**
+ * zynqmp_qspi_remove - Remove method for the QSPI driver
+ * @pdev: Pointer to the platform_device structure
+ *
+ * This function is called if a device is physically removed from the system or
+ * if the driver module is being unloaded. It frees all resources allocated to
+ * the device.
+ *
+ * Return: Always 0
+ */
+static int zynqmp_qspi_remove(struct platform_device *pdev)
+{
+ struct zynqmp_qspi *xqspi = platform_get_drvdata(pdev);
+
+ pm_runtime_get_sync(&pdev->dev);
+
+ zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, 0x0);
+
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ clk_disable_unprepare(xqspi->refclk);
+ clk_disable_unprepare(xqspi->pclk);
+
+ return 0;
+}
+
+static const struct of_device_id zynqmp_qspi_of_match[] = {
+ { .compatible = "xlnx,zynqmp-qspi-1.0", },
+ { /* End of table */ }
+};
+
+MODULE_DEVICE_TABLE(of, zynqmp_qspi_of_match);
+
+static struct platform_driver zynqmp_qspi_driver = {
+ .probe = zynqmp_qspi_probe,
+ .remove = zynqmp_qspi_remove,
+ .driver = {
+ .name = "zynqmp-qspi",
+ .of_match_table = zynqmp_qspi_of_match,
+ .pm = &zynqmp_qspi_dev_pm_ops,
+ },
+};
+
+module_platform_driver(zynqmp_qspi_driver);
+
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_DESCRIPTION("Xilinx ZynqMP QSPI driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
new file mode 100644
index 000000000..19688f333
--- /dev/null
+++ b/drivers/spi/spi.c
@@ -0,0 +1,4609 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// SPI init/core code
+//
+// Copyright (C) 2005 David Brownell
+// Copyright (C) 2008 Secret Lab Technologies Ltd.
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/cache.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/clk/clk-conf.h>
+#include <linux/slab.h>
+#include <linux/mod_devicetable.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+#include <linux/gpio/consumer.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_domain.h>
+#include <linux/property.h>
+#include <linux/export.h>
+#include <linux/sched/rt.h>
+#include <uapi/linux/sched/types.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/ioport.h>
+#include <linux/acpi.h>
+#include <linux/highmem.h>
+#include <linux/idr.h>
+#include <linux/platform_data/x86/apple.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/percpu.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/spi.h>
+EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start);
+EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);
+
+#include "internals.h"
+
+static DEFINE_IDR(spi_master_idr);
+
+static void spidev_release(struct device *dev)
+{
+ struct spi_device *spi = to_spi_device(dev);
+
+ spi_controller_put(spi->controller);
+ kfree(spi->driver_override);
+ free_percpu(spi->pcpu_statistics);
+ kfree(spi);
+}
+
+static ssize_t
+modalias_show(struct device *dev, struct device_attribute *a, char *buf)
+{
+ const struct spi_device *spi = to_spi_device(dev);
+ int len;
+
+ len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
+ if (len != -ENODEV)
+ return len;
+
+ return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
+}
+static DEVICE_ATTR_RO(modalias);
+
+static ssize_t driver_override_store(struct device *dev,
+ struct device_attribute *a,
+ const char *buf, size_t count)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ int ret;
+
+ ret = driver_set_override(dev, &spi->driver_override, buf, count);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t driver_override_show(struct device *dev,
+ struct device_attribute *a, char *buf)
+{
+ const struct spi_device *spi = to_spi_device(dev);
+ ssize_t len;
+
+ device_lock(dev);
+ len = snprintf(buf, PAGE_SIZE, "%s\n", spi->driver_override ? : "");
+ device_unlock(dev);
+ return len;
+}
+static DEVICE_ATTR_RW(driver_override);
+
+static struct spi_statistics __percpu *spi_alloc_pcpu_stats(struct device *dev)
+{
+ struct spi_statistics __percpu *pcpu_stats;
+
+ if (dev)
+ pcpu_stats = devm_alloc_percpu(dev, struct spi_statistics);
+ else
+ pcpu_stats = alloc_percpu_gfp(struct spi_statistics, GFP_KERNEL);
+
+ if (pcpu_stats) {
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ struct spi_statistics *stat;
+
+ stat = per_cpu_ptr(pcpu_stats, cpu);
+ u64_stats_init(&stat->syncp);
+ }
+ }
+ return pcpu_stats;
+}
+
+#define spi_pcpu_stats_totalize(ret, in, field) \
+do { \
+ int i; \
+ ret = 0; \
+ for_each_possible_cpu(i) { \
+ const struct spi_statistics *pcpu_stats; \
+ u64 inc; \
+ unsigned int start; \
+ pcpu_stats = per_cpu_ptr(in, i); \
+ do { \
+ start = u64_stats_fetch_begin_irq( \
+ &pcpu_stats->syncp); \
+ inc = u64_stats_read(&pcpu_stats->field); \
+ } while (u64_stats_fetch_retry_irq( \
+ &pcpu_stats->syncp, start)); \
+ ret += inc; \
+ } \
+} while (0)
+
+#define SPI_STATISTICS_ATTRS(field, file) \
+static ssize_t spi_controller_##field##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct spi_controller *ctlr = container_of(dev, \
+ struct spi_controller, dev); \
+ return spi_statistics_##field##_show(ctlr->pcpu_statistics, buf); \
+} \
+static struct device_attribute dev_attr_spi_controller_##field = { \
+ .attr = { .name = file, .mode = 0444 }, \
+ .show = spi_controller_##field##_show, \
+}; \
+static ssize_t spi_device_##field##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct spi_device *spi = to_spi_device(dev); \
+ return spi_statistics_##field##_show(spi->pcpu_statistics, buf); \
+} \
+static struct device_attribute dev_attr_spi_device_##field = { \
+ .attr = { .name = file, .mode = 0444 }, \
+ .show = spi_device_##field##_show, \
+}
+
+#define SPI_STATISTICS_SHOW_NAME(name, file, field) \
+static ssize_t spi_statistics_##name##_show(struct spi_statistics __percpu *stat, \
+ char *buf) \
+{ \
+ ssize_t len; \
+ u64 val; \
+ spi_pcpu_stats_totalize(val, stat, field); \
+ len = sysfs_emit(buf, "%llu\n", val); \
+ return len; \
+} \
+SPI_STATISTICS_ATTRS(name, file)
+
+#define SPI_STATISTICS_SHOW(field) \
+ SPI_STATISTICS_SHOW_NAME(field, __stringify(field), \
+ field)
+
+SPI_STATISTICS_SHOW(messages);
+SPI_STATISTICS_SHOW(transfers);
+SPI_STATISTICS_SHOW(errors);
+SPI_STATISTICS_SHOW(timedout);
+
+SPI_STATISTICS_SHOW(spi_sync);
+SPI_STATISTICS_SHOW(spi_sync_immediate);
+SPI_STATISTICS_SHOW(spi_async);
+
+SPI_STATISTICS_SHOW(bytes);
+SPI_STATISTICS_SHOW(bytes_rx);
+SPI_STATISTICS_SHOW(bytes_tx);
+
+#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number) \
+ SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index, \
+ "transfer_bytes_histo_" number, \
+ transfer_bytes_histo[index])
+SPI_STATISTICS_TRANSFER_BYTES_HISTO(0, "0-1");
+SPI_STATISTICS_TRANSFER_BYTES_HISTO(1, "2-3");
+SPI_STATISTICS_TRANSFER_BYTES_HISTO(2, "4-7");
+SPI_STATISTICS_TRANSFER_BYTES_HISTO(3, "8-15");
+SPI_STATISTICS_TRANSFER_BYTES_HISTO(4, "16-31");
+SPI_STATISTICS_TRANSFER_BYTES_HISTO(5, "32-63");
+SPI_STATISTICS_TRANSFER_BYTES_HISTO(6, "64-127");
+SPI_STATISTICS_TRANSFER_BYTES_HISTO(7, "128-255");
+SPI_STATISTICS_TRANSFER_BYTES_HISTO(8, "256-511");
+SPI_STATISTICS_TRANSFER_BYTES_HISTO(9, "512-1023");
+SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
+SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
+SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
+SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
+SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
+SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
+SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");
+
+SPI_STATISTICS_SHOW(transfers_split_maxsize);
+
+static struct attribute *spi_dev_attrs[] = {
+ &dev_attr_modalias.attr,
+ &dev_attr_driver_override.attr,
+ NULL,
+};
+
+static const struct attribute_group spi_dev_group = {
+ .attrs = spi_dev_attrs,
+};
+
+static struct attribute *spi_device_statistics_attrs[] = {
+ &dev_attr_spi_device_messages.attr,
+ &dev_attr_spi_device_transfers.attr,
+ &dev_attr_spi_device_errors.attr,
+ &dev_attr_spi_device_timedout.attr,
+ &dev_attr_spi_device_spi_sync.attr,
+ &dev_attr_spi_device_spi_sync_immediate.attr,
+ &dev_attr_spi_device_spi_async.attr,
+ &dev_attr_spi_device_bytes.attr,
+ &dev_attr_spi_device_bytes_rx.attr,
+ &dev_attr_spi_device_bytes_tx.attr,
+ &dev_attr_spi_device_transfer_bytes_histo0.attr,
+ &dev_attr_spi_device_transfer_bytes_histo1.attr,
+ &dev_attr_spi_device_transfer_bytes_histo2.attr,
+ &dev_attr_spi_device_transfer_bytes_histo3.attr,
+ &dev_attr_spi_device_transfer_bytes_histo4.attr,
+ &dev_attr_spi_device_transfer_bytes_histo5.attr,
+ &dev_attr_spi_device_transfer_bytes_histo6.attr,
+ &dev_attr_spi_device_transfer_bytes_histo7.attr,
+ &dev_attr_spi_device_transfer_bytes_histo8.attr,
+ &dev_attr_spi_device_transfer_bytes_histo9.attr,
+ &dev_attr_spi_device_transfer_bytes_histo10.attr,
+ &dev_attr_spi_device_transfer_bytes_histo11.attr,
+ &dev_attr_spi_device_transfer_bytes_histo12.attr,
+ &dev_attr_spi_device_transfer_bytes_histo13.attr,
+ &dev_attr_spi_device_transfer_bytes_histo14.attr,
+ &dev_attr_spi_device_transfer_bytes_histo15.attr,
+ &dev_attr_spi_device_transfer_bytes_histo16.attr,
+ &dev_attr_spi_device_transfers_split_maxsize.attr,
+ NULL,
+};
+
+static const struct attribute_group spi_device_statistics_group = {
+ .name = "statistics",
+ .attrs = spi_device_statistics_attrs,
+};
+
+static const struct attribute_group *spi_dev_groups[] = {
+ &spi_dev_group,
+ &spi_device_statistics_group,
+ NULL,
+};
+
+static struct attribute *spi_controller_statistics_attrs[] = {
+ &dev_attr_spi_controller_messages.attr,
+ &dev_attr_spi_controller_transfers.attr,
+ &dev_attr_spi_controller_errors.attr,
+ &dev_attr_spi_controller_timedout.attr,
+ &dev_attr_spi_controller_spi_sync.attr,
+ &dev_attr_spi_controller_spi_sync_immediate.attr,
+ &dev_attr_spi_controller_spi_async.attr,
+ &dev_attr_spi_controller_bytes.attr,
+ &dev_attr_spi_controller_bytes_rx.attr,
+ &dev_attr_spi_controller_bytes_tx.attr,
+ &dev_attr_spi_controller_transfer_bytes_histo0.attr,
+ &dev_attr_spi_controller_transfer_bytes_histo1.attr,
+ &dev_attr_spi_controller_transfer_bytes_histo2.attr,
+ &dev_attr_spi_controller_transfer_bytes_histo3.attr,
+ &dev_attr_spi_controller_transfer_bytes_histo4.attr,
+ &dev_attr_spi_controller_transfer_bytes_histo5.attr,
+ &dev_attr_spi_controller_transfer_bytes_histo6.attr,
+ &dev_attr_spi_controller_transfer_bytes_histo7.attr,
+ &dev_attr_spi_controller_transfer_bytes_histo8.attr,
+ &dev_attr_spi_controller_transfer_bytes_histo9.attr,
+ &dev_attr_spi_controller_transfer_bytes_histo10.attr,
+ &dev_attr_spi_controller_transfer_bytes_histo11.attr,
+ &dev_attr_spi_controller_transfer_bytes_histo12.attr,
+ &dev_attr_spi_controller_transfer_bytes_histo13.attr,
+ &dev_attr_spi_controller_transfer_bytes_histo14.attr,
+ &dev_attr_spi_controller_transfer_bytes_histo15.attr,
+ &dev_attr_spi_controller_transfer_bytes_histo16.attr,
+ &dev_attr_spi_controller_transfers_split_maxsize.attr,
+ NULL,
+};
+
+static const struct attribute_group spi_controller_statistics_group = {
+ .name = "statistics",
+ .attrs = spi_controller_statistics_attrs,
+};
+
+static const struct attribute_group *spi_master_groups[] = {
+ &spi_controller_statistics_group,
+ NULL,
+};
+
+static void spi_statistics_add_transfer_stats(struct spi_statistics __percpu *pcpu_stats,
+ struct spi_transfer *xfer,
+ struct spi_controller *ctlr)
+{
+ int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;
+ struct spi_statistics *stats;
+
+ if (l2len < 0)
+ l2len = 0;
+
+ get_cpu();
+ stats = this_cpu_ptr(pcpu_stats);
+ u64_stats_update_begin(&stats->syncp);
+
+ u64_stats_inc(&stats->transfers);
+ u64_stats_inc(&stats->transfer_bytes_histo[l2len]);
+
+ u64_stats_add(&stats->bytes, xfer->len);
+ if ((xfer->tx_buf) &&
+ (xfer->tx_buf != ctlr->dummy_tx))
+ u64_stats_add(&stats->bytes_tx, xfer->len);
+ if ((xfer->rx_buf) &&
+ (xfer->rx_buf != ctlr->dummy_rx))
+ u64_stats_add(&stats->bytes_rx, xfer->len);
+
+ u64_stats_update_end(&stats->syncp);
+ put_cpu();
+}
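+
+/*
+ * Worked example (illustrative only): for xfer->len == 300, fls(300) == 9,
+ * so l2len == 8 and the transfer is counted in transfer_bytes_histo[8],
+ * i.e. the "256-511" bucket exposed in sysfs above.
+ */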
+
+/*
+ * modalias support makes "modprobe $MODALIAS" new-style hotplug work,
+ * and the sysfs version makes coldplug work too.
+ */
+static const struct spi_device_id *spi_match_id(const struct spi_device_id *id, const char *name)
+{
+ while (id->name[0]) {
+ if (!strcmp(name, id->name))
+ return id;
+ id++;
+ }
+ return NULL;
+}
+
+const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
+{
+ const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);
+
+ return spi_match_id(sdrv->id_table, sdev->modalias);
+}
+EXPORT_SYMBOL_GPL(spi_get_device_id);
+
+const void *spi_get_device_match_data(const struct spi_device *sdev)
+{
+ const void *match;
+
+ match = device_get_match_data(&sdev->dev);
+ if (match)
+ return match;
+
+ return (const void *)spi_get_device_id(sdev)->driver_data;
+}
+EXPORT_SYMBOL_GPL(spi_get_device_match_data);
+
+static int spi_match_device(struct device *dev, struct device_driver *drv)
+{
+ const struct spi_device *spi = to_spi_device(dev);
+ const struct spi_driver *sdrv = to_spi_driver(drv);
+
+ /* Check override first, and if set, only use the named driver */
+ if (spi->driver_override)
+ return strcmp(spi->driver_override, drv->name) == 0;
+
+ /* Attempt an OF style match */
+ if (of_driver_match_device(dev, drv))
+ return 1;
+
+ /* Then try ACPI */
+ if (acpi_driver_match_device(dev, drv))
+ return 1;
+
+ if (sdrv->id_table)
+ return !!spi_match_id(sdrv->id_table, spi->modalias);
+
+ return strcmp(spi->modalias, drv->name) == 0;
+}
+
+static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ const struct spi_device *spi = to_spi_device(dev);
+ int rc;
+
+ rc = acpi_device_uevent_modalias(dev, env);
+ if (rc != -ENODEV)
+ return rc;
+
+ return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
+}
+
+static int spi_probe(struct device *dev)
+{
+ const struct spi_driver *sdrv = to_spi_driver(dev->driver);
+ struct spi_device *spi = to_spi_device(dev);
+ int ret;
+
+ ret = of_clk_set_defaults(dev->of_node, false);
+ if (ret)
+ return ret;
+
+ if (dev->of_node) {
+ spi->irq = of_irq_get(dev->of_node, 0);
+ if (spi->irq == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ if (spi->irq < 0)
+ spi->irq = 0;
+ }
+
+ ret = dev_pm_domain_attach(dev, true);
+ if (ret)
+ return ret;
+
+ if (sdrv->probe) {
+ ret = sdrv->probe(spi);
+ if (ret)
+ dev_pm_domain_detach(dev, true);
+ }
+
+ return ret;
+}
+
+static void spi_remove(struct device *dev)
+{
+ const struct spi_driver *sdrv = to_spi_driver(dev->driver);
+
+ if (sdrv->remove)
+ sdrv->remove(to_spi_device(dev));
+
+ dev_pm_domain_detach(dev, true);
+}
+
+static void spi_shutdown(struct device *dev)
+{
+ if (dev->driver) {
+ const struct spi_driver *sdrv = to_spi_driver(dev->driver);
+
+ if (sdrv->shutdown)
+ sdrv->shutdown(to_spi_device(dev));
+ }
+}
+
+struct bus_type spi_bus_type = {
+ .name = "spi",
+ .dev_groups = spi_dev_groups,
+ .match = spi_match_device,
+ .uevent = spi_uevent,
+ .probe = spi_probe,
+ .remove = spi_remove,
+ .shutdown = spi_shutdown,
+};
+EXPORT_SYMBOL_GPL(spi_bus_type);
+
+/**
+ * __spi_register_driver - register a SPI driver
+ * @owner: owner module of the driver to register
+ * @sdrv: the driver to register
+ * Context: can sleep
+ *
+ * Return: zero on success, else a negative error code.
+ */
+int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
+{
+ sdrv->driver.owner = owner;
+ sdrv->driver.bus = &spi_bus_type;
+
+ /*
+ * For Really Good Reasons we use spi: modaliases rather than of:
+ * modaliases for DT, so module autoloading won't work unless the
+ * driver provides a spi_device_id as well as a compatible string.
+ */
+ if (sdrv->driver.of_match_table) {
+ const struct of_device_id *of_id;
+
+ for (of_id = sdrv->driver.of_match_table; of_id->compatible[0];
+ of_id++) {
+ const char *of_name;
+
+ /* Strip off any vendor prefix */
+ of_name = strnchr(of_id->compatible,
+ sizeof(of_id->compatible), ',');
+ if (of_name)
+ of_name++;
+ else
+ of_name = of_id->compatible;
+
+ if (sdrv->id_table) {
+ const struct spi_device_id *spi_id;
+
+ spi_id = spi_match_id(sdrv->id_table, of_name);
+ if (spi_id)
+ continue;
+ } else {
+ if (strcmp(sdrv->driver.name, of_name) == 0)
+ continue;
+ }
+
+ pr_warn("SPI driver %s has no spi_device_id for %s\n",
+ sdrv->driver.name, of_id->compatible);
+ }
+ }
+
+ return driver_register(&sdrv->driver);
+}
+EXPORT_SYMBOL_GPL(__spi_register_driver);
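+
+/*
+ * Illustrative sketch (hypothetical driver, not part of this file): to keep
+ * module autoloading working on DT systems, a client driver provides a
+ * spi_device_id table alongside its of_match_table, e.g.:
+ *
+ *   static const struct spi_device_id foo_spi_ids[] = {
+ *           { "foo-sensor" },
+ *           { }
+ *   };
+ *   MODULE_DEVICE_TABLE(spi, foo_spi_ids);
+ *
+ *   static const struct of_device_id foo_of_match[] = {
+ *           { .compatible = "vendor,foo-sensor" },
+ *           { }
+ *   };
+ *   MODULE_DEVICE_TABLE(of, foo_of_match);
+ *
+ *   static struct spi_driver foo_driver = {
+ *           .driver = {
+ *                   .name = "foo-sensor",
+ *                   .of_match_table = foo_of_match,
+ *           },
+ *           .id_table = foo_spi_ids,
+ *           .probe = foo_probe,
+ *           .remove = foo_remove,
+ *   };
+ *   module_spi_driver(foo_driver);
+ */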
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * SPI devices should normally not be created by SPI device drivers; that
+ * would make them board-specific. The same applies to SPI controller drivers.
+ * Device registration normally goes into a board file such as
+ * arch/.../mach.../board-YYY.c, along with other read-only (flashable)
+ * information about mainboard devices.
+ */
+
+struct boardinfo {
+ struct list_head list;
+ struct spi_board_info board_info;
+};
+
+static LIST_HEAD(board_list);
+static LIST_HEAD(spi_controller_list);
+
+/*
+ * Used to protect add/del operations on the board_info list and the
+ * spi_controller list, and their matching process; it is also used to
+ * protect the struct idr object.
+ */
+static DEFINE_MUTEX(board_lock);
+
+/**
+ * spi_alloc_device - Allocate a new SPI device
+ * @ctlr: Controller to which device is connected
+ * Context: can sleep
+ *
+ * Allows a driver to allocate and initialize a spi_device without
+ * registering it immediately. This allows a driver to directly
+ * fill the spi_device with device parameters before calling
+ * spi_add_device() on it.
+ *
+ * The caller is responsible for calling spi_add_device() on the returned
+ * spi_device structure to add it to the SPI controller. If the caller
+ * needs to discard the spi_device without adding it, then it should
+ * call spi_dev_put() on it.
+ *
+ * Return: a pointer to the new device, or NULL.
+ */
+struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
+{
+ struct spi_device *spi;
+
+ if (!spi_controller_get(ctlr))
+ return NULL;
+
+ spi = kzalloc(sizeof(*spi), GFP_KERNEL);
+ if (!spi) {
+ spi_controller_put(ctlr);
+ return NULL;
+ }
+
+ spi->pcpu_statistics = spi_alloc_pcpu_stats(NULL);
+ if (!spi->pcpu_statistics) {
+ kfree(spi);
+ spi_controller_put(ctlr);
+ return NULL;
+ }
+
+ spi->master = spi->controller = ctlr;
+ spi->dev.parent = &ctlr->dev;
+ spi->dev.bus = &spi_bus_type;
+ spi->dev.release = spidev_release;
+ spi->mode = ctlr->buswidth_override_bits;
+
+ device_initialize(&spi->dev);
+ return spi;
+}
+EXPORT_SYMBOL_GPL(spi_alloc_device);
+
+static void spi_dev_set_name(struct spi_device *spi)
+{
+ struct acpi_device *adev = ACPI_COMPANION(&spi->dev);
+
+ if (adev) {
+ dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
+ return;
+ }
+
+ dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
+ spi_get_chipselect(spi, 0));
+}
+
+static int spi_dev_check(struct device *dev, void *data)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct spi_device *new_spi = data;
+
+ if (spi->controller == new_spi->controller &&
+ spi_get_chipselect(spi, 0) == spi_get_chipselect(new_spi, 0))
+ return -EBUSY;
+ return 0;
+}
+
+static void spi_cleanup(struct spi_device *spi)
+{
+ if (spi->controller->cleanup)
+ spi->controller->cleanup(spi);
+}
+
+static int __spi_add_device(struct spi_device *spi)
+{
+ struct spi_controller *ctlr = spi->controller;
+ struct device *dev = ctlr->dev.parent;
+ int status;
+
+ /*
+ * We need to make sure there's no other device with this
+ * chipselect **BEFORE** we call setup(), else we'll trash
+ * its configuration.
+ */
+ status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
+ if (status) {
+ dev_err(dev, "chipselect %d already in use\n",
+ spi_get_chipselect(spi, 0));
+ return status;
+ }
+
+ /* Controller may unregister concurrently */
+ if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
+ !device_is_registered(&ctlr->dev)) {
+ return -ENODEV;
+ }
+
+ if (ctlr->cs_gpiods)
+ spi_set_csgpiod(spi, 0, ctlr->cs_gpiods[spi_get_chipselect(spi, 0)]);
+
+ /*
+ * Drivers may modify this initial i/o setup, but will
+ * normally rely on the device being set up. Devices
+ * using SPI_CS_HIGH can't coexist well otherwise...
+ */
+ status = spi_setup(spi);
+ if (status < 0) {
+ dev_err(dev, "can't setup %s, status %d\n",
+ dev_name(&spi->dev), status);
+ return status;
+ }
+
+ /* Device may be bound to an active driver when this returns */
+ status = device_add(&spi->dev);
+ if (status < 0) {
+ dev_err(dev, "can't add %s, status %d\n",
+ dev_name(&spi->dev), status);
+ spi_cleanup(spi);
+ } else {
+ dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
+ }
+
+ return status;
+}
+
+/**
+ * spi_add_device - Add spi_device allocated with spi_alloc_device
+ * @spi: spi_device to register
+ *
+ * Companion function to spi_alloc_device. Devices allocated with
+ * spi_alloc_device can be added onto the spi bus with this function.
+ *
+ * Return: 0 on success; negative errno on failure
+ */
+int spi_add_device(struct spi_device *spi)
+{
+ struct spi_controller *ctlr = spi->controller;
+ struct device *dev = ctlr->dev.parent;
+ int status;
+
+ /* Chipselects are numbered 0..max; validate. */
+ if (spi_get_chipselect(spi, 0) >= ctlr->num_chipselect) {
+ dev_err(dev, "cs%d >= max %d\n", spi_get_chipselect(spi, 0),
+ ctlr->num_chipselect);
+ return -EINVAL;
+ }
+
+ /* Set the bus ID string */
+ spi_dev_set_name(spi);
+
+ mutex_lock(&ctlr->add_lock);
+ status = __spi_add_device(spi);
+ mutex_unlock(&ctlr->add_lock);
+ return status;
+}
+EXPORT_SYMBOL_GPL(spi_add_device);
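+
+/*
+ * Illustrative sketch (hypothetical values): the spi_alloc_device() /
+ * spi_add_device() pair is typically used as follows by code that holds a
+ * spi_controller reference and learns about devices out-of-band:
+ *
+ *   struct spi_device *spi = spi_alloc_device(ctlr);
+ *
+ *   if (!spi)
+ *           return -ENOMEM;
+ *   spi_set_chipselect(spi, 0, 1);
+ *   spi->max_speed_hz = 1000000;
+ *   spi->mode = SPI_MODE_0;
+ *   strscpy(spi->modalias, "foo-sensor", sizeof(spi->modalias));
+ *   ret = spi_add_device(spi);
+ *   if (ret)
+ *           spi_dev_put(spi);
+ */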
+
+static int spi_add_device_locked(struct spi_device *spi)
+{
+ struct spi_controller *ctlr = spi->controller;
+ struct device *dev = ctlr->dev.parent;
+
+ /* Chipselects are numbered 0..max; validate. */
+ if (spi_get_chipselect(spi, 0) >= ctlr->num_chipselect) {
+ dev_err(dev, "cs%d >= max %d\n", spi_get_chipselect(spi, 0),
+ ctlr->num_chipselect);
+ return -EINVAL;
+ }
+
+ /* Set the bus ID string */
+ spi_dev_set_name(spi);
+
+ WARN_ON(!mutex_is_locked(&ctlr->add_lock));
+ return __spi_add_device(spi);
+}
+
+/**
+ * spi_new_device - instantiate one new SPI device
+ * @ctlr: Controller to which device is connected
+ * @chip: Describes the SPI device
+ * Context: can sleep
+ *
+ * On typical mainboards, this is purely internal, and it's not needed
+ * after board init creates the hard-wired devices. Some development
+ * platforms may not be able to use spi_register_board_info though, and
+ * this is exported so that, for example, a USB or parport based adapter
+ * driver could add devices (which it would learn about out-of-band).
+ *
+ * Return: the new device, or NULL.
+ */
+struct spi_device *spi_new_device(struct spi_controller *ctlr,
+ struct spi_board_info *chip)
+{
+ struct spi_device *proxy;
+ int status;
+
+ /*
+ * NOTE: caller did any chip->bus_num checks necessary.
+ *
+ * Also, unless we change the return value convention to use
+ * error-or-pointer (not NULL-or-pointer), troubleshootability
+ * suggests syslogged diagnostics are best here (ugh).
+ */
+
+ proxy = spi_alloc_device(ctlr);
+ if (!proxy)
+ return NULL;
+
+ WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));
+
+ spi_set_chipselect(proxy, 0, chip->chip_select);
+ proxy->max_speed_hz = chip->max_speed_hz;
+ proxy->mode = chip->mode;
+ proxy->irq = chip->irq;
+ strscpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
+ proxy->dev.platform_data = (void *) chip->platform_data;
+ proxy->controller_data = chip->controller_data;
+ proxy->controller_state = NULL;
+
+ if (chip->swnode) {
+ status = device_add_software_node(&proxy->dev, chip->swnode);
+ if (status) {
+ dev_err(&ctlr->dev, "failed to add software node to '%s': %d\n",
+ chip->modalias, status);
+ goto err_dev_put;
+ }
+ }
+
+ status = spi_add_device(proxy);
+ if (status < 0)
+ goto err_dev_put;
+
+ return proxy;
+
+err_dev_put:
+ device_remove_software_node(&proxy->dev);
+ spi_dev_put(proxy);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(spi_new_device);
+
+/**
+ * spi_unregister_device - unregister a single SPI device
+ * @spi: spi_device to unregister
+ *
+ * Start making the passed SPI device vanish. Normally this would be handled
+ * by spi_unregister_controller().
+ */
+void spi_unregister_device(struct spi_device *spi)
+{
+ if (!spi)
+ return;
+
+ if (spi->dev.of_node) {
+ of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
+ of_node_put(spi->dev.of_node);
+ }
+ if (ACPI_COMPANION(&spi->dev))
+ acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
+ device_remove_software_node(&spi->dev);
+ device_del(&spi->dev);
+ spi_cleanup(spi);
+ put_device(&spi->dev);
+}
+EXPORT_SYMBOL_GPL(spi_unregister_device);
+
+static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
+ struct spi_board_info *bi)
+{
+ struct spi_device *dev;
+
+ if (ctlr->bus_num != bi->bus_num)
+ return;
+
+ dev = spi_new_device(ctlr, bi);
+ if (!dev)
+ dev_err(ctlr->dev.parent, "can't create new device for %s\n",
+ bi->modalias);
+}
+
+/**
+ * spi_register_board_info - register SPI devices for a given board
+ * @info: array of chip descriptors
+ * @n: how many descriptors are provided
+ * Context: can sleep
+ *
+ * Board-specific early init code calls this (probably during arch_initcall)
+ * with segments of the SPI device table. Any device nodes are created later,
+ * after the relevant parent SPI controller (bus_num) is defined. We keep
+ * this table of devices forever, so that reloading a controller driver will
+ * not make Linux forget about these hard-wired devices.
+ *
+ * Other code can also call this, e.g. a particular add-on board might provide
+ * SPI devices through its expansion connector, so code initializing that board
+ * would naturally declare its SPI devices.
+ *
+ * The board info passed can safely be __initdata ... but be careful of
+ * any embedded pointers (platform_data, etc), they're copied as-is.
+ *
+ * Return: zero on success, else a negative error code.
+ */
+int spi_register_board_info(struct spi_board_info const *info, unsigned n)
+{
+ struct boardinfo *bi;
+ int i;
+
+ if (!n)
+ return 0;
+
+ bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
+ if (!bi)
+ return -ENOMEM;
+
+ for (i = 0; i < n; i++, bi++, info++) {
+ struct spi_controller *ctlr;
+
+ memcpy(&bi->board_info, info, sizeof(*info));
+
+ mutex_lock(&board_lock);
+ list_add_tail(&bi->list, &board_list);
+ list_for_each_entry(ctlr, &spi_controller_list, list)
+ spi_match_controller_to_boardinfo(ctlr,
+ &bi->board_info);
+ mutex_unlock(&board_lock);
+ }
+
+ return 0;
+}
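+
+/*
+ * Illustrative sketch (hypothetical board code): a board file would declare
+ * its hard-wired devices once, typically from an arch_initcall:
+ *
+ *   static struct spi_board_info board_spi_devices[] __initdata = {
+ *           {
+ *                   .modalias = "m25p80",
+ *                   .max_speed_hz = 20000000,
+ *                   .bus_num = 0,
+ *                   .chip_select = 0,
+ *                   .mode = SPI_MODE_3,
+ *           },
+ *   };
+ *
+ *   spi_register_board_info(board_spi_devices,
+ *                           ARRAY_SIZE(board_spi_devices));
+ */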
+
+/*-------------------------------------------------------------------------*/
+
+/* Core methods for SPI resource management */
+
+/**
+ * spi_res_alloc - allocate a spi resource that is life-cycle managed
+ * during the processing of a spi_message while using
+ * spi_transfer_one
+ * @spi: the spi device for which we allocate memory
+ * @release: the release code to execute for this resource
+ * @size: size to alloc and return
+ * @gfp: GFP allocation flags
+ *
+ * Return: the pointer to the allocated data
+ *
+ * This may get enhanced in the future to allocate from a memory pool
+ * of the @spi_device or @spi_controller to avoid repeated allocations.
+ */
+static void *spi_res_alloc(struct spi_device *spi, spi_res_release_t release,
+ size_t size, gfp_t gfp)
+{
+ struct spi_res *sres;
+
+ sres = kzalloc(sizeof(*sres) + size, gfp);
+ if (!sres)
+ return NULL;
+
+ INIT_LIST_HEAD(&sres->entry);
+ sres->release = release;
+
+ return sres->data;
+}
+
+/**
+ * spi_res_free - free an spi resource
+ * @res: pointer to the custom data of a resource
+ */
+static void spi_res_free(void *res)
+{
+ struct spi_res *sres = container_of(res, struct spi_res, data);
+
+ if (!res)
+ return;
+
+ WARN_ON(!list_empty(&sres->entry));
+ kfree(sres);
+}
+
+/**
+ * spi_res_add - add a spi_res to the spi_message
+ * @message: the spi message
+ * @res: the spi_resource
+ */
+static void spi_res_add(struct spi_message *message, void *res)
+{
+ struct spi_res *sres = container_of(res, struct spi_res, data);
+
+ WARN_ON(!list_empty(&sres->entry));
+ list_add_tail(&sres->entry, &message->resources);
+}
+
+/**
+ * spi_res_release - release all spi resources for this message
+ * @ctlr: the @spi_controller
+ * @message: the @spi_message
+ */
+static void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
+{
+ struct spi_res *res, *tmp;
+
+ list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) {
+ if (res->release)
+ res->release(ctlr, message, res->data);
+
+ list_del(&res->entry);
+
+ kfree(res);
+ }
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
+{
+ bool activate = enable;
+
+ /*
+ * Avoid calling into the driver (or doing delays) if the chip select
+ * isn't actually changing from the last time this was called.
+ */
+ if (!force && ((enable && spi->controller->last_cs == spi_get_chipselect(spi, 0)) ||
+ (!enable && spi->controller->last_cs != spi_get_chipselect(spi, 0))) &&
+ (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
+ return;
+
+ trace_spi_set_cs(spi, activate);
+
+ spi->controller->last_cs = enable ? spi_get_chipselect(spi, 0) : -1;
+ spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;
+
+ if ((spi_get_csgpiod(spi, 0) || !spi->controller->set_cs_timing) && !activate)
+ spi_delay_exec(&spi->cs_hold, NULL);
+
+ if (spi->mode & SPI_CS_HIGH)
+ enable = !enable;
+
+ if (spi_get_csgpiod(spi, 0)) {
+ if (!(spi->mode & SPI_NO_CS)) {
+ /*
+ * Historically ACPI has no means of expressing the GPIO polarity,
+ * so the SPISerialBus() resource defines it on a per-chip basis.
+ * In order to avoid a chain of negations, the GPIO polarity is
+ * considered to be Active High. Even for the cases when _DSD() is
+ * involved (in the updated versions of ACPI), the GPIO CS polarity
+ * must be defined Active High to avoid ambiguity. That's why we
+ * use 'enable', which takes SPI_CS_HIGH into account.
+ */
+ if (has_acpi_companion(&spi->dev))
+ gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), !enable);
+ else
+ /* Polarity handled by GPIO library */
+ gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), activate);
+ }
+ /* Some SPI masters need both GPIO CS & slave_select */
+ if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
+ spi->controller->set_cs)
+ spi->controller->set_cs(spi, !enable);
+ } else if (spi->controller->set_cs) {
+ spi->controller->set_cs(spi, !enable);
+ }
+
+ if (spi_get_csgpiod(spi, 0) || !spi->controller->set_cs_timing) {
+ if (activate)
+ spi_delay_exec(&spi->cs_setup, NULL);
+ else
+ spi_delay_exec(&spi->cs_inactive, NULL);
+ }
+}
+
+#ifdef CONFIG_HAS_DMA
+static int spi_map_buf_attrs(struct spi_controller *ctlr, struct device *dev,
+ struct sg_table *sgt, void *buf, size_t len,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+ const bool vmalloced_buf = is_vmalloc_addr(buf);
+ unsigned int max_seg_size = dma_get_max_seg_size(dev);
+#ifdef CONFIG_HIGHMEM
+ const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
+ (unsigned long)buf < (PKMAP_BASE +
+ (LAST_PKMAP * PAGE_SIZE)));
+#else
+ const bool kmap_buf = false;
+#endif
+ int desc_len;
+ int sgs;
+ struct page *vm_page;
+ struct scatterlist *sg;
+ void *sg_buf;
+ size_t min;
+ int i, ret;
+
+ if (vmalloced_buf || kmap_buf) {
+ desc_len = min_t(unsigned long, max_seg_size, PAGE_SIZE);
+ sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
+ } else if (virt_addr_valid(buf)) {
+ desc_len = min_t(size_t, max_seg_size, ctlr->max_dma_len);
+ sgs = DIV_ROUND_UP(len, desc_len);
+ } else {
+ return -EINVAL;
+ }
+
+ ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
+ if (ret != 0)
+ return ret;
+
+ sg = &sgt->sgl[0];
+ for (i = 0; i < sgs; i++) {
+
+ if (vmalloced_buf || kmap_buf) {
+ /*
+ * Next scatterlist entry size is the minimum between
+ * the desc_len and the remaining buffer length that
+ * fits in a page.
+ */
+ min = min_t(size_t, desc_len,
+ min_t(size_t, len,
+ PAGE_SIZE - offset_in_page(buf)));
+ if (vmalloced_buf)
+ vm_page = vmalloc_to_page(buf);
+ else
+ vm_page = kmap_to_page(buf);
+ if (!vm_page) {
+ sg_free_table(sgt);
+ return -ENOMEM;
+ }
+ sg_set_page(sg, vm_page,
+ min, offset_in_page(buf));
+ } else {
+ min = min_t(size_t, len, desc_len);
+ sg_buf = buf;
+ sg_set_buf(sg, sg_buf, min);
+ }
+
+ buf += min;
+ len -= min;
+ sg = sg_next(sg);
+ }
+
+ ret = dma_map_sgtable(dev, sgt, dir, attrs);
+ if (ret < 0) {
+ sg_free_table(sgt);
+ return ret;
+ }
+
+ return 0;
+}
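+
+/*
+ * Worked example (illustrative only): mapping a 10240-byte vmalloc'ed buffer
+ * that starts 256 bytes into a 4 KiB page uses desc_len == PAGE_SIZE and
+ * sgs = DIV_ROUND_UP(10240 + 256, 4096) = 3 scatterlist entries.
+ */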
+
+int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
+ struct sg_table *sgt, void *buf, size_t len,
+ enum dma_data_direction dir)
+{
+ return spi_map_buf_attrs(ctlr, dev, sgt, buf, len, dir, 0);
+}
+
+static void spi_unmap_buf_attrs(struct spi_controller *ctlr,
+ struct device *dev, struct sg_table *sgt,
+ enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ if (sgt->orig_nents) {
+ dma_unmap_sgtable(dev, sgt, dir, attrs);
+ sg_free_table(sgt);
+ sgt->orig_nents = 0;
+ sgt->nents = 0;
+ }
+}
+
+void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
+ struct sg_table *sgt, enum dma_data_direction dir)
+{
+ spi_unmap_buf_attrs(ctlr, dev, sgt, dir, 0);
+}
+
+static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
+{
+ struct device *tx_dev, *rx_dev;
+ struct spi_transfer *xfer;
+ int ret;
+
+ if (!ctlr->can_dma)
+ return 0;
+
+ if (ctlr->dma_tx)
+ tx_dev = ctlr->dma_tx->device->dev;
+ else if (ctlr->dma_map_dev)
+ tx_dev = ctlr->dma_map_dev;
+ else
+ tx_dev = ctlr->dev.parent;
+
+ if (ctlr->dma_rx)
+ rx_dev = ctlr->dma_rx->device->dev;
+ else if (ctlr->dma_map_dev)
+ rx_dev = ctlr->dma_map_dev;
+ else
+ rx_dev = ctlr->dev.parent;
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ /* The sync is done before each transfer. */
+ unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;
+
+ if (!ctlr->can_dma(ctlr, msg->spi, xfer))
+ continue;
+
+ if (xfer->tx_buf != NULL) {
+ ret = spi_map_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
+ (void *)xfer->tx_buf,
+ xfer->len, DMA_TO_DEVICE,
+ attrs);
+ if (ret != 0)
+ return ret;
+ }
+
+ if (xfer->rx_buf != NULL) {
+ ret = spi_map_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
+ xfer->rx_buf, xfer->len,
+ DMA_FROM_DEVICE, attrs);
+ if (ret != 0) {
+ spi_unmap_buf_attrs(ctlr, tx_dev,
+ &xfer->tx_sg, DMA_TO_DEVICE,
+ attrs);
+
+ return ret;
+ }
+ }
+ }
+
+ ctlr->cur_rx_dma_dev = rx_dev;
+ ctlr->cur_tx_dma_dev = tx_dev;
+ ctlr->cur_msg_mapped = true;
+
+ return 0;
+}
+
+static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
+{
+ struct device *rx_dev = ctlr->cur_rx_dma_dev;
+ struct device *tx_dev = ctlr->cur_tx_dma_dev;
+ struct spi_transfer *xfer;
+
+ if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
+ return 0;
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ /* The sync has already been done after each transfer. */
+ unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;
+
+ if (!ctlr->can_dma(ctlr, msg->spi, xfer))
+ continue;
+
+ spi_unmap_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
+ DMA_FROM_DEVICE, attrs);
+ spi_unmap_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
+ DMA_TO_DEVICE, attrs);
+ }
+
+ ctlr->cur_msg_mapped = false;
+
+ return 0;
+}
+
+static void spi_dma_sync_for_device(struct spi_controller *ctlr,
+ struct spi_transfer *xfer)
+{
+ struct device *rx_dev = ctlr->cur_rx_dma_dev;
+ struct device *tx_dev = ctlr->cur_tx_dma_dev;
+
+ if (!ctlr->cur_msg_mapped)
+ return;
+
+ if (xfer->tx_sg.orig_nents)
+ dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
+ if (xfer->rx_sg.orig_nents)
+ dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
+}
+
+static void spi_dma_sync_for_cpu(struct spi_controller *ctlr,
+ struct spi_transfer *xfer)
+{
+ struct device *rx_dev = ctlr->cur_rx_dma_dev;
+ struct device *tx_dev = ctlr->cur_tx_dma_dev;
+
+ if (!ctlr->cur_msg_mapped)
+ return;
+
+ if (xfer->rx_sg.orig_nents)
+ dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
+ if (xfer->tx_sg.orig_nents)
+ dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
+}
+#else /* !CONFIG_HAS_DMA */
+static inline int __spi_map_msg(struct spi_controller *ctlr,
+ struct spi_message *msg)
+{
+ return 0;
+}
+
+static inline int __spi_unmap_msg(struct spi_controller *ctlr,
+ struct spi_message *msg)
+{
+ return 0;
+}
+
+static void spi_dma_sync_for_device(struct spi_controller *ctrl,
+ struct spi_transfer *xfer)
+{
+}
+
+static void spi_dma_sync_for_cpu(struct spi_controller *ctrl,
+ struct spi_transfer *xfer)
+{
+}
+#endif /* !CONFIG_HAS_DMA */
+
+static inline int spi_unmap_msg(struct spi_controller *ctlr,
+ struct spi_message *msg)
+{
+ struct spi_transfer *xfer;
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ /*
+ * Restore tx_buf and rx_buf to their original NULL values if they
+ * were replaced by the dummy buffers in spi_map_msg().
+ */
+ if (xfer->tx_buf == ctlr->dummy_tx)
+ xfer->tx_buf = NULL;
+ if (xfer->rx_buf == ctlr->dummy_rx)
+ xfer->rx_buf = NULL;
+ }
+
+ return __spi_unmap_msg(ctlr, msg);
+}
+
+static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
+{
+ struct spi_transfer *xfer;
+ void *tmp;
+ unsigned int max_tx, max_rx;
+
+ if ((ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX))
+ && !(msg->spi->mode & SPI_3WIRE)) {
+ max_tx = 0;
+ max_rx = 0;
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
+ !xfer->tx_buf)
+ max_tx = max(xfer->len, max_tx);
+ if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
+ !xfer->rx_buf)
+ max_rx = max(xfer->len, max_rx);
+ }
+
+ if (max_tx) {
+ tmp = krealloc(ctlr->dummy_tx, max_tx,
+ GFP_KERNEL | GFP_DMA | __GFP_ZERO);
+ if (!tmp)
+ return -ENOMEM;
+ ctlr->dummy_tx = tmp;
+ }
+
+ if (max_rx) {
+ tmp = krealloc(ctlr->dummy_rx, max_rx,
+ GFP_KERNEL | GFP_DMA);
+ if (!tmp)
+ return -ENOMEM;
+ ctlr->dummy_rx = tmp;
+ }
+
+ if (max_tx || max_rx) {
+ list_for_each_entry(xfer, &msg->transfers,
+ transfer_list) {
+ if (!xfer->len)
+ continue;
+ if (!xfer->tx_buf)
+ xfer->tx_buf = ctlr->dummy_tx;
+ if (!xfer->rx_buf)
+ xfer->rx_buf = ctlr->dummy_rx;
+ }
+ }
+ }
+
+ return __spi_map_msg(ctlr, msg);
+}
+
+static int spi_transfer_wait(struct spi_controller *ctlr,
+ struct spi_message *msg,
+ struct spi_transfer *xfer)
+{
+ struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
+ struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
+ u32 speed_hz = xfer->speed_hz;
+ unsigned long long ms;
+
+ if (spi_controller_is_slave(ctlr)) {
+ if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
+ dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
+ return -EINTR;
+ }
+ } else {
+ if (!speed_hz)
+ speed_hz = 100000;
+
+ /*
+ * For each byte we wait for 8 cycles of the SPI clock.
+ * Since speed is defined in Hz and we want milliseconds,
+ * apply the MSEC_PER_SEC multiplier before the division,
+ * otherwise we may get 0 for short transfers.
+ */
+ ms = 8LL * MSEC_PER_SEC * xfer->len;
+ do_div(ms, speed_hz);
+
+ /*
+ * Double it and add a 200 ms tolerance; clamp to the
+ * predefined maximum in case of overflow.
+ */
+ ms += ms + 200;
+ if (ms > UINT_MAX)
+ ms = UINT_MAX;
+
+ ms = wait_for_completion_timeout(&ctlr->xfer_completion,
+ msecs_to_jiffies(ms));
+
+ if (ms == 0) {
+ SPI_STATISTICS_INCREMENT_FIELD(statm, timedout);
+ SPI_STATISTICS_INCREMENT_FIELD(stats, timedout);
+ dev_err(&msg->spi->dev,
+ "SPI transfer timed out\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
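+
+/*
+ * Worked example (illustrative only): a 4096-byte transfer at 1 MHz gives
+ * ms = 8 * 1000 * 4096 / 1000000 = 32 ms, which the tolerance above turns
+ * into a 32 + 32 + 200 = 264 ms timeout.
+ */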
+
+static void _spi_transfer_delay_ns(u32 ns)
+{
+ if (!ns)
+ return;
+ if (ns <= NSEC_PER_USEC) {
+ ndelay(ns);
+ } else {
+ u32 us = DIV_ROUND_UP(ns, NSEC_PER_USEC);
+
+ if (us <= 10)
+ udelay(us);
+ else
+ usleep_range(us, us + DIV_ROUND_UP(us, 10));
+ }
+}
+
+int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer)
+{
+ u32 delay = _delay->value;
+ u32 unit = _delay->unit;
+ u32 hz;
+
+ if (!delay)
+ return 0;
+
+ switch (unit) {
+ case SPI_DELAY_UNIT_USECS:
+ delay *= NSEC_PER_USEC;
+ break;
+ case SPI_DELAY_UNIT_NSECS:
+ /* Nothing to do here */
+ break;
+ case SPI_DELAY_UNIT_SCK:
+ /* Clock cycles need to be obtained from spi_transfer */
+ if (!xfer)
+ return -EINVAL;
+ /*
+ * If the effective speed is unknown, approximate it by
+ * underestimating with half of the requested Hz.
+ */
+ hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
+ if (!hz)
+ return -EINVAL;
+
+ /* Convert delay to nanoseconds */
+ delay *= DIV_ROUND_UP(NSEC_PER_SEC, hz);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return delay;
+}
+EXPORT_SYMBOL_GPL(spi_delay_to_ns);
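+
+/*
+ * Worked example (illustrative only): a delay of 2 SPI_DELAY_UNIT_SCK at an
+ * effective speed of 10 MHz converts to
+ * 2 * DIV_ROUND_UP(1000000000, 10000000) = 200 ns.
+ */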
+
+int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer)
+{
+ int delay;
+
+ might_sleep();
+
+ if (!_delay)
+ return -EINVAL;
+
+ delay = spi_delay_to_ns(_delay, xfer);
+ if (delay < 0)
+ return delay;
+
+ _spi_transfer_delay_ns(delay);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(spi_delay_exec);
+
+static void _spi_transfer_cs_change_delay(struct spi_message *msg,
+ struct spi_transfer *xfer)
+{
+ u32 default_delay_ns = 10 * NSEC_PER_USEC;
+ u32 delay = xfer->cs_change_delay.value;
+ u32 unit = xfer->cs_change_delay.unit;
+ int ret;
+
+ /* Return early on "fast" mode - for everything but USECS */
+ if (!delay) {
+ if (unit == SPI_DELAY_UNIT_USECS)
+ _spi_transfer_delay_ns(default_delay_ns);
+ return;
+ }
+
+ ret = spi_delay_exec(&xfer->cs_change_delay, xfer);
+ if (ret) {
+ dev_err_once(&msg->spi->dev,
+ "Use of unsupported delay unit %i, using default of %luus\n",
+ unit, default_delay_ns / NSEC_PER_USEC);
+ _spi_transfer_delay_ns(default_delay_ns);
+ }
+}
+
+/*
+ * spi_transfer_one_message - Default implementation of transfer_one_message()
+ *
+ * This is a standard implementation of transfer_one_message() for
+ * drivers which implement a transfer_one() operation. It provides
+ * standard handling of delays and chip select management.
+ */
+static int spi_transfer_one_message(struct spi_controller *ctlr,
+ struct spi_message *msg)
+{
+ struct spi_transfer *xfer;
+ bool keep_cs = false;
+ int ret = 0;
+ struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
+ struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
+
+ xfer = list_first_entry(&msg->transfers, struct spi_transfer, transfer_list);
+ spi_set_cs(msg->spi, !xfer->cs_off, false);
+
+ SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
+ SPI_STATISTICS_INCREMENT_FIELD(stats, messages);
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ trace_spi_transfer_start(msg, xfer);
+
+ spi_statistics_add_transfer_stats(statm, xfer, ctlr);
+ spi_statistics_add_transfer_stats(stats, xfer, ctlr);
+
+ if (!ctlr->ptp_sts_supported) {
+ xfer->ptp_sts_word_pre = 0;
+ ptp_read_system_prets(xfer->ptp_sts);
+ }
+
+ if ((xfer->tx_buf || xfer->rx_buf) && xfer->len) {
+ reinit_completion(&ctlr->xfer_completion);
+
+fallback_pio:
+ spi_dma_sync_for_device(ctlr, xfer);
+ ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
+ if (ret < 0) {
+ spi_dma_sync_for_cpu(ctlr, xfer);
+
+ if (ctlr->cur_msg_mapped &&
+ (xfer->error & SPI_TRANS_FAIL_NO_START)) {
+ __spi_unmap_msg(ctlr, msg);
+ ctlr->fallback = true;
+ xfer->error &= ~SPI_TRANS_FAIL_NO_START;
+ goto fallback_pio;
+ }
+
+ SPI_STATISTICS_INCREMENT_FIELD(statm,
+ errors);
+ SPI_STATISTICS_INCREMENT_FIELD(stats,
+ errors);
+ dev_err(&msg->spi->dev,
+ "SPI transfer failed: %d\n", ret);
+ goto out;
+ }
+
+ if (ret > 0) {
+ ret = spi_transfer_wait(ctlr, msg, xfer);
+ if (ret < 0)
+ msg->status = ret;
+ }
+
+ spi_dma_sync_for_cpu(ctlr, xfer);
+ } else {
+ if (xfer->len)
+ dev_err(&msg->spi->dev,
+ "Bufferless transfer has length %u\n",
+ xfer->len);
+ }
+
+ if (!ctlr->ptp_sts_supported) {
+ ptp_read_system_postts(xfer->ptp_sts);
+ xfer->ptp_sts_word_post = xfer->len;
+ }
+
+ trace_spi_transfer_stop(msg, xfer);
+
+ if (msg->status != -EINPROGRESS)
+ goto out;
+
+ spi_transfer_delay_exec(xfer);
+
+ if (xfer->cs_change) {
+ if (list_is_last(&xfer->transfer_list,
+ &msg->transfers)) {
+ keep_cs = true;
+ } else {
+ if (!xfer->cs_off)
+ spi_set_cs(msg->spi, false, false);
+ _spi_transfer_cs_change_delay(msg, xfer);
+ if (!list_next_entry(xfer, transfer_list)->cs_off)
+ spi_set_cs(msg->spi, true, false);
+ }
+ } else if (!list_is_last(&xfer->transfer_list, &msg->transfers) &&
+ xfer->cs_off != list_next_entry(xfer, transfer_list)->cs_off) {
+ spi_set_cs(msg->spi, xfer->cs_off, false);
+ }
+
+ msg->actual_length += xfer->len;
+ }
+
+out:
+ if (ret != 0 || !keep_cs)
+ spi_set_cs(msg->spi, false, false);
+
+ if (msg->status == -EINPROGRESS)
+ msg->status = ret;
+
+ if (msg->status && ctlr->handle_err)
+ ctlr->handle_err(ctlr, msg);
+
+ spi_finalize_current_message(ctlr);
+
+ return ret;
+}
+
+/**
+ * spi_finalize_current_transfer - report completion of a transfer
+ * @ctlr: the controller reporting completion
+ *
+ * Called by SPI drivers using the core transfer_one_message()
+ * implementation to notify it that the current interrupt driven
+ * transfer has finished and the next one may be scheduled.
+ */
+void spi_finalize_current_transfer(struct spi_controller *ctlr)
+{
+ complete(&ctlr->xfer_completion);
+}
+EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
+
+static void spi_idle_runtime_pm(struct spi_controller *ctlr)
+{
+ if (ctlr->auto_runtime_pm) {
+ pm_runtime_mark_last_busy(ctlr->dev.parent);
+ pm_runtime_put_autosuspend(ctlr->dev.parent);
+ }
+}
+
+static int __spi_pump_transfer_message(struct spi_controller *ctlr,
+ struct spi_message *msg, bool was_busy)
+{
+ struct spi_transfer *xfer;
+ int ret;
+
+ if (!was_busy && ctlr->auto_runtime_pm) {
+ ret = pm_runtime_get_sync(ctlr->dev.parent);
+ if (ret < 0) {
+ pm_runtime_put_noidle(ctlr->dev.parent);
+ dev_err(&ctlr->dev, "Failed to power device: %d\n",
+ ret);
+
+ msg->status = ret;
+ spi_finalize_current_message(ctlr);
+
+ return ret;
+ }
+ }
+
+ if (!was_busy)
+ trace_spi_controller_busy(ctlr);
+
+ if (!was_busy && ctlr->prepare_transfer_hardware) {
+ ret = ctlr->prepare_transfer_hardware(ctlr);
+ if (ret) {
+ dev_err(&ctlr->dev,
+ "failed to prepare transfer hardware: %d\n",
+ ret);
+
+ if (ctlr->auto_runtime_pm)
+ pm_runtime_put(ctlr->dev.parent);
+
+ msg->status = ret;
+ spi_finalize_current_message(ctlr);
+
+ return ret;
+ }
+ }
+
+ trace_spi_message_start(msg);
+
+ ret = spi_split_transfers_maxsize(ctlr, msg,
+ spi_max_transfer_size(msg->spi),
+ GFP_KERNEL | GFP_DMA);
+ if (ret) {
+ msg->status = ret;
+ spi_finalize_current_message(ctlr);
+ return ret;
+ }
+
+ if (ctlr->prepare_message) {
+ ret = ctlr->prepare_message(ctlr, msg);
+ if (ret) {
+ dev_err(&ctlr->dev, "failed to prepare message: %d\n",
+ ret);
+ msg->status = ret;
+ spi_finalize_current_message(ctlr);
+ return ret;
+ }
+ msg->prepared = true;
+ }
+
+ ret = spi_map_msg(ctlr, msg);
+ if (ret) {
+ msg->status = ret;
+ spi_finalize_current_message(ctlr);
+ return ret;
+ }
+
+ if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ xfer->ptp_sts_word_pre = 0;
+ ptp_read_system_prets(xfer->ptp_sts);
+ }
+ }
+
+ /*
+ * A driver's implementation of transfer_one_message() must arrange for
+ * spi_finalize_current_message() to get called. Most drivers will do
+ * this in the calling context, but some don't. For those cases, a
+ * completion is used to guarantee that this function does not return
+ * until spi_finalize_current_message() is done accessing
+ * ctlr->cur_msg.
+ * The following two flags make it possible to opportunistically skip
+ * the completion, since using it involves expensive spin locks.
+ * In case of a race with the context that calls
+ * spi_finalize_current_message(), the completion will always be used,
+ * due to the strict ordering of these flags enforced by barriers.
+ */
+ WRITE_ONCE(ctlr->cur_msg_incomplete, true);
+ WRITE_ONCE(ctlr->cur_msg_need_completion, false);
+ reinit_completion(&ctlr->cur_msg_completion);
+ smp_wmb(); /* Make these available to spi_finalize_current_message() */
+
+ ret = ctlr->transfer_one_message(ctlr, msg);
+ if (ret) {
+ dev_err(&ctlr->dev,
+ "failed to transfer one message from queue\n");
+ return ret;
+ }
+
+ WRITE_ONCE(ctlr->cur_msg_need_completion, true);
+ smp_mb(); /* See spi_finalize_current_message()... */
+ if (READ_ONCE(ctlr->cur_msg_incomplete))
+ wait_for_completion(&ctlr->cur_msg_completion);
+
+ return 0;
+}
+
+/**
+ * __spi_pump_messages - function which processes spi message queue
+ * @ctlr: controller to process queue for
+ * @in_kthread: true if we are in the context of the message pump thread
+ *
+ * This function checks if there is any SPI message in the queue that
+ * needs processing and, if so, calls out to the driver to initialize the
+ * hardware and transfer each message.
+ *
+ * Note that it is called both from the kthread itself and also from
+ * inside spi_sync(); the queue extraction handling at the top of the
+ * function should deal with this safely.
+ */
+static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
+{
+ struct spi_message *msg;
+ bool was_busy = false;
+ unsigned long flags;
+ int ret;
+
+ /* Take the IO mutex */
+ mutex_lock(&ctlr->io_mutex);
+
+ /* Lock queue */
+ spin_lock_irqsave(&ctlr->queue_lock, flags);
+
+ /* Make sure we are not already running a message */
+ if (ctlr->cur_msg)
+ goto out_unlock;
+
+ /* Check if the queue is idle */
+ if (list_empty(&ctlr->queue) || !ctlr->running) {
+ if (!ctlr->busy)
+ goto out_unlock;
+
+ /* Defer any non-atomic teardown to the thread */
+ if (!in_kthread) {
+ if (!ctlr->dummy_rx && !ctlr->dummy_tx &&
+ !ctlr->unprepare_transfer_hardware) {
+ spi_idle_runtime_pm(ctlr);
+ ctlr->busy = false;
+ ctlr->queue_empty = true;
+ trace_spi_controller_idle(ctlr);
+ } else {
+ kthread_queue_work(ctlr->kworker,
+ &ctlr->pump_messages);
+ }
+ goto out_unlock;
+ }
+
+ ctlr->busy = false;
+ spin_unlock_irqrestore(&ctlr->queue_lock, flags);
+
+ kfree(ctlr->dummy_rx);
+ ctlr->dummy_rx = NULL;
+ kfree(ctlr->dummy_tx);
+ ctlr->dummy_tx = NULL;
+ if (ctlr->unprepare_transfer_hardware &&
+ ctlr->unprepare_transfer_hardware(ctlr))
+ dev_err(&ctlr->dev,
+ "failed to unprepare transfer hardware\n");
+ spi_idle_runtime_pm(ctlr);
+ trace_spi_controller_idle(ctlr);
+
+ spin_lock_irqsave(&ctlr->queue_lock, flags);
+ ctlr->queue_empty = true;
+ goto out_unlock;
+ }
+
+ /* Extract head of queue */
+ msg = list_first_entry(&ctlr->queue, struct spi_message, queue);
+ ctlr->cur_msg = msg;
+
+ list_del_init(&msg->queue);
+ if (ctlr->busy)
+ was_busy = true;
+ else
+ ctlr->busy = true;
+ spin_unlock_irqrestore(&ctlr->queue_lock, flags);
+
+ ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
+ kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
+
+ ctlr->cur_msg = NULL;
+ ctlr->fallback = false;
+
+ mutex_unlock(&ctlr->io_mutex);
+
+ /* Prod the scheduler in case transfer_one() was busy waiting */
+ if (!ret)
+ cond_resched();
+ return;
+
+out_unlock:
+ spin_unlock_irqrestore(&ctlr->queue_lock, flags);
+ mutex_unlock(&ctlr->io_mutex);
+}
+
+/**
+ * spi_pump_messages - kthread work function which processes spi message queue
+ * @work: pointer to kthread work struct contained in the controller struct
+ */
+static void spi_pump_messages(struct kthread_work *work)
+{
+ struct spi_controller *ctlr =
+ container_of(work, struct spi_controller, pump_messages);
+
+ __spi_pump_messages(ctlr, true);
+}
+
+/**
+ * spi_take_timestamp_pre - helper to collect the beginning of the TX timestamp
+ * @ctlr: Pointer to the spi_controller structure of the driver
+ * @xfer: Pointer to the transfer being timestamped
+ * @progress: How many words (not bytes) have been transferred so far
+ * @irqs_off: If true, will disable IRQs and preemption for the duration of the
+ * transfer, for less jitter in time measurement. Only compatible
+ * with PIO drivers. If true, must follow up with
+ * spi_take_timestamp_post() or the system will crash.
+ * WARNING: for fully predictable results, the CPU frequency must
+ * also be under control (governor).
+ *
+ * This is a helper for drivers to collect the beginning of the TX timestamp
+ * for the requested byte from the SPI transfer. The frequency with which this
+ * function must be called (once per word, once for the whole transfer, once
+ * per batch of words etc) is arbitrary as long as the @tx buffer offset is
+ * greater than or equal to the requested byte at the time of the call. The
+ * timestamp is only taken once, at the first such call. It is assumed that
+ * the driver advances its @tx buffer pointer monotonically.
+ */
+void spi_take_timestamp_pre(struct spi_controller *ctlr,
+ struct spi_transfer *xfer,
+ size_t progress, bool irqs_off)
+{
+ if (!xfer->ptp_sts)
+ return;
+
+ if (xfer->timestamped)
+ return;
+
+ if (progress > xfer->ptp_sts_word_pre)
+ return;
+
+ /* Capture the resolution of the timestamp */
+ xfer->ptp_sts_word_pre = progress;
+
+ if (irqs_off) {
+ local_irq_save(ctlr->irq_flags);
+ preempt_disable();
+ }
+
+ ptp_read_system_prets(xfer->ptp_sts);
+}
+EXPORT_SYMBOL_GPL(spi_take_timestamp_pre);
+
+/**
+ * spi_take_timestamp_post - helper to collect the end of the TX timestamp
+ * @ctlr: Pointer to the spi_controller structure of the driver
+ * @xfer: Pointer to the transfer being timestamped
+ * @progress: How many words (not bytes) have been transferred so far
+ * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU.
+ *
+ * This is a helper for drivers to collect the end of the TX timestamp for
+ * the requested byte from the SPI transfer. Can be called with an arbitrary
+ * frequency: only the first call where @tx exceeds or is equal to the
+ * requested word will be timestamped.
+ */
+void spi_take_timestamp_post(struct spi_controller *ctlr,
+ struct spi_transfer *xfer,
+ size_t progress, bool irqs_off)
+{
+ if (!xfer->ptp_sts)
+ return;
+
+ if (xfer->timestamped)
+ return;
+
+ if (progress < xfer->ptp_sts_word_post)
+ return;
+
+ ptp_read_system_postts(xfer->ptp_sts);
+
+ if (irqs_off) {
+ local_irq_restore(ctlr->irq_flags);
+ preempt_enable();
+ }
+
+ /* Capture the resolution of the timestamp */
+ xfer->ptp_sts_word_post = progress;
+
+ xfer->timestamped = true;
+}
+EXPORT_SYMBOL_GPL(spi_take_timestamp_post);
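+
+/*
+ * Illustrative sketch (hypothetical, not part of this file): a PIO driver
+ * bracketing each word it pushes into the TX FIFO so that the core can
+ * timestamp the word requested via ptp_sts_word_pre/post. The foo_* helper
+ * and priv are made up; i counts the words already pushed:
+ *
+ *	for (i = 0; i < nwords; i++) {
+ *		spi_take_timestamp_pre(ctlr, xfer, i, irqs_off);
+ *		foo_push_tx_word(priv, xfer, i);
+ *		spi_take_timestamp_post(ctlr, xfer, i + 1, irqs_off);
+ *	}
+ */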
+
+/**
+ * spi_set_thread_rt - set the controller to pump at realtime priority
+ * @ctlr: controller to boost priority of
+ *
+ * This can be called because the controller requested realtime priority
+ * (by setting the ->rt value before calling spi_register_controller()) or
+ * because a device on the bus said that its transfers needed realtime
+ * priority.
+ *
+ * NOTE: at the moment if any device on a bus says it needs realtime then
+ * the thread will be at realtime priority for all transfers on that
+ * controller. If this eventually becomes a problem we may see if we can
+ * find a way to boost the priority only temporarily during relevant
+ * transfers.
+ */
+static void spi_set_thread_rt(struct spi_controller *ctlr)
+{
+ dev_info(&ctlr->dev,
+ "will run message pump with realtime priority\n");
+ sched_set_fifo(ctlr->kworker->task);
+}
+
+static int spi_init_queue(struct spi_controller *ctlr)
+{
+ ctlr->running = false;
+ ctlr->busy = false;
+ ctlr->queue_empty = true;
+
+ ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev));
+ if (IS_ERR(ctlr->kworker)) {
+ dev_err(&ctlr->dev, "failed to create message pump kworker\n");
+ return PTR_ERR(ctlr->kworker);
+ }
+
+ kthread_init_work(&ctlr->pump_messages, spi_pump_messages);
+
+ /*
+ * Controller config will indicate if this controller should run the
+ * message pump with high (realtime) priority to reduce the transfer
+ * latency on the bus by minimising the delay between a transfer
+ * request and the scheduling of the message pump thread. Without this
+ * setting the message pump thread will remain at default priority.
+ */
+ if (ctlr->rt)
+ spi_set_thread_rt(ctlr);
+
+ return 0;
+}
+
+/**
+ * spi_get_next_queued_message() - called by driver to check for queued
+ * messages
+ * @ctlr: the controller to check for queued messages
+ *
+ * If there are more messages in the queue, the next message is returned from
+ * this call.
+ *
+ * Return: the next message in the queue, else NULL if the queue is empty.
+ */
+struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
+{
+ struct spi_message *next;
+ unsigned long flags;
+
+ /* Get a pointer to the next message, if any */
+ spin_lock_irqsave(&ctlr->queue_lock, flags);
+ next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
+ queue);
+ spin_unlock_irqrestore(&ctlr->queue_lock, flags);
+
+ return next;
+}
+EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
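+
+/*
+ * Illustrative sketch (hypothetical, not part of this file): a driver that
+ * wants to peek at the next queued message, for example to keep its FIFO
+ * primed, might do (foo_preload_fifo() and priv are made up):
+ *
+ *	struct spi_message *next = spi_get_next_queued_message(ctlr);
+ *
+ *	if (next)
+ *		foo_preload_fifo(priv, next);
+ */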
+
+/**
+ * spi_finalize_current_message() - the current message is complete
+ * @ctlr: the controller to return the message to
+ *
+ * Called by the driver to notify the core that the message in the front of the
+ * queue is complete and can be removed from the queue.
+ */
+void spi_finalize_current_message(struct spi_controller *ctlr)
+{
+ struct spi_transfer *xfer;
+ struct spi_message *mesg;
+ int ret;
+
+ mesg = ctlr->cur_msg;
+
+ if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
+ list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
+ ptp_read_system_postts(xfer->ptp_sts);
+ xfer->ptp_sts_word_post = xfer->len;
+ }
+ }
+
+ if (unlikely(ctlr->ptp_sts_supported))
+ list_for_each_entry(xfer, &mesg->transfers, transfer_list)
+ WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped);
+
+ spi_unmap_msg(ctlr, mesg);
+
+ /*
+ * In the prepare_messages callback the SPI bus has the opportunity
+ * to split a transfer to smaller chunks.
+ *
+ * Release the split transfers here since spi_map_msg() is done on
+ * the split transfers.
+ */
+ spi_res_release(ctlr, mesg);
+
+ if (mesg->prepared && ctlr->unprepare_message) {
+ ret = ctlr->unprepare_message(ctlr, mesg);
+ if (ret) {
+ dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
+ ret);
+ }
+ }
+
+ mesg->prepared = false;
+
+ WRITE_ONCE(ctlr->cur_msg_incomplete, false);
+ smp_mb(); /* See __spi_pump_transfer_message()... */
+ if (READ_ONCE(ctlr->cur_msg_need_completion))
+ complete(&ctlr->cur_msg_completion);
+
+ trace_spi_message_done(mesg);
+
+ mesg->state = NULL;
+ if (mesg->complete)
+ mesg->complete(mesg->context);
+}
+EXPORT_SYMBOL_GPL(spi_finalize_current_message);
+
+static int spi_start_queue(struct spi_controller *ctlr)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctlr->queue_lock, flags);
+
+ if (ctlr->running || ctlr->busy) {
+ spin_unlock_irqrestore(&ctlr->queue_lock, flags);
+ return -EBUSY;
+ }
+
+ ctlr->running = true;
+ ctlr->cur_msg = NULL;
+ spin_unlock_irqrestore(&ctlr->queue_lock, flags);
+
+ kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
+
+ return 0;
+}
+
+static int spi_stop_queue(struct spi_controller *ctlr)
+{
+ unsigned long flags;
+ unsigned limit = 500;
+ int ret = 0;
+
+ spin_lock_irqsave(&ctlr->queue_lock, flags);
+
+ /*
+ * This is a bit lame, but is optimized for the common execution path.
+ * A wait_queue on the ctlr->busy could be used, but then the common
+ * execution path (pump_messages) would be required to call wake_up or
+ * friends on every SPI message. Do this instead.
+ */
+ while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) {
+ spin_unlock_irqrestore(&ctlr->queue_lock, flags);
+ usleep_range(10000, 11000);
+ spin_lock_irqsave(&ctlr->queue_lock, flags);
+ }
+
+ if (!list_empty(&ctlr->queue) || ctlr->busy)
+ ret = -EBUSY;
+ else
+ ctlr->running = false;
+
+ spin_unlock_irqrestore(&ctlr->queue_lock, flags);
+
+ if (ret)
+ dev_warn(&ctlr->dev, "could not stop message queue\n");
+
+ return ret;
+}
+
+static int spi_destroy_queue(struct spi_controller *ctlr)
+{
+ int ret;
+
+ ret = spi_stop_queue(ctlr);
+
+ /*
+ * kthread_flush_worker will block until all work is done.
+ * If the reason that stop_queue timed out is that the work will never
+ * finish, then it does no good to call flush/stop thread, so
+ * return anyway.
+ */
+ if (ret) {
+ dev_err(&ctlr->dev, "problem destroying queue\n");
+ return ret;
+ }
+
+ kthread_destroy_worker(ctlr->kworker);
+
+ return 0;
+}
+
+static int __spi_queued_transfer(struct spi_device *spi,
+ struct spi_message *msg,
+ bool need_pump)
+{
+ struct spi_controller *ctlr = spi->controller;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctlr->queue_lock, flags);
+
+ if (!ctlr->running) {
+ spin_unlock_irqrestore(&ctlr->queue_lock, flags);
+ return -ESHUTDOWN;
+ }
+ msg->actual_length = 0;
+ msg->status = -EINPROGRESS;
+
+ list_add_tail(&msg->queue, &ctlr->queue);
+ ctlr->queue_empty = false;
+ if (!ctlr->busy && need_pump)
+ kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
+
+ spin_unlock_irqrestore(&ctlr->queue_lock, flags);
+ return 0;
+}
+
+/**
+ * spi_queued_transfer - transfer function for queued transfers
+ * @spi: SPI device which is requesting the transfer
+ * @msg: SPI message which is to be queued onto the driver's message queue
+ *
+ * Return: zero on success, else a negative error code.
+ */
+static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
+{
+ return __spi_queued_transfer(spi, msg, true);
+}
+
+static int spi_controller_initialize_queue(struct spi_controller *ctlr)
+{
+ int ret;
+
+ ctlr->transfer = spi_queued_transfer;
+ if (!ctlr->transfer_one_message)
+ ctlr->transfer_one_message = spi_transfer_one_message;
+
+ /* Initialize and start queue */
+ ret = spi_init_queue(ctlr);
+ if (ret) {
+ dev_err(&ctlr->dev, "problem initializing queue\n");
+ goto err_init_queue;
+ }
+ ctlr->queued = true;
+ ret = spi_start_queue(ctlr);
+ if (ret) {
+ dev_err(&ctlr->dev, "problem starting queue\n");
+ goto err_start_queue;
+ }
+
+ return 0;
+
+err_start_queue:
+ spi_destroy_queue(ctlr);
+err_init_queue:
+ return ret;
+}
+
+/**
+ * spi_flush_queue - Send all pending messages in the queue from the caller's
+ * context
+ * @ctlr: controller to process queue for
+ *
+ * This should be used when one wants to ensure all pending messages have been
+ * sent before doing something. It is used by the spi-mem code to make sure SPI
+ * memory operations do not preempt regular SPI transfers that have been queued
+ * before the spi-mem operation.
+ */
+void spi_flush_queue(struct spi_controller *ctlr)
+{
+ if (ctlr->transfer == spi_queued_transfer)
+ __spi_pump_messages(ctlr, false);
+}
+
+/*-------------------------------------------------------------------------*/
+
+#if defined(CONFIG_OF)
+static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
+ struct device_node *nc)
+{
+ u32 value;
+ int rc;
+
+ /* Mode (clock phase/polarity/etc.) */
+ if (of_property_read_bool(nc, "spi-cpha"))
+ spi->mode |= SPI_CPHA;
+ if (of_property_read_bool(nc, "spi-cpol"))
+ spi->mode |= SPI_CPOL;
+ if (of_property_read_bool(nc, "spi-3wire"))
+ spi->mode |= SPI_3WIRE;
+ if (of_property_read_bool(nc, "spi-lsb-first"))
+ spi->mode |= SPI_LSB_FIRST;
+ if (of_property_read_bool(nc, "spi-cs-high"))
+ spi->mode |= SPI_CS_HIGH;
+
+ /* Device DUAL/QUAD mode */
+ if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
+ switch (value) {
+ case 0:
+ spi->mode |= SPI_NO_TX;
+ break;
+ case 1:
+ break;
+ case 2:
+ spi->mode |= SPI_TX_DUAL;
+ break;
+ case 4:
+ spi->mode |= SPI_TX_QUAD;
+ break;
+ case 8:
+ spi->mode |= SPI_TX_OCTAL;
+ break;
+ default:
+ dev_warn(&ctlr->dev,
+ "spi-tx-bus-width %d not supported\n",
+ value);
+ break;
+ }
+ }
+
+ if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
+ switch (value) {
+ case 0:
+ spi->mode |= SPI_NO_RX;
+ break;
+ case 1:
+ break;
+ case 2:
+ spi->mode |= SPI_RX_DUAL;
+ break;
+ case 4:
+ spi->mode |= SPI_RX_QUAD;
+ break;
+ case 8:
+ spi->mode |= SPI_RX_OCTAL;
+ break;
+ default:
+ dev_warn(&ctlr->dev,
+ "spi-rx-bus-width %d not supported\n",
+ value);
+ break;
+ }
+ }
+
+ if (spi_controller_is_slave(ctlr)) {
+ if (!of_node_name_eq(nc, "slave")) {
+ dev_err(&ctlr->dev, "%pOF is not called 'slave'\n",
+ nc);
+ return -EINVAL;
+ }
+ return 0;
+ }
+
+ /* Device address */
+ rc = of_property_read_u32(nc, "reg", &value);
+ if (rc) {
+ dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n",
+ nc, rc);
+ return rc;
+ }
+ spi_set_chipselect(spi, 0, value);
+
+ /* Device speed */
+ if (!of_property_read_u32(nc, "spi-max-frequency", &value))
+ spi->max_speed_hz = value;
+
+ return 0;
+}
+
+static struct spi_device *
+of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
+{
+ struct spi_device *spi;
+ int rc;
+
+ /* Alloc an spi_device */
+ spi = spi_alloc_device(ctlr);
+ if (!spi) {
+ dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc);
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
+ /* Select device driver */
+ rc = of_modalias_node(nc, spi->modalias,
+ sizeof(spi->modalias));
+ if (rc < 0) {
+ dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc);
+ goto err_out;
+ }
+
+ rc = of_spi_parse_dt(ctlr, spi, nc);
+ if (rc)
+ goto err_out;
+
+ /* Store a pointer to the node in the device structure */
+ of_node_get(nc);
+ spi->dev.of_node = nc;
+ spi->dev.fwnode = of_fwnode_handle(nc);
+
+ /* Register the new device */
+ rc = spi_add_device(spi);
+ if (rc) {
+ dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc);
+ goto err_of_node_put;
+ }
+
+ return spi;
+
+err_of_node_put:
+ of_node_put(nc);
+err_out:
+ spi_dev_put(spi);
+ return ERR_PTR(rc);
+}
+
+/**
+ * of_register_spi_devices() - Register child devices onto the SPI bus
+ * @ctlr: Pointer to spi_controller device
+ *
+ * Registers an spi_device for each child node of the controller node that
+ * represents a valid SPI slave.
+ */
+static void of_register_spi_devices(struct spi_controller *ctlr)
+{
+ struct spi_device *spi;
+ struct device_node *nc;
+
+ if (!ctlr->dev.of_node)
+ return;
+
+ for_each_available_child_of_node(ctlr->dev.of_node, nc) {
+ if (of_node_test_and_set_flag(nc, OF_POPULATED))
+ continue;
+ spi = of_register_spi_device(ctlr, nc);
+ if (IS_ERR(spi)) {
+ dev_warn(&ctlr->dev,
+ "Failed to create SPI device for %pOF\n", nc);
+ of_node_clear_flag(nc, OF_POPULATED);
+ }
+ }
+}
+#else
+static void of_register_spi_devices(struct spi_controller *ctlr) { }
+#endif
+
+/**
+ * spi_new_ancillary_device() - Register ancillary SPI device
+ * @spi: Pointer to the main SPI device registering the ancillary device
+ * @chip_select: Chip Select of the ancillary device
+ *
+ * Register an ancillary SPI device; for example some chips have a chip-select
+ * for normal device usage and another one for setup/firmware upload.
+ *
+ * This may only be called from the main SPI device's probe routine.
+ *
+ * Return: a pointer to the new device, or ERR_PTR() on failure
+ */
+struct spi_device *spi_new_ancillary_device(struct spi_device *spi,
+ u8 chip_select)
+{
+ struct spi_device *ancillary;
+ int rc = 0;
+
+ /* Alloc an spi_device */
+ ancillary = spi_alloc_device(spi->controller);
+ if (!ancillary) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
+ strscpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias));
+
+ /* Use provided chip-select for ancillary device */
+ spi_set_chipselect(ancillary, 0, chip_select);
+
+ /* Take over SPI mode/speed from SPI main device */
+ ancillary->max_speed_hz = spi->max_speed_hz;
+ ancillary->mode = spi->mode;
+
+ /* Register the new device */
+ rc = spi_add_device_locked(ancillary);
+ if (rc) {
+ dev_err(&spi->dev, "failed to register ancillary device\n");
+ goto err_out;
+ }
+
+ return ancillary;
+
+err_out:
+ spi_dev_put(ancillary);
+ return ERR_PTR(rc);
+}
+EXPORT_SYMBOL_GPL(spi_new_ancillary_device);
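+
+/*
+ * Illustrative sketch (hypothetical, not part of this file): a protocol
+ * driver for a chip with a second chip-select used for firmware upload
+ * could register it from its probe routine (the foo_* names are made up):
+ *
+ *	static int foo_probe(struct spi_device *spi)
+ *	{
+ *		struct spi_device *fw_spi;
+ *
+ *		fw_spi = spi_new_ancillary_device(spi, 1);
+ *		if (IS_ERR(fw_spi))
+ *			return PTR_ERR(fw_spi);
+ *
+ *		return foo_setup(spi, fw_spi);
+ *	}
+ */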
+
+#ifdef CONFIG_ACPI
+struct acpi_spi_lookup {
+ struct spi_controller *ctlr;
+ u32 max_speed_hz;
+ u32 mode;
+ int irq;
+ u8 bits_per_word;
+ u8 chip_select;
+ int n;
+ int index;
+};
+
+static int acpi_spi_count(struct acpi_resource *ares, void *data)
+{
+ struct acpi_resource_spi_serialbus *sb;
+ int *count = data;
+
+ if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
+ return 1;
+
+ sb = &ares->data.spi_serial_bus;
+ if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_SPI)
+ return 1;
+
+ *count = *count + 1;
+
+ return 1;
+}
+
+/**
+ * acpi_spi_count_resources - Count the number of SpiSerialBus resources
+ * @adev: ACPI device
+ *
+ * Return: the number of SpiSerialBus resources in the ACPI device's
+ * resource list, or a negative error code.
+ */
+int acpi_spi_count_resources(struct acpi_device *adev)
+{
+ LIST_HEAD(r);
+ int count = 0;
+ int ret;
+
+ ret = acpi_dev_get_resources(adev, &r, acpi_spi_count, &count);
+ if (ret < 0)
+ return ret;
+
+ acpi_dev_free_resource_list(&r);
+
+ return count;
+}
+EXPORT_SYMBOL_GPL(acpi_spi_count_resources);
+
+static void acpi_spi_parse_apple_properties(struct acpi_device *dev,
+ struct acpi_spi_lookup *lookup)
+{
+ const union acpi_object *obj;
+
+ if (!x86_apple_machine)
+ return;
+
+ if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj)
+ && obj->buffer.length >= 4)
+ lookup->max_speed_hz = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;
+
+ if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj)
+ && obj->buffer.length == 8)
+ lookup->bits_per_word = *(u64 *)obj->buffer.pointer;
+
+ if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj)
+ && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer)
+ lookup->mode |= SPI_LSB_FIRST;
+
+ if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj)
+ && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
+ lookup->mode |= SPI_CPOL;
+
+ if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj)
+ && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
+ lookup->mode |= SPI_CPHA;
+}
+
+static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev);
+
+static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
+{
+ struct acpi_spi_lookup *lookup = data;
+ struct spi_controller *ctlr = lookup->ctlr;
+
+ if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
+ struct acpi_resource_spi_serialbus *sb;
+ acpi_handle parent_handle;
+ acpi_status status;
+
+ sb = &ares->data.spi_serial_bus;
+ if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
+
+ if (lookup->index != -1 && lookup->n++ != lookup->index)
+ return 1;
+
+ status = acpi_get_handle(NULL,
+ sb->resource_source.string_ptr,
+ &parent_handle);
+
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ if (ctlr) {
+ if (ACPI_HANDLE(ctlr->dev.parent) != parent_handle)
+ return -ENODEV;
+ } else {
+ struct acpi_device *adev;
+
+ adev = acpi_fetch_acpi_dev(parent_handle);
+ if (!adev)
+ return -ENODEV;
+
+ ctlr = acpi_spi_find_controller_by_adev(adev);
+ if (!ctlr)
+ return -EPROBE_DEFER;
+
+ lookup->ctlr = ctlr;
+ }
+
+ /*
+ * ACPI DeviceSelection numbering is handled by the
+ * host controller driver in Windows and can vary
+ * from driver to driver. In Linux we always expect
+ * 0 .. max - 1 so we need to ask the driver to
+ * translate between the two schemes.
+ */
+ if (ctlr->fw_translate_cs) {
+ int cs = ctlr->fw_translate_cs(ctlr,
+ sb->device_selection);
+ if (cs < 0)
+ return cs;
+ lookup->chip_select = cs;
+ } else {
+ lookup->chip_select = sb->device_selection;
+ }
+
+ lookup->max_speed_hz = sb->connection_speed;
+ lookup->bits_per_word = sb->data_bit_length;
+
+ if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
+ lookup->mode |= SPI_CPHA;
+ if (sb->clock_polarity == ACPI_SPI_START_HIGH)
+ lookup->mode |= SPI_CPOL;
+ if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
+ lookup->mode |= SPI_CS_HIGH;
+ }
+ } else if (lookup->irq < 0) {
+ struct resource r;
+
+ if (acpi_dev_resource_interrupt(ares, 0, &r))
+ lookup->irq = r.start;
+ }
+
+ /* Always tell the ACPI core to skip this resource */
+ return 1;
+}
+
+/**
+ * acpi_spi_device_alloc - Allocate a spi device, and fill it in with ACPI information
+ * @ctlr: controller to which the spi device belongs
+ * @adev: ACPI Device for the spi device
+ * @index: Index of the spi resource inside the ACPI Node
+ *
+ * This should be used to allocate a new SPI device from an ACPI Node.
+ * The caller is responsible for calling spi_add_device() to register the
+ * SPI device.
+ *
+ * If ctlr is set to NULL, the controller for the SPI device will be looked up
+ * using the resource.
+ * If index is set to -1, index is not used.
+ * Note: If index is -1, ctlr must be set.
+ *
+ * Return: a pointer to the new device, or ERR_PTR on error.
+ */
+struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
+ struct acpi_device *adev,
+ int index)
+{
+ acpi_handle parent_handle = NULL;
+ struct list_head resource_list;
+ struct acpi_spi_lookup lookup = {};
+ struct spi_device *spi;
+ int ret;
+
+ if (!ctlr && index == -1)
+ return ERR_PTR(-EINVAL);
+
+ lookup.ctlr = ctlr;
+ lookup.irq = -1;
+ lookup.index = index;
+ lookup.n = 0;
+
+ INIT_LIST_HEAD(&resource_list);
+ ret = acpi_dev_get_resources(adev, &resource_list,
+ acpi_spi_add_resource, &lookup);
+ acpi_dev_free_resource_list(&resource_list);
+
+ if (ret < 0)
+ /* Found SPI in _CRS but it points to another controller */
+ return ERR_PTR(ret);
+
+ if (!lookup.max_speed_hz &&
+ ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) &&
+ ACPI_HANDLE(lookup.ctlr->dev.parent) == parent_handle) {
+ /* Apple does not use _CRS but nested devices for SPI slaves */
+ acpi_spi_parse_apple_properties(adev, &lookup);
+ }
+
+ if (!lookup.max_speed_hz)
+ return ERR_PTR(-ENODEV);
+
+ spi = spi_alloc_device(lookup.ctlr);
+ if (!spi) {
+ dev_err(&lookup.ctlr->dev, "failed to allocate SPI device for %s\n",
+ dev_name(&adev->dev));
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ACPI_COMPANION_SET(&spi->dev, adev);
+ spi->max_speed_hz = lookup.max_speed_hz;
+ spi->mode |= lookup.mode;
+ spi->irq = lookup.irq;
+ spi->bits_per_word = lookup.bits_per_word;
+ spi_set_chipselect(spi, 0, lookup.chip_select);
+
+ return spi;
+}
+EXPORT_SYMBOL_GPL(acpi_spi_device_alloc);
+
+static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
+ struct acpi_device *adev)
+{
+ struct spi_device *spi;
+
+ if (acpi_bus_get_status(adev) || !adev->status.present ||
+ acpi_device_enumerated(adev))
+ return AE_OK;
+
+ spi = acpi_spi_device_alloc(ctlr, adev, -1);
+ if (IS_ERR(spi)) {
+ if (PTR_ERR(spi) == -ENOMEM)
+ return AE_NO_MEMORY;
+ else
+ return AE_OK;
+ }
+
+ acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
+ sizeof(spi->modalias));
+
+ if (spi->irq < 0)
+ spi->irq = acpi_dev_gpio_irq_get(adev, 0);
+
+ acpi_device_set_enumerated(adev);
+
+ adev->power.flags.ignore_parent = true;
+ if (spi_add_device(spi)) {
+ adev->power.flags.ignore_parent = false;
+ dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
+ dev_name(&adev->dev));
+ spi_dev_put(spi);
+ }
+
+ return AE_OK;
+}
+
+static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
+ void *data, void **return_value)
+{
+ struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
+ struct spi_controller *ctlr = data;
+
+ if (!adev)
+ return AE_OK;
+
+ return acpi_register_spi_device(ctlr, adev);
+}
+
+#define SPI_ACPI_ENUMERATE_MAX_DEPTH 32
+
+static void acpi_register_spi_devices(struct spi_controller *ctlr)
+{
+ acpi_status status;
+ acpi_handle handle;
+
+ handle = ACPI_HANDLE(ctlr->dev.parent);
+ if (!handle)
+ return;
+
+ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+ SPI_ACPI_ENUMERATE_MAX_DEPTH,
+ acpi_spi_add_device, NULL, ctlr, NULL);
+ if (ACPI_FAILURE(status))
+ dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
+}
+#else
+static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
+#endif /* CONFIG_ACPI */
+
+static void spi_controller_release(struct device *dev)
+{
+ struct spi_controller *ctlr;
+
+ ctlr = container_of(dev, struct spi_controller, dev);
+ kfree(ctlr);
+}
+
+static struct class spi_master_class = {
+ .name = "spi_master",
+ .owner = THIS_MODULE,
+ .dev_release = spi_controller_release,
+ .dev_groups = spi_master_groups,
+};
+
+#ifdef CONFIG_SPI_SLAVE
+/**
+ * spi_slave_abort - abort the ongoing transfer request on an SPI slave
+ * controller
+ * @spi: device used for the current transfer
+ */
+int spi_slave_abort(struct spi_device *spi)
+{
+ struct spi_controller *ctlr = spi->controller;
+
+ if (spi_controller_is_slave(ctlr) && ctlr->slave_abort)
+ return ctlr->slave_abort(ctlr);
+
+ return -ENOTSUPP;
+}
+EXPORT_SYMBOL_GPL(spi_slave_abort);
+
+static ssize_t slave_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct spi_controller *ctlr = container_of(dev, struct spi_controller,
+ dev);
+ struct device *child;
+
+ child = device_find_any_child(&ctlr->dev);
+ return sprintf(buf, "%s\n",
+ child ? to_spi_device(child)->modalias : NULL);
+}
+
+static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct spi_controller *ctlr = container_of(dev, struct spi_controller,
+ dev);
+ struct spi_device *spi;
+ struct device *child;
+ char name[32];
+ int rc;
+
+ rc = sscanf(buf, "%31s", name);
+ if (rc != 1 || !name[0])
+ return -EINVAL;
+
+ child = device_find_any_child(&ctlr->dev);
+ if (child) {
+ /* Remove registered slave */
+ device_unregister(child);
+ put_device(child);
+ }
+
+ if (strcmp(name, "(null)")) {
+ /* Register new slave */
+ spi = spi_alloc_device(ctlr);
+ if (!spi)
+ return -ENOMEM;
+
+ strscpy(spi->modalias, name, sizeof(spi->modalias));
+
+ rc = spi_add_device(spi);
+ if (rc) {
+ spi_dev_put(spi);
+ return rc;
+ }
+ }
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(slave);
+
+static struct attribute *spi_slave_attrs[] = {
+ &dev_attr_slave.attr,
+ NULL,
+};
+
+static const struct attribute_group spi_slave_group = {
+ .attrs = spi_slave_attrs,
+};
+
+static const struct attribute_group *spi_slave_groups[] = {
+ &spi_controller_statistics_group,
+ &spi_slave_group,
+ NULL,
+};
+
+static struct class spi_slave_class = {
+ .name = "spi_slave",
+ .owner = THIS_MODULE,
+ .dev_release = spi_controller_release,
+ .dev_groups = spi_slave_groups,
+};
+#else
+extern struct class spi_slave_class; /* dummy */
+#endif
+
+/**
+ * __spi_alloc_controller - allocate an SPI master or slave controller
+ * @dev: the controller, possibly using the platform_bus
+ * @size: how much zeroed driver-private data to allocate; the pointer to this
+ * memory is in the driver_data field of the returned device, accessible
+ * with spi_controller_get_devdata(); the memory is cacheline aligned;
+ * drivers granting DMA access to portions of their private data need to
+ * round up @size using ALIGN(size, dma_get_cache_alignment()).
+ * @slave: flag indicating whether to allocate an SPI master (false) or SPI
+ * slave (true) controller
+ * Context: can sleep
+ *
+ * This call is used only by SPI controller drivers, which are the
+ * only ones directly touching chip registers. It's how they allocate
+ * an spi_controller structure, prior to calling spi_register_controller().
+ *
+ * This must be called from context that can sleep.
+ *
+ * The caller is responsible for assigning the bus number and initializing the
+ * controller's methods before calling spi_register_controller(); and (after
+ * errors adding the device) calling spi_controller_put() to prevent a memory
+ * leak.
+ *
+ * Return: the SPI controller structure on success, else NULL.
+ */
+struct spi_controller *__spi_alloc_controller(struct device *dev,
+ unsigned int size, bool slave)
+{
+ struct spi_controller *ctlr;
+ size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment());
+
+ if (!dev)
+ return NULL;
+
+ ctlr = kzalloc(size + ctlr_size, GFP_KERNEL);
+ if (!ctlr)
+ return NULL;
+
+ device_initialize(&ctlr->dev);
+ INIT_LIST_HEAD(&ctlr->queue);
+ spin_lock_init(&ctlr->queue_lock);
+ spin_lock_init(&ctlr->bus_lock_spinlock);
+ mutex_init(&ctlr->bus_lock_mutex);
+ mutex_init(&ctlr->io_mutex);
+ mutex_init(&ctlr->add_lock);
+ ctlr->bus_num = -1;
+ ctlr->num_chipselect = 1;
+ ctlr->slave = slave;
+ if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave)
+ ctlr->dev.class = &spi_slave_class;
+ else
+ ctlr->dev.class = &spi_master_class;
+ ctlr->dev.parent = dev;
+ pm_suspend_ignore_children(&ctlr->dev, true);
+ spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size);
+
+ return ctlr;
+}
+EXPORT_SYMBOL_GPL(__spi_alloc_controller);
+
+static void devm_spi_release_controller(struct device *dev, void *ctlr)
+{
+ spi_controller_put(*(struct spi_controller **)ctlr);
+}
+
+/**
+ * __devm_spi_alloc_controller - resource-managed __spi_alloc_controller()
+ * @dev: physical device of SPI controller
+ * @size: how much zeroed driver-private data to allocate
+ * @slave: whether to allocate an SPI master (false) or SPI slave (true)
+ * Context: can sleep
+ *
+ * Allocate an SPI controller and automatically release a reference on it
+ * when @dev is unbound from its driver. Drivers are thus relieved from
+ * having to call spi_controller_put().
+ *
+ * The arguments to this function are identical to __spi_alloc_controller().
+ *
+ * Return: the SPI controller structure on success, else NULL.
+ */
+struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
+ unsigned int size,
+ bool slave)
+{
+ struct spi_controller **ptr, *ctlr;
+
+ ptr = devres_alloc(devm_spi_release_controller, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return NULL;
+
+ ctlr = __spi_alloc_controller(dev, size, slave);
+ if (ctlr) {
+ ctlr->devm_allocated = true;
+ *ptr = ctlr;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return ctlr;
+}
+EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller);
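+
+/*
+ * Illustrative sketch (hypothetical platform driver, not part of this file):
+ * allocating and registering a controller with the devm_ helpers so that no
+ * explicit spi_controller_put() or spi_unregister_controller() call is needed
+ * (the foo_* names are made up):
+ *
+ *	static int foo_spi_probe(struct platform_device *pdev)
+ *	{
+ *		struct spi_controller *ctlr;
+ *		struct foo_spi *priv;
+ *
+ *		ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*priv));
+ *		if (!ctlr)
+ *			return -ENOMEM;
+ *
+ *		priv = spi_controller_get_devdata(ctlr);
+ *		ctlr->transfer_one = foo_transfer_one;
+ *		ctlr->set_cs = foo_set_cs;
+ *
+ *		return devm_spi_register_controller(&pdev->dev, ctlr);
+ *	}
+ */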
+
+/**
+ * spi_get_gpio_descs() - grab chip select GPIOs for the master
+ * @ctlr: The SPI master to grab GPIO descriptors for
+ */
+static int spi_get_gpio_descs(struct spi_controller *ctlr)
+{
+ int nb, i;
+ struct gpio_desc **cs;
+ struct device *dev = &ctlr->dev;
+ unsigned long native_cs_mask = 0;
+ unsigned int num_cs_gpios = 0;
+
+ nb = gpiod_count(dev, "cs");
+ if (nb < 0) {
+ /* No GPIOs at all is fine, else return the error */
+ if (nb == -ENOENT)
+ return 0;
+ return nb;
+ }
+
+ ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
+
+ cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs),
+ GFP_KERNEL);
+ if (!cs)
+ return -ENOMEM;
+ ctlr->cs_gpiods = cs;
+
+ for (i = 0; i < nb; i++) {
+ /*
+ * Most chipselects are active low; the inverted
+ * semantics are handled by special quirks in gpiolib,
+ * so initializing them to GPIOD_OUT_LOW here means
+ * "unasserted". In most cases this will drive the physical
+ * line high.
+ */
+ cs[i] = devm_gpiod_get_index_optional(dev, "cs", i,
+ GPIOD_OUT_LOW);
+ if (IS_ERR(cs[i]))
+ return PTR_ERR(cs[i]);
+
+ if (cs[i]) {
+ /*
+ * If we find a CS GPIO, name it after the device and
+ * chip select line.
+ */
+ char *gpioname;
+
+ gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d",
+ dev_name(dev), i);
+ if (!gpioname)
+ return -ENOMEM;
+ gpiod_set_consumer_name(cs[i], gpioname);
+ num_cs_gpios++;
+ continue;
+ }
+
+ if (ctlr->max_native_cs && i >= ctlr->max_native_cs) {
+ dev_err(dev, "Invalid native chip select %d\n", i);
+ return -EINVAL;
+ }
+ native_cs_mask |= BIT(i);
+ }
+
+ ctlr->unused_native_cs = ffs(~native_cs_mask) - 1;
+
+ if ((ctlr->flags & SPI_MASTER_GPIO_SS) && num_cs_gpios &&
+ ctlr->max_native_cs && ctlr->unused_native_cs >= ctlr->max_native_cs) {
+ dev_err(dev, "No unused native chip select available\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int spi_controller_check_ops(struct spi_controller *ctlr)
+{
+ /*
+ * The controller may implement only the high-level SPI-memory-like
+ * operations if it does not support regular SPI transfers, and this is a
+ * valid use case.
+ * If ->mem_ops is NULL, we request that at least one of the
+ * ->transfer_xxx() methods be implemented.
+ */
+ if (ctlr->mem_ops) {
+ if (!ctlr->mem_ops->exec_op)
+ return -EINVAL;
+ } else if (!ctlr->transfer && !ctlr->transfer_one &&
+ !ctlr->transfer_one_message) {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * spi_register_controller - register SPI master or slave controller
+ * @ctlr: initialized master, originally from spi_alloc_master() or
+ * spi_alloc_slave()
+ * Context: can sleep
+ *
+ * SPI controllers connect to their drivers using some non-SPI bus,
+ * such as the platform bus. The final stage of probe() in that code
+ * includes calling spi_register_controller() to hook up to this SPI bus glue.
+ *
+ * SPI controllers use board-specific (often SoC-specific) bus numbers,
+ * and board-specific addressing for SPI devices combines those numbers
+ * with chip select numbers. Since SPI does not directly support dynamic
+ * device identification, boards need configuration tables telling which
+ * chip is at which address.
+ *
+ * This must be called from context that can sleep. It returns zero on
+ * success, else a negative error code (dropping the controller's refcount).
+ * After a successful return, the caller is responsible for calling
+ * spi_unregister_controller().
+ *
+ * Return: zero on success, else a negative error code.
+ */
+int spi_register_controller(struct spi_controller *ctlr)
+{
+ struct device *dev = ctlr->dev.parent;
+ struct boardinfo *bi;
+ int status;
+ int id, first_dynamic;
+
+ if (!dev)
+ return -ENODEV;
+
+ /*
+ * Make sure all necessary hooks are implemented before registering
+ * the SPI controller.
+ */
+ status = spi_controller_check_ops(ctlr);
+ if (status)
+ return status;
+
+ if (ctlr->bus_num >= 0) {
+ /* Devices with a fixed bus num must check-in with the num */
+ mutex_lock(&board_lock);
+ id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
+ ctlr->bus_num + 1, GFP_KERNEL);
+ mutex_unlock(&board_lock);
+ if (WARN(id < 0, "couldn't get idr"))
+ return id == -ENOSPC ? -EBUSY : id;
+ ctlr->bus_num = id;
+ } else if (ctlr->dev.of_node) {
+ /* Allocate dynamic bus number using Linux idr */
+ id = of_alias_get_id(ctlr->dev.of_node, "spi");
+ if (id >= 0) {
+ ctlr->bus_num = id;
+ mutex_lock(&board_lock);
+ id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
+ ctlr->bus_num + 1, GFP_KERNEL);
+ mutex_unlock(&board_lock);
+ if (WARN(id < 0, "couldn't get idr"))
+ return id == -ENOSPC ? -EBUSY : id;
+ }
+ }
+ if (ctlr->bus_num < 0) {
+ first_dynamic = of_alias_get_highest_id("spi");
+ if (first_dynamic < 0)
+ first_dynamic = 0;
+ else
+ first_dynamic++;
+
+ mutex_lock(&board_lock);
+ id = idr_alloc(&spi_master_idr, ctlr, first_dynamic,
+ 0, GFP_KERNEL);
+ mutex_unlock(&board_lock);
+ if (WARN(id < 0, "couldn't get idr"))
+ return id;
+ ctlr->bus_num = id;
+ }
+ ctlr->bus_lock_flag = 0;
+ init_completion(&ctlr->xfer_completion);
+ init_completion(&ctlr->cur_msg_completion);
+ if (!ctlr->max_dma_len)
+ ctlr->max_dma_len = INT_MAX;
+
+ /*
+ * Register the device, then userspace will see it.
+ * Registration fails if the bus ID is in use.
+ */
+ dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
+
+ if (!spi_controller_is_slave(ctlr) && ctlr->use_gpio_descriptors) {
+ status = spi_get_gpio_descs(ctlr);
+ if (status)
+ goto free_bus_id;
+ /*
+ * A controller using GPIO descriptors always
+ * supports SPI_CS_HIGH if need be.
+ */
+ ctlr->mode_bits |= SPI_CS_HIGH;
+ }
+
+ /*
+ * Even if it's just one always-selected device, there must
+ * be at least one chipselect.
+ */
+ if (!ctlr->num_chipselect) {
+ status = -EINVAL;
+ goto free_bus_id;
+ }
+
+ /* Setting last_cs to -1 means no chip selected */
+ ctlr->last_cs = -1;
+
+ status = device_add(&ctlr->dev);
+ if (status < 0)
+ goto free_bus_id;
+ dev_dbg(dev, "registered %s %s\n",
+ spi_controller_is_slave(ctlr) ? "slave" : "master",
+ dev_name(&ctlr->dev));
+
+ /*
+ * If we're using a queued driver, start the queue. Note that we don't
+ * need the queueing logic if the driver is only supporting high-level
+ * memory operations.
+ */
+ if (ctlr->transfer) {
+ dev_info(dev, "controller is unqueued, this is deprecated\n");
+ } else if (ctlr->transfer_one || ctlr->transfer_one_message) {
+ status = spi_controller_initialize_queue(ctlr);
+ if (status) {
+ device_del(&ctlr->dev);
+ goto free_bus_id;
+ }
+ }
+ /* Add statistics */
+ ctlr->pcpu_statistics = spi_alloc_pcpu_stats(dev);
+ if (!ctlr->pcpu_statistics) {
+ dev_err(dev, "Error allocating per-cpu statistics\n");
+ status = -ENOMEM;
+ goto destroy_queue;
+ }
+
+ mutex_lock(&board_lock);
+ list_add_tail(&ctlr->list, &spi_controller_list);
+ list_for_each_entry(bi, &board_list, list)
+ spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
+ mutex_unlock(&board_lock);
+
+ /* Register devices from the device tree and ACPI */
+ of_register_spi_devices(ctlr);
+ acpi_register_spi_devices(ctlr);
+ return status;
+
+destroy_queue:
+ spi_destroy_queue(ctlr);
+free_bus_id:
+ mutex_lock(&board_lock);
+ idr_remove(&spi_master_idr, ctlr->bus_num);
+ mutex_unlock(&board_lock);
+ return status;
+}
+EXPORT_SYMBOL_GPL(spi_register_controller);
+
+static void devm_spi_unregister(struct device *dev, void *res)
+{
+ spi_unregister_controller(*(struct spi_controller **)res);
+}
+
+/**
+ * devm_spi_register_controller - register managed SPI master or slave
+ * controller
+ * @dev: device managing SPI controller
+ * @ctlr: initialized controller, originally from spi_alloc_master() or
+ * spi_alloc_slave()
+ * Context: can sleep
+ *
+ * Register an SPI controller as with spi_register_controller(); the
+ * controller will automatically be unregistered and freed when @dev is
+ * unbound from its driver.
+ *
+ * Return: zero on success, else a negative error code.
+ */
+int devm_spi_register_controller(struct device *dev,
+ struct spi_controller *ctlr)
+{
+ struct spi_controller **ptr;
+ int ret;
+
+ ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ ret = spi_register_controller(ctlr);
+ if (!ret) {
+ *ptr = ctlr;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_spi_register_controller);
+
+static int __unregister(struct device *dev, void *null)
+{
+ spi_unregister_device(to_spi_device(dev));
+ return 0;
+}
+
+/**
+ * spi_unregister_controller - unregister SPI master or slave controller
+ * @ctlr: the controller being unregistered
+ * Context: can sleep
+ *
+ * This call is used only by SPI controller drivers, which are the
+ * only ones directly touching chip registers.
+ *
+ * This must be called from context that can sleep.
+ *
+ * Note that this function also drops a reference to the controller.
+ */
+void spi_unregister_controller(struct spi_controller *ctlr)
+{
+ struct spi_controller *found;
+ int id = ctlr->bus_num;
+
+ /* Prevent addition of new devices, unregister existing ones */
+ if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
+ mutex_lock(&ctlr->add_lock);
+
+ device_for_each_child(&ctlr->dev, NULL, __unregister);
+
+ /* First make sure that this controller was ever added */
+ mutex_lock(&board_lock);
+ found = idr_find(&spi_master_idr, id);
+ mutex_unlock(&board_lock);
+ if (ctlr->queued) {
+ if (spi_destroy_queue(ctlr))
+ dev_err(&ctlr->dev, "queue remove failed\n");
+ }
+ mutex_lock(&board_lock);
+ list_del(&ctlr->list);
+ mutex_unlock(&board_lock);
+
+ device_del(&ctlr->dev);
+
+ /* Free bus id */
+ mutex_lock(&board_lock);
+ if (found == ctlr)
+ idr_remove(&spi_master_idr, id);
+ mutex_unlock(&board_lock);
+
+ if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
+ mutex_unlock(&ctlr->add_lock);
+
+ /*
+ * Release the last reference on the controller if its driver
+ * has not yet been converted to devm_spi_alloc_master/slave().
+ */
+ if (!ctlr->devm_allocated)
+ put_device(&ctlr->dev);
+}
+EXPORT_SYMBOL_GPL(spi_unregister_controller);
+
+static inline int __spi_check_suspended(const struct spi_controller *ctlr)
+{
+ return ctlr->flags & SPI_CONTROLLER_SUSPENDED ? -ESHUTDOWN : 0;
+}
+
+static inline void __spi_mark_suspended(struct spi_controller *ctlr)
+{
+ mutex_lock(&ctlr->bus_lock_mutex);
+ ctlr->flags |= SPI_CONTROLLER_SUSPENDED;
+ mutex_unlock(&ctlr->bus_lock_mutex);
+}
+
+static inline void __spi_mark_resumed(struct spi_controller *ctlr)
+{
+ mutex_lock(&ctlr->bus_lock_mutex);
+ ctlr->flags &= ~SPI_CONTROLLER_SUSPENDED;
+ mutex_unlock(&ctlr->bus_lock_mutex);
+}
+
+int spi_controller_suspend(struct spi_controller *ctlr)
+{
+ int ret = 0;
+
+ /* Basically no-ops for non-queued controllers */
+ if (ctlr->queued) {
+ ret = spi_stop_queue(ctlr);
+ if (ret)
+ dev_err(&ctlr->dev, "queue stop failed\n");
+ }
+
+ __spi_mark_suspended(ctlr);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(spi_controller_suspend);
+
+int spi_controller_resume(struct spi_controller *ctlr)
+{
+ int ret = 0;
+
+ __spi_mark_resumed(ctlr);
+
+ if (ctlr->queued) {
+ ret = spi_start_queue(ctlr);
+ if (ret)
+ dev_err(&ctlr->dev, "queue restart failed\n");
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(spi_controller_resume);
+
+/*-------------------------------------------------------------------------*/
+
+/* Core methods for spi_message alterations */
+
+static void __spi_replace_transfers_release(struct spi_controller *ctlr,
+ struct spi_message *msg,
+ void *res)
+{
+ struct spi_replaced_transfers *rxfer = res;
+ size_t i;
+
+ /* Call extra callback if requested */
+ if (rxfer->release)
+ rxfer->release(ctlr, msg, res);
+
+ /* Insert replaced transfers back into the message */
+ list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);
+
+ /* Remove the formerly inserted entries */
+ for (i = 0; i < rxfer->inserted; i++)
+ list_del(&rxfer->inserted_transfers[i].transfer_list);
+}
+
+/**
+ * spi_replace_transfers - replace transfers with several transfers
+ * and register change with spi_message.resources
+ * @msg: the spi_message we work upon
+ * @xfer_first: the first spi_transfer we want to replace
+ * @remove: number of transfers to remove
+ * @insert: the number of transfers we want to insert instead
+ * @release: extra release code necessary in some circumstances
+ * @extradatasize: extra data to allocate (with alignment guarantees
+ * of struct @spi_transfer)
+ * @gfp: gfp flags
+ *
+ * Return: pointer to the new &struct spi_replaced_transfers,
+ * or an ERR_PTR() in case of errors.
+ */
+static struct spi_replaced_transfers *spi_replace_transfers(
+ struct spi_message *msg,
+ struct spi_transfer *xfer_first,
+ size_t remove,
+ size_t insert,
+ spi_replaced_release_t release,
+ size_t extradatasize,
+ gfp_t gfp)
+{
+ struct spi_replaced_transfers *rxfer;
+ struct spi_transfer *xfer;
+ size_t i;
+
+ /* Allocate the structure using spi_res */
+ rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
+ struct_size(rxfer, inserted_transfers, insert)
+ + extradatasize,
+ gfp);
+ if (!rxfer)
+ return ERR_PTR(-ENOMEM);
+
+ /* The release code to invoke before running the generic release */
+ rxfer->release = release;
+
+ /* Assign extradata */
+ if (extradatasize)
+ rxfer->extradata =
+ &rxfer->inserted_transfers[insert];
+
+ /* Init the replaced_transfers list */
+ INIT_LIST_HEAD(&rxfer->replaced_transfers);
+
+ /*
+ * Assign the list_entry after which we should reinsert
+ * the @replaced_transfers - it may be spi_message.messages!
+ */
+ rxfer->replaced_after = xfer_first->transfer_list.prev;
+
+ /* Remove the requested number of transfers */
+ for (i = 0; i < remove; i++) {
+ /*
+ * If the entry after replaced_after is msg->transfers,
+ * then we have been requested to remove more transfers
+ * than are in the list.
+ */
+ if (rxfer->replaced_after->next == &msg->transfers) {
+ dev_err(&msg->spi->dev,
+ "requested to remove more spi_transfers than are available\n");
+ /* Insert replaced transfers back into the message */
+ list_splice(&rxfer->replaced_transfers,
+ rxfer->replaced_after);
+
+ /* Free the spi_replace_transfer structure... */
+ spi_res_free(rxfer);
+
+ /* ...and return with an error */
+ return ERR_PTR(-EINVAL);
+ }
+
+ /*
+ * Remove the entry after replaced_after from list of
+ * transfers and add it to list of replaced_transfers.
+ */
+ list_move_tail(rxfer->replaced_after->next,
+ &rxfer->replaced_transfers);
+ }
+
+ /*
+ * Create copies of the given xfer with identical settings,
+ * based on the first transfer to get removed.
+ */
+ for (i = 0; i < insert; i++) {
+ /* We need to run in reverse order */
+ xfer = &rxfer->inserted_transfers[insert - 1 - i];
+
+ /* Copy all spi_transfer data */
+ memcpy(xfer, xfer_first, sizeof(*xfer));
+
+ /* Add to list */
+ list_add(&xfer->transfer_list, rxfer->replaced_after);
+
+ /* Clear cs_change and delay for all but the last */
+ if (i) {
+ xfer->cs_change = false;
+ xfer->delay.value = 0;
+ }
+ }
+
+ /* Set up inserted... */
+ rxfer->inserted = insert;
+
+ /* ...and register it with spi_res/spi_message */
+ spi_res_add(msg, rxfer);
+
+ return rxfer;
+}
+
+static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
+ struct spi_message *msg,
+ struct spi_transfer **xferp,
+ size_t maxsize,
+ gfp_t gfp)
+{
+ struct spi_transfer *xfer = *xferp, *xfers;
+ struct spi_replaced_transfers *srt;
+ size_t offset;
+ size_t count, i;
+
+ /* Calculate how many we have to replace */
+ count = DIV_ROUND_UP(xfer->len, maxsize);
+
+ /* Create replacement */
+ srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
+ if (IS_ERR(srt))
+ return PTR_ERR(srt);
+ xfers = srt->inserted_transfers;
+
+ /*
+ * Now handle each of those newly inserted spi_transfers.
+ * Note that the replacement spi_transfers are all preset
+ * to the same values as *xferp, so tx_buf, rx_buf and len
+ * are all identical (as well as most others),
+ * so we just have to fix up len and the pointers.
+ *
+ * This also includes support for the deprecated
+ * spi_message.is_dma_mapped interface.
+ */
+
+ /*
+ * The first transfer just needs the length modified, so we
+ * run it outside the loop.
+ */
+ xfers[0].len = min_t(size_t, maxsize, xfer[0].len);
+
+ /* All the others need rx_buf/tx_buf also set */
+ for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
+ /* Update rx_buf, tx_buf and dma */
+ if (xfers[i].rx_buf)
+ xfers[i].rx_buf += offset;
+ if (xfers[i].rx_dma)
+ xfers[i].rx_dma += offset;
+ if (xfers[i].tx_buf)
+ xfers[i].tx_buf += offset;
+ if (xfers[i].tx_dma)
+ xfers[i].tx_dma += offset;
+
+ /* Update length */
+ xfers[i].len = min(maxsize, xfers[i].len - offset);
+ }
+
+ /*
+ * We set up xferp to the last entry we have inserted,
+ * so that we skip those already split transfers.
+ */
+ *xferp = &xfers[count - 1];
+
+ /* Increment statistics counters */
+ SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics,
+ transfers_split_maxsize);
+ SPI_STATISTICS_INCREMENT_FIELD(msg->spi->pcpu_statistics,
+ transfers_split_maxsize);
+
+ return 0;
+}
+
+/**
+ * spi_split_transfers_maxsize - split spi transfers into multiple transfers
+ * when an individual transfer exceeds a
+ * certain size
+ * @ctlr: the @spi_controller for this transfer
+ * @msg: the @spi_message to transform
+ * @maxsize: the maximum length an individual transfer may have
+ * @gfp: GFP allocation flags
+ *
+ * Return: 0 on success, else a negative error code
+ */
+int spi_split_transfers_maxsize(struct spi_controller *ctlr,
+ struct spi_message *msg,
+ size_t maxsize,
+ gfp_t gfp)
+{
+ struct spi_transfer *xfer;
+ int ret;
+
+ /*
+ * Iterate over the transfer_list,
+ * but note that xfer is advanced to the last transfer inserted
+ * to avoid checking sizes again unnecessarily (also xfer does
+ * potentially belong to a different list by the time the
+ * replacement has happened).
+ */
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ if (xfer->len > maxsize) {
+ ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
+ maxsize, gfp);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
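+
+/*
+ * Illustrative sketch (hypothetical, not part of this file): a controller
+ * driver whose DMA engine is limited to 64 KiB per descriptor could split
+ * oversized transfers from its prepare_message() callback:
+ *
+ *	static int foo_prepare_message(struct spi_controller *ctlr,
+ *				       struct spi_message *msg)
+ *	{
+ *		return spi_split_transfers_maxsize(ctlr, msg, SZ_64K,
+ *						   GFP_KERNEL);
+ *	}
+ */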
+
+/*-------------------------------------------------------------------------*/
+
+/* Core methods for SPI controller protocol drivers. Some of the
+ * other core methods are currently defined as inline functions.
+ */
+
+static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
+ u8 bits_per_word)
+{
+ if (ctlr->bits_per_word_mask) {
+ /* Only 32 bits fit in the mask */
+ if (bits_per_word > 32)
+ return -EINVAL;
+ if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word)))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * spi_set_cs_timing - configure CS setup, hold, and inactive delays
+ * @spi: the device that requires specific CS timing configuration
+ *
+ * Return: zero on success, else a negative error code.
+ */
+static int spi_set_cs_timing(struct spi_device *spi)
+{
+ struct device *parent = spi->controller->dev.parent;
+ int status = 0;
+
+ if (spi->controller->set_cs_timing && !spi_get_csgpiod(spi, 0)) {
+ if (spi->controller->auto_runtime_pm) {
+ status = pm_runtime_get_sync(parent);
+ if (status < 0) {
+ pm_runtime_put_noidle(parent);
+ dev_err(&spi->controller->dev, "Failed to power device: %d\n",
+ status);
+ return status;
+ }
+
+ status = spi->controller->set_cs_timing(spi);
+ pm_runtime_mark_last_busy(parent);
+ pm_runtime_put_autosuspend(parent);
+ } else {
+ status = spi->controller->set_cs_timing(spi);
+ }
+ }
+ return status;
+}
+
+/**
+ * spi_setup - setup SPI mode and clock rate
+ * @spi: the device whose settings are being modified
+ * Context: can sleep, and no requests are queued to the device
+ *
+ * SPI protocol drivers may need to update the transfer mode if the
+ * device doesn't work with its default. They may likewise need
+ * to update clock rates or word sizes from initial values. This function
+ * changes those settings, and must be called from a context that can sleep.
+ * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
+ * effect the next time the device is selected and data is transferred to
+ * or from it. When this function returns, the spi device is deselected.
+ *
+ * Note that this call will fail if the protocol driver specifies an option
+ * that the underlying controller or its driver does not support. For
+ * example, not all hardware supports wire transfers using nine bit words,
+ * LSB-first wire encoding, or active-high chipselects.
+ *
+ * Return: zero on success, else a negative error code.
+ */
+int spi_setup(struct spi_device *spi)
+{
+ unsigned bad_bits, ugly_bits;
+ int status = 0;
+
+ /*
+ * Check the mode to prevent any two of DUAL, QUAD and NO_MOSI/MISO
+ * from being set at the same time.
+ */
+ if ((hweight_long(spi->mode &
+ (SPI_TX_DUAL | SPI_TX_QUAD | SPI_NO_TX)) > 1) ||
+ (hweight_long(spi->mode &
+ (SPI_RX_DUAL | SPI_RX_QUAD | SPI_NO_RX)) > 1)) {
+ dev_err(&spi->dev,
+ "setup: can not select any two of dual, quad and no-rx/tx at the same time\n");
+ return -EINVAL;
+ }
+ /* If it is SPI_3WIRE mode, DUAL and QUAD should be forbidden */
+ if ((spi->mode & SPI_3WIRE) && (spi->mode &
+ (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
+ SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
+ return -EINVAL;
+ /*
+ * Help drivers fail *cleanly* when they need options
+ * that aren't supported with their current controller.
+ * SPI_CS_WORD has a fallback software implementation,
+ * so it is ignored here.
+ */
+ bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD |
+ SPI_NO_TX | SPI_NO_RX);
+ ugly_bits = bad_bits &
+ (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
+ SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL);
+ if (ugly_bits) {
+ dev_warn(&spi->dev,
+ "setup: ignoring unsupported mode bits %x\n",
+ ugly_bits);
+ spi->mode &= ~ugly_bits;
+ bad_bits &= ~ugly_bits;
+ }
+ if (bad_bits) {
+ dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
+ bad_bits);
+ return -EINVAL;
+ }
+
+ if (!spi->bits_per_word) {
+ spi->bits_per_word = 8;
+ } else {
+ /*
+ * Some controllers may not support the default 8 bits-per-word
+ * so only perform the check when this is explicitly provided.
+ */
+ status = __spi_validate_bits_per_word(spi->controller,
+ spi->bits_per_word);
+ if (status)
+ return status;
+ }
+
+ if (spi->controller->max_speed_hz &&
+ (!spi->max_speed_hz ||
+ spi->max_speed_hz > spi->controller->max_speed_hz))
+ spi->max_speed_hz = spi->controller->max_speed_hz;
+
+ mutex_lock(&spi->controller->io_mutex);
+
+ if (spi->controller->setup) {
+ status = spi->controller->setup(spi);
+ if (status) {
+ mutex_unlock(&spi->controller->io_mutex);
+ dev_err(&spi->controller->dev, "Failed to setup device: %d\n",
+ status);
+ return status;
+ }
+ }
+
+ status = spi_set_cs_timing(spi);
+ if (status) {
+ mutex_unlock(&spi->controller->io_mutex);
+ return status;
+ }
+
+ if (spi->controller->auto_runtime_pm && spi->controller->set_cs) {
+ status = pm_runtime_resume_and_get(spi->controller->dev.parent);
+ if (status < 0) {
+ mutex_unlock(&spi->controller->io_mutex);
+ dev_err(&spi->controller->dev, "Failed to power device: %d\n",
+ status);
+ return status;
+ }
+
+ /*
+ * We do not want to return a positive value from pm_runtime_get,
+ * as there are many instances of devices calling spi_setup() and
+ * checking for a non-zero return value instead of a negative
+ * return value.
+ */
+ status = 0;
+
+ spi_set_cs(spi, false, true);
+ pm_runtime_mark_last_busy(spi->controller->dev.parent);
+ pm_runtime_put_autosuspend(spi->controller->dev.parent);
+ } else {
+ spi_set_cs(spi, false, true);
+ }
+
+ mutex_unlock(&spi->controller->io_mutex);
+
+ if (spi->rt && !spi->controller->rt) {
+ spi->controller->rt = true;
+ spi_set_thread_rt(spi->controller);
+ }
+
+ trace_spi_setup(spi, status);
+
+ dev_dbg(&spi->dev, "setup mode %lu, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
+ spi->mode & SPI_MODE_X_MASK,
+ (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
+ (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
+ (spi->mode & SPI_3WIRE) ? "3wire, " : "",
+ (spi->mode & SPI_LOOP) ? "loopback, " : "",
+ spi->bits_per_word, spi->max_speed_hz,
+ status);
+
+ return status;
+}
+EXPORT_SYMBOL_GPL(spi_setup);
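+
+/*
+ * Example (illustrative sketch, not part of this file): a protocol driver
+ * typically fills in the spi_device fields it cares about from its probe()
+ * callback and then calls spi_setup(). Everything below other than the SPI
+ * core APIs (the driver name, mode, word size and speed) is hypothetical.
+ *
+ *	static int my_sensor_probe(struct spi_device *spi)
+ *	{
+ *		int ret;
+ *
+ *		spi->mode = SPI_MODE_0;
+ *		spi->bits_per_word = 8;
+ *		spi->max_speed_hz = 1000000;
+ *
+ *		ret = spi_setup(spi);
+ *		if (ret)
+ *			return dev_err_probe(&spi->dev, ret, "spi_setup() failed\n");
+ *
+ *		return 0;
+ *	}
+ */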
+
+static int _spi_xfer_word_delay_update(struct spi_transfer *xfer,
+ struct spi_device *spi)
+{
+ int delay1, delay2;
+
+ delay1 = spi_delay_to_ns(&xfer->word_delay, xfer);
+ if (delay1 < 0)
+ return delay1;
+
+ delay2 = spi_delay_to_ns(&spi->word_delay, xfer);
+ if (delay2 < 0)
+ return delay2;
+
+ if (delay1 < delay2)
+ memcpy(&xfer->word_delay, &spi->word_delay,
+ sizeof(xfer->word_delay));
+
+ return 0;
+}
+
+static int __spi_validate(struct spi_device *spi, struct spi_message *message)
+{
+ struct spi_controller *ctlr = spi->controller;
+ struct spi_transfer *xfer;
+ int w_size;
+
+ if (list_empty(&message->transfers))
+ return -EINVAL;
+
+ /*
+ * If an SPI controller does not support toggling the CS line on each
+ * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO
+ * for the CS line, we can emulate the CS-per-word hardware function by
+ * splitting transfers into one-word transfers and ensuring that
+ * cs_change is set for each transfer.
+ */
+ if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) ||
+ spi_get_csgpiod(spi, 0))) {
+ size_t maxsize;
+ int ret;
+
+ maxsize = (spi->bits_per_word + 7) / 8;
+
+ /* spi_split_transfers_maxsize() requires message->spi */
+ message->spi = spi;
+
+ ret = spi_split_transfers_maxsize(ctlr, message, maxsize,
+ GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(xfer, &message->transfers, transfer_list) {
+ /* Don't change cs_change on the last entry in the list */
+ if (list_is_last(&xfer->transfer_list, &message->transfers))
+ break;
+ xfer->cs_change = 1;
+ }
+ }
+
+ /*
+ * Half-duplex links include the original MicroWire, links with only
+ * one data pin such as SPI_3WIRE (which switches direction), and links
+ * where either MOSI or MISO is missing. Half-duplex operation can also
+ * be imposed by software limitations.
+ */
+ if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) ||
+ (spi->mode & SPI_3WIRE)) {
+ unsigned flags = ctlr->flags;
+
+ list_for_each_entry(xfer, &message->transfers, transfer_list) {
+ if (xfer->rx_buf && xfer->tx_buf)
+ return -EINVAL;
+ if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf)
+ return -EINVAL;
+ if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf)
+ return -EINVAL;
+ }
+ }
+
+ /*
+ * Set transfer bits_per_word and max speed as spi device default if
+ * it is not set for this transfer.
+ * Set transfer tx_nbits and rx_nbits as single transfer default
+ * (SPI_NBITS_SINGLE) if it is not set for this transfer.
+ * Ensure transfer word_delay is at least as long as that required by
+ * device itself.
+ */
+ message->frame_length = 0;
+ list_for_each_entry(xfer, &message->transfers, transfer_list) {
+ xfer->effective_speed_hz = 0;
+ message->frame_length += xfer->len;
+ if (!xfer->bits_per_word)
+ xfer->bits_per_word = spi->bits_per_word;
+
+ if (!xfer->speed_hz)
+ xfer->speed_hz = spi->max_speed_hz;
+
+ if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
+ xfer->speed_hz = ctlr->max_speed_hz;
+
+ if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
+ return -EINVAL;
+
+ /*
+ * The SPI transfer length must be a multiple of the SPI word size,
+ * where the word size is rounded up to a power-of-two number of bytes.
+ */
+ if (xfer->bits_per_word <= 8)
+ w_size = 1;
+ else if (xfer->bits_per_word <= 16)
+ w_size = 2;
+ else
+ w_size = 4;
+
+ /* No partial transfers accepted */
+ if (xfer->len % w_size)
+ return -EINVAL;
+
+ if (xfer->speed_hz && ctlr->min_speed_hz &&
+ xfer->speed_hz < ctlr->min_speed_hz)
+ return -EINVAL;
+
+ if (xfer->tx_buf && !xfer->tx_nbits)
+ xfer->tx_nbits = SPI_NBITS_SINGLE;
+ if (xfer->rx_buf && !xfer->rx_nbits)
+ xfer->rx_nbits = SPI_NBITS_SINGLE;
+ /*
+ * Check transfer tx/rx_nbits:
+ * 1. check the value matches one of single, dual and quad
+ * 2. check tx/rx_nbits match the mode in spi_device
+ */
+ if (xfer->tx_buf) {
+ if (spi->mode & SPI_NO_TX)
+ return -EINVAL;
+ if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
+ xfer->tx_nbits != SPI_NBITS_DUAL &&
+ xfer->tx_nbits != SPI_NBITS_QUAD)
+ return -EINVAL;
+ if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
+ !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
+ return -EINVAL;
+ if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
+ !(spi->mode & SPI_TX_QUAD))
+ return -EINVAL;
+ }
+ /* Check transfer rx_nbits */
+ if (xfer->rx_buf) {
+ if (spi->mode & SPI_NO_RX)
+ return -EINVAL;
+ if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
+ xfer->rx_nbits != SPI_NBITS_DUAL &&
+ xfer->rx_nbits != SPI_NBITS_QUAD)
+ return -EINVAL;
+ if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
+ !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
+ return -EINVAL;
+ if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
+ !(spi->mode & SPI_RX_QUAD))
+ return -EINVAL;
+ }
+
+ if (_spi_xfer_word_delay_update(xfer, spi))
+ return -EINVAL;
+ }
+
+ message->status = -EINPROGRESS;
+
+ return 0;
+}
+
+static int __spi_async(struct spi_device *spi, struct spi_message *message)
+{
+ struct spi_controller *ctlr = spi->controller;
+ struct spi_transfer *xfer;
+
+ /*
+ * Some controllers do not support doing regular SPI transfers. Return
+ * ENOTSUPP when this is the case.
+ */
+ if (!ctlr->transfer)
+ return -ENOTSUPP;
+
+ message->spi = spi;
+
+ SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_async);
+ SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_async);
+
+ trace_spi_message_submit(message);
+
+ if (!ctlr->ptp_sts_supported) {
+ list_for_each_entry(xfer, &message->transfers, transfer_list) {
+ xfer->ptp_sts_word_pre = 0;
+ ptp_read_system_prets(xfer->ptp_sts);
+ }
+ }
+
+ return ctlr->transfer(spi, message);
+}
+
+/**
+ * spi_async - asynchronous SPI transfer
+ * @spi: device with which data will be exchanged
+ * @message: describes the data transfers, including completion callback
+ * Context: any (irqs may be blocked, etc)
+ *
+ * This call may be used in_irq and other contexts which can't sleep,
+ * as well as from task contexts which can sleep.
+ *
+ * The completion callback is invoked in a context which can't sleep.
+ * Before that invocation, the value of message->status is undefined.
+ * When the callback is issued, message->status holds either zero (to
+ * indicate complete success) or a negative error code. After that
+ * callback returns, the driver which issued the transfer request may
+ * deallocate the associated memory; it's no longer in use by any SPI
+ * core or controller driver code.
+ *
+ * Note that although all messages to a spi_device are handled in
+ * FIFO order, messages may go to different devices in other orders.
+ * Some devices might have higher priority, or have various "hard" access
+ * time requirements, for example.
+ *
+ * On detection of any fault during the transfer, processing of
+ * the entire message is aborted, and the device is deselected.
+ * Until returning from the associated message completion callback,
+ * no other spi_message queued to that device will be processed.
+ * (This rule applies equally to all the synchronous transfer calls,
+ * which are wrappers around this core asynchronous primitive.)
+ *
+ * Return: zero on success, else a negative error code.
+ */
+int spi_async(struct spi_device *spi, struct spi_message *message)
+{
+ struct spi_controller *ctlr = spi->controller;
+ int ret;
+ unsigned long flags;
+
+ ret = __spi_validate(spi, message);
+ if (ret != 0)
+ return ret;
+
+ spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
+
+ if (ctlr->bus_lock_flag)
+ ret = -EBUSY;
+ else
+ ret = __spi_async(spi, message);
+
+ spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(spi_async);
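+
+/*
+ * Example (illustrative sketch, not part of this file): submitting a message
+ * asynchronously and waiting on a completion. The callback runs in a context
+ * that cannot sleep, so it only signals the waiter; in practice spi_sync()
+ * already provides exactly this pattern. All names other than the SPI core
+ * APIs are hypothetical, and cmd/resp must point at DMA-safe memory.
+ *
+ *	static void my_xfer_done(void *context)
+ *	{
+ *		complete(context);
+ *	}
+ *
+ *	static int my_read_byte(struct spi_device *spi, u8 *cmd, u8 *resp)
+ *	{
+ *		DECLARE_COMPLETION_ONSTACK(done);
+ *		struct spi_transfer xfers[2] = {
+ *			{ .tx_buf = cmd,  .len = 1 },
+ *			{ .rx_buf = resp, .len = 1 },
+ *		};
+ *		struct spi_message msg;
+ *		int ret;
+ *
+ *		spi_message_init_with_transfers(&msg, xfers, ARRAY_SIZE(xfers));
+ *		msg.complete = my_xfer_done;
+ *		msg.context = &done;
+ *
+ *		ret = spi_async(spi, &msg);
+ *		if (ret)
+ *			return ret;
+ *
+ *		wait_for_completion(&done);
+ *		return msg.status;
+ *	}
+ */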
+
+/**
+ * spi_async_locked - version of spi_async with exclusive bus usage
+ * @spi: device with which data will be exchanged
+ * @message: describes the data transfers, including completion callback
+ * Context: any (irqs may be blocked, etc)
+ *
+ * This call may be used in_irq and other contexts which can't sleep,
+ * as well as from task contexts which can sleep.
+ *
+ * The completion callback is invoked in a context which can't sleep.
+ * Before that invocation, the value of message->status is undefined.
+ * When the callback is issued, message->status holds either zero (to
+ * indicate complete success) or a negative error code. After that
+ * callback returns, the driver which issued the transfer request may
+ * deallocate the associated memory; it's no longer in use by any SPI
+ * core or controller driver code.
+ *
+ * Note that although all messages to a spi_device are handled in
+ * FIFO order, messages may go to different devices in other orders.
+ * Some devices might have higher priority, or have various "hard" access
+ * time requirements, for example.
+ *
+ * On detection of any fault during the transfer, processing of
+ * the entire message is aborted, and the device is deselected.
+ * Until returning from the associated message completion callback,
+ * no other spi_message queued to that device will be processed.
+ * (This rule applies equally to all the synchronous transfer calls,
+ * which are wrappers around this core asynchronous primitive.)
+ *
+ * Return: zero on success, else a negative error code.
+ */
+static int spi_async_locked(struct spi_device *spi, struct spi_message *message)
+{
+ struct spi_controller *ctlr = spi->controller;
+ int ret;
+ unsigned long flags;
+
+ ret = __spi_validate(spi, message);
+ if (ret != 0)
+ return ret;
+
+ spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
+
+ ret = __spi_async(spi, message);
+
+ spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
+
+ return ret;
+
+}
+
+static void __spi_transfer_message_noqueue(struct spi_controller *ctlr, struct spi_message *msg)
+{
+ bool was_busy;
+ int ret;
+
+ mutex_lock(&ctlr->io_mutex);
+
+ was_busy = ctlr->busy;
+
+ ctlr->cur_msg = msg;
+ ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
+ if (ret)
+ dev_err(&ctlr->dev, "noqueue transfer failed\n");
+ ctlr->cur_msg = NULL;
+ ctlr->fallback = false;
+
+ if (!was_busy) {
+ kfree(ctlr->dummy_rx);
+ ctlr->dummy_rx = NULL;
+ kfree(ctlr->dummy_tx);
+ ctlr->dummy_tx = NULL;
+ if (ctlr->unprepare_transfer_hardware &&
+ ctlr->unprepare_transfer_hardware(ctlr))
+ dev_err(&ctlr->dev,
+ "failed to unprepare transfer hardware\n");
+ spi_idle_runtime_pm(ctlr);
+ }
+
+ mutex_unlock(&ctlr->io_mutex);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Utility methods for SPI protocol drivers, layered on
+ * top of the core. Some other utility methods are defined as
+ * inline functions.
+ */
+
+static void spi_complete(void *arg)
+{
+ complete(arg);
+}
+
+static int __spi_sync(struct spi_device *spi, struct spi_message *message)
+{
+ DECLARE_COMPLETION_ONSTACK(done);
+ int status;
+ struct spi_controller *ctlr = spi->controller;
+
+ if (__spi_check_suspended(ctlr)) {
+ dev_warn_once(&spi->dev, "Attempted to sync while suspended\n");
+ return -ESHUTDOWN;
+ }
+
+ status = __spi_validate(spi, message);
+ if (status != 0)
+ return status;
+
+ message->spi = spi;
+
+ SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync);
+ SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync);
+
+ /*
+ * Checking queue_empty here only guarantees async/sync message
+ * ordering when coming from the same context. It does not need to
+ * guard against reentrancy from a different context. The io_mutex
+ * will catch those cases.
+ */
+ if (READ_ONCE(ctlr->queue_empty) && !ctlr->must_async) {
+ message->actual_length = 0;
+ message->status = -EINPROGRESS;
+
+ trace_spi_message_submit(message);
+
+ SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync_immediate);
+ SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync_immediate);
+
+ __spi_transfer_message_noqueue(ctlr, message);
+
+ return message->status;
+ }
+
+ /*
+ * There are messages in the async queue that could have originated
+ * from the same context, so we need to preserve ordering.
+ * Therefore we send the message to the async queue and wait until it
+ * is completed.
+ */
+ message->complete = spi_complete;
+ message->context = &done;
+ status = spi_async_locked(spi, message);
+ if (status == 0) {
+ wait_for_completion(&done);
+ status = message->status;
+ }
+ message->context = NULL;
+
+ return status;
+}
+
+/**
+ * spi_sync - blocking/synchronous SPI data transfers
+ * @spi: device with which data will be exchanged
+ * @message: describes the data transfers
+ * Context: can sleep
+ *
+ * This call may only be used from a context that may sleep. The sleep
+ * is non-interruptible, and has no timeout. Low-overhead controller
+ * drivers may DMA directly into and out of the message buffers.
+ *
+ * Note that the SPI device's chip select is active during the message,
+ * and then is normally disabled between messages. Drivers for some
+ * frequently-used devices may want to minimize costs of selecting a chip,
+ * by leaving it selected in anticipation that the next message will go
+ * to the same chip. (That may increase power usage.)
+ *
+ * Also, the caller is guaranteeing that the memory associated with the
+ * message will not be freed before this call returns.
+ *
+ * Return: zero on success, else a negative error code.
+ */
+int spi_sync(struct spi_device *spi, struct spi_message *message)
+{
+ int ret;
+
+ mutex_lock(&spi->controller->bus_lock_mutex);
+ ret = __spi_sync(spi, message);
+ mutex_unlock(&spi->controller->bus_lock_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(spi_sync);
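+
+/*
+ * Example (illustrative sketch, not part of this file): a simple full-duplex
+ * transfer built with spi_message/spi_transfer and submitted with spi_sync()
+ * from a context that may sleep. The caller must supply DMA-safe buffers.
+ * The spi_sync_transfer() helper in <linux/spi/spi.h> wraps this same
+ * pattern; the function name here is hypothetical.
+ *
+ *	static int my_exchange(struct spi_device *spi, const void *tx,
+ *			       void *rx, size_t len)
+ *	{
+ *		struct spi_transfer xfer = {
+ *			.tx_buf = tx,
+ *			.rx_buf = rx,
+ *			.len = len,
+ *		};
+ *		struct spi_message msg;
+ *
+ *		spi_message_init_with_transfers(&msg, &xfer, 1);
+ *		return spi_sync(spi, &msg);
+ *	}
+ */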
+
+/**
+ * spi_sync_locked - version of spi_sync with exclusive bus usage
+ * @spi: device with which data will be exchanged
+ * @message: describes the data transfers
+ * Context: can sleep
+ *
+ * This call may only be used from a context that may sleep. The sleep
+ * is non-interruptible, and has no timeout. Low-overhead controller
+ * drivers may DMA directly into and out of the message buffers.
+ *
+ * This call should be used by drivers that require exclusive access to the
+ * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
+ * be released by a spi_bus_unlock call when the exclusive access is over.
+ *
+ * Return: zero on success, else a negative error code.
+ */
+int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
+{
+ return __spi_sync(spi, message);
+}
+EXPORT_SYMBOL_GPL(spi_sync_locked);
+
+/**
+ * spi_bus_lock - obtain a lock for exclusive SPI bus usage
+ * @ctlr: SPI bus master that should be locked for exclusive bus access
+ * Context: can sleep
+ *
+ * This call may only be used from a context that may sleep. The sleep
+ * is non-interruptible, and has no timeout.
+ *
+ * This call should be used by drivers that require exclusive access to the
+ * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
+ * exclusive access is over. Data transfer must be done by spi_sync_locked
+ * and spi_async_locked calls when the SPI bus lock is held.
+ *
+ * Return: always zero.
+ */
+int spi_bus_lock(struct spi_controller *ctlr)
+{
+ unsigned long flags;
+
+ mutex_lock(&ctlr->bus_lock_mutex);
+
+ spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
+ ctlr->bus_lock_flag = 1;
+ spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
+
+ /* Mutex remains locked until spi_bus_unlock() is called */
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(spi_bus_lock);
+
+/**
+ * spi_bus_unlock - release the lock for exclusive SPI bus usage
+ * @ctlr: SPI bus master that was locked for exclusive bus access
+ * Context: can sleep
+ *
+ * This call may only be used from a context that may sleep. The sleep
+ * is non-interruptible, and has no timeout.
+ *
+ * This call releases an SPI bus lock previously obtained by an spi_bus_lock
+ * call.
+ *
+ * Return: always zero.
+ */
+int spi_bus_unlock(struct spi_controller *ctlr)
+{
+ ctlr->bus_lock_flag = 0;
+
+ mutex_unlock(&ctlr->bus_lock_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(spi_bus_unlock);
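+
+/*
+ * Example (illustrative sketch, not part of this file): running a sequence
+ * of dependent messages without letting traffic to other devices on the same
+ * bus interleave. spi_bus_lock()/spi_bus_unlock() bracket the sequence, and
+ * spi_sync_locked() performs the transfers while the lock is held. All names
+ * other than the SPI core APIs are hypothetical.
+ *
+ *	static int my_atomic_sequence(struct spi_device *spi,
+ *				      struct spi_message *first,
+ *				      struct spi_message *second)
+ *	{
+ *		struct spi_controller *ctlr = spi->controller;
+ *		int ret;
+ *
+ *		spi_bus_lock(ctlr);
+ *		ret = spi_sync_locked(spi, first);
+ *		if (!ret)
+ *			ret = spi_sync_locked(spi, second);
+ *		spi_bus_unlock(ctlr);
+ *
+ *		return ret;
+ *	}
+ */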
+
+/* Portable code must never pass more than 32 bytes */
+#define SPI_BUFSIZ max(32, SMP_CACHE_BYTES)
+
+static u8 *buf;
+
+/**
+ * spi_write_then_read - SPI synchronous write followed by read
+ * @spi: device with which data will be exchanged
+ * @txbuf: data to be written (need not be dma-safe)
+ * @n_tx: size of txbuf, in bytes
+ * @rxbuf: buffer into which data will be read (need not be dma-safe)
+ * @n_rx: size of rxbuf, in bytes
+ * Context: can sleep
+ *
+ * This performs a half duplex MicroWire style transaction with the
+ * device, sending txbuf and then reading rxbuf. The return value
+ * is zero for success, else a negative errno status code.
+ * This call may only be used from a context that may sleep.
+ *
+ * Parameters to this routine are always copied using a small buffer.
+ * Performance-sensitive or bulk transfer code should instead use
+ * spi_{async,sync}() calls with dma-safe buffers.
+ *
+ * Return: zero on success, else a negative error code.
+ */
+int spi_write_then_read(struct spi_device *spi,
+ const void *txbuf, unsigned n_tx,
+ void *rxbuf, unsigned n_rx)
+{
+ static DEFINE_MUTEX(lock);
+
+ int status;
+ struct spi_message message;
+ struct spi_transfer x[2];
+ u8 *local_buf;
+
+ /*
+ * Use the preallocated DMA-safe buffer if we can. We can't avoid
+ * copying here (it is a pure convenience thing), but we can keep
+ * heap costs out of the hot path unless someone else is using the
+ * preallocated buffer or the transfer is too large.
+ */
+ if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
+ local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
+ GFP_KERNEL | GFP_DMA);
+ if (!local_buf)
+ return -ENOMEM;
+ } else {
+ local_buf = buf;
+ }
+
+ spi_message_init(&message);
+ memset(x, 0, sizeof(x));
+ if (n_tx) {
+ x[0].len = n_tx;
+ spi_message_add_tail(&x[0], &message);
+ }
+ if (n_rx) {
+ x[1].len = n_rx;
+ spi_message_add_tail(&x[1], &message);
+ }
+
+ memcpy(local_buf, txbuf, n_tx);
+ x[0].tx_buf = local_buf;
+ x[1].rx_buf = local_buf + n_tx;
+
+ /* Do the i/o */
+ status = spi_sync(spi, &message);
+ if (status == 0)
+ memcpy(rxbuf, x[1].rx_buf, n_rx);
+
+ if (x[0].tx_buf == buf)
+ mutex_unlock(&lock);
+ else
+ kfree(local_buf);
+
+ return status;
+}
+EXPORT_SYMBOL_GPL(spi_write_then_read);
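+
+/*
+ * Example (illustrative sketch, not part of this file): reading one register
+ * by writing a one-byte command and reading back a one-byte value. Because
+ * spi_write_then_read() copies through an internal bounce buffer, the
+ * caller's buffers need not be DMA-safe. The function name and the 0x80
+ * "read" bit are hypothetical.
+ *
+ *	static int my_read_reg(struct spi_device *spi, u8 reg, u8 *val)
+ *	{
+ *		u8 cmd = reg | 0x80;
+ *
+ *		return spi_write_then_read(spi, &cmd, 1, val, 1);
+ *	}
+ */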
+
+/*-------------------------------------------------------------------------*/
+
+#if IS_ENABLED(CONFIG_OF_DYNAMIC)
+/* Must call put_device() when done with the returned spi_device */
+static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
+{
+ struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node);
+
+ return dev ? to_spi_device(dev) : NULL;
+}
+
+/* The SPI controllers are not on spi_bus, so we must find them a different way */
+static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
+{
+ struct device *dev;
+
+ dev = class_find_device_by_of_node(&spi_master_class, node);
+ if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
+ dev = class_find_device_by_of_node(&spi_slave_class, node);
+ if (!dev)
+ return NULL;
+
+ /* Reference got in class_find_device */
+ return container_of(dev, struct spi_controller, dev);
+}
+
+static int of_spi_notify(struct notifier_block *nb, unsigned long action,
+ void *arg)
+{
+ struct of_reconfig_data *rd = arg;
+ struct spi_controller *ctlr;
+ struct spi_device *spi;
+
+ switch (of_reconfig_get_state_change(action, arg)) {
+ case OF_RECONFIG_CHANGE_ADD:
+ ctlr = of_find_spi_controller_by_node(rd->dn->parent);
+ if (ctlr == NULL)
+ return NOTIFY_OK; /* Not for us */
+
+ if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
+ put_device(&ctlr->dev);
+ return NOTIFY_OK;
+ }
+
+ /*
+ * Clear the flag before adding the device so that fw_devlink
+ * doesn't skip adding consumers to this device.
+ */
+ rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE;
+ spi = of_register_spi_device(ctlr, rd->dn);
+ put_device(&ctlr->dev);
+
+ if (IS_ERR(spi)) {
+ pr_err("%s: failed to create for '%pOF'\n",
+ __func__, rd->dn);
+ of_node_clear_flag(rd->dn, OF_POPULATED);
+ return notifier_from_errno(PTR_ERR(spi));
+ }
+ break;
+
+ case OF_RECONFIG_CHANGE_REMOVE:
+ /* Already depopulated? */
+ if (!of_node_check_flag(rd->dn, OF_POPULATED))
+ return NOTIFY_OK;
+
+ /* Find our device by node */
+ spi = of_find_spi_device_by_node(rd->dn);
+ if (spi == NULL)
+ return NOTIFY_OK; /* No? not meant for us */
+
+ /* Unregister takes one ref away */
+ spi_unregister_device(spi);
+
+ /* And put the reference of the find */
+ put_device(&spi->dev);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block spi_of_notifier = {
+ .notifier_call = of_spi_notify,
+};
+#else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
+extern struct notifier_block spi_of_notifier;
+#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
+
+#if IS_ENABLED(CONFIG_ACPI)
+static int spi_acpi_controller_match(struct device *dev, const void *data)
+{
+ return ACPI_COMPANION(dev->parent) == data;
+}
+
+static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
+{
+ struct device *dev;
+
+ dev = class_find_device(&spi_master_class, NULL, adev,
+ spi_acpi_controller_match);
+ if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
+ dev = class_find_device(&spi_slave_class, NULL, adev,
+ spi_acpi_controller_match);
+ if (!dev)
+ return NULL;
+
+ return container_of(dev, struct spi_controller, dev);
+}
+
+static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
+{
+ struct device *dev;
+
+ dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev);
+ return to_spi_device(dev);
+}
+
+static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
+ void *arg)
+{
+ struct acpi_device *adev = arg;
+ struct spi_controller *ctlr;
+ struct spi_device *spi;
+
+ switch (value) {
+ case ACPI_RECONFIG_DEVICE_ADD:
+ ctlr = acpi_spi_find_controller_by_adev(acpi_dev_parent(adev));
+ if (!ctlr)
+ break;
+
+ acpi_register_spi_device(ctlr, adev);
+ put_device(&ctlr->dev);
+ break;
+ case ACPI_RECONFIG_DEVICE_REMOVE:
+ if (!acpi_device_enumerated(adev))
+ break;
+
+ spi = acpi_spi_find_device_by_adev(adev);
+ if (!spi)
+ break;
+
+ spi_unregister_device(spi);
+ put_device(&spi->dev);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block spi_acpi_notifier = {
+ .notifier_call = acpi_spi_notify,
+};
+#else
+extern struct notifier_block spi_acpi_notifier;
+#endif
+
+static int __init spi_init(void)
+{
+ int status;
+
+ buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
+ if (!buf) {
+ status = -ENOMEM;
+ goto err0;
+ }
+
+ status = bus_register(&spi_bus_type);
+ if (status < 0)
+ goto err1;
+
+ status = class_register(&spi_master_class);
+ if (status < 0)
+ goto err2;
+
+ if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
+ status = class_register(&spi_slave_class);
+ if (status < 0)
+ goto err3;
+ }
+
+ if (IS_ENABLED(CONFIG_OF_DYNAMIC))
+ WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
+ if (IS_ENABLED(CONFIG_ACPI))
+ WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));
+
+ return 0;
+
+err3:
+ class_unregister(&spi_master_class);
+err2:
+ bus_unregister(&spi_bus_type);
+err1:
+ kfree(buf);
+ buf = NULL;
+err0:
+ return status;
+}
+
+/*
+ * A board_info is normally registered in arch_initcall(),
+ * but even essential drivers wait till later.
+ *
+ * REVISIT only boardinfo really needs static linking. The rest (device and
+ * driver registration) _could_ be dynamically linked (modular) ... Costs
+ * include needing to have boardinfo data structures be much more public.
+ */
+postcore_initcall(spi_init);
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
new file mode 100644
index 000000000..71c3db60e
--- /dev/null
+++ b/drivers/spi/spidev.c
@@ -0,0 +1,891 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Simple synchronous userspace interface to SPI devices
+ *
+ * Copyright (C) 2006 SWAPP
+ * Andrea Paterniani <a.paterniani@swapp-eng.it>
+ * Copyright (C) 2007 David Brownell (simplification, cleanup)
+ */
+
+#include <linux/init.h>
+#include <linux/ioctl.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/errno.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+#include <linux/compat.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi/spidev.h>
+
+#include <linux/uaccess.h>
+
+
+/*
+ * This supports access to SPI devices using normal userspace I/O calls.
+ * Note that while traditional UNIX/POSIX I/O semantics are half duplex,
+ * and often mask message boundaries, full SPI support requires full duplex
+ * transfers. There are several kinds of internal message boundaries to
+ * handle chipselect management and other protocol options.
+ *
+ * SPI has a character major number assigned. We allocate minor numbers
+ * dynamically using a bitmask. You must use hotplug tools, such as udev
+ * (or mdev with busybox) to create and destroy the /dev/spidevB.C device
+ * nodes, since there is no fixed association of minor numbers with any
+ * particular SPI bus or device.
+ */
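+
+/*
+ * Example (illustrative userspace sketch, not part of this driver): a
+ * minimal full-duplex transfer through the SPI_IOC_MESSAGE ioctl. The
+ * device node name and the bytes transferred are only assumptions about
+ * the target board; error handling is trimmed for brevity.
+ *
+ *	#include <fcntl.h>
+ *	#include <stdint.h>
+ *	#include <unistd.h>
+ *	#include <sys/ioctl.h>
+ *	#include <linux/spi/spidev.h>
+ *
+ *	int example(void)
+ *	{
+ *		uint8_t tx[2] = { 0x80, 0x00 }, rx[2] = { 0, 0 };
+ *		struct spi_ioc_transfer xfer = {
+ *			.tx_buf = (unsigned long)tx,
+ *			.rx_buf = (unsigned long)rx,
+ *			.len = sizeof(tx),
+ *			.speed_hz = 1000000,
+ *			.bits_per_word = 8,
+ *		};
+ *		int ret, fd = open("/dev/spidev0.0", O_RDWR);
+ *
+ *		if (fd < 0)
+ *			return -1;
+ *		ret = ioctl(fd, SPI_IOC_MESSAGE(1), &xfer);
+ *		close(fd);
+ *		return ret < 0 ? -1 : rx[1];
+ *	}
+ */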
+#define SPIDEV_MAJOR 153 /* assigned */
+#define N_SPI_MINORS 32 /* ... up to 256 */
+
+static DECLARE_BITMAP(minors, N_SPI_MINORS);
+
+static_assert(N_SPI_MINORS > 0 && N_SPI_MINORS <= 256);
+
+/* Bit masks for spi_device.mode management. Note that incorrect
+ * settings for some of these flags can cause *lots* of trouble for
+ * other devices on a shared bus:
+ *
+ * - CS_HIGH ... this device will be active when it shouldn't be
+ * - 3WIRE ... when active, it won't behave as it should
+ * - NO_CS ... there will be no explicit message boundaries; this
+ * is completely incompatible with the shared bus model
+ * - READY ... transfers may proceed when they shouldn't.
+ *
+ * REVISIT should changing those flags be privileged?
+ */
+#define SPI_MODE_MASK (SPI_MODE_X_MASK | SPI_CS_HIGH \
+ | SPI_LSB_FIRST | SPI_3WIRE | SPI_LOOP \
+ | SPI_NO_CS | SPI_READY | SPI_TX_DUAL \
+ | SPI_TX_QUAD | SPI_TX_OCTAL | SPI_RX_DUAL \
+ | SPI_RX_QUAD | SPI_RX_OCTAL \
+ | SPI_RX_CPHA_FLIP)
+
+struct spidev_data {
+ dev_t devt;
+ spinlock_t spi_lock;
+ struct spi_device *spi;
+ struct list_head device_entry;
+
+ /* TX/RX buffers are NULL unless this device is open (users > 0) */
+ struct mutex buf_lock;
+ unsigned users;
+ u8 *tx_buffer;
+ u8 *rx_buffer;
+ u32 speed_hz;
+};
+
+static LIST_HEAD(device_list);
+static DEFINE_MUTEX(device_list_lock);
+
+static unsigned bufsiz = 4096;
+module_param(bufsiz, uint, S_IRUGO);
+MODULE_PARM_DESC(bufsiz, "data bytes in biggest supported SPI message");
+
+/*-------------------------------------------------------------------------*/
+
+static ssize_t
+spidev_sync(struct spidev_data *spidev, struct spi_message *message)
+{
+ int status;
+ struct spi_device *spi;
+
+ spin_lock_irq(&spidev->spi_lock);
+ spi = spidev->spi;
+ spin_unlock_irq(&spidev->spi_lock);
+
+ if (spi == NULL)
+ status = -ESHUTDOWN;
+ else
+ status = spi_sync(spi, message);
+
+ if (status == 0)
+ status = message->actual_length;
+
+ return status;
+}
+
+static inline ssize_t
+spidev_sync_write(struct spidev_data *spidev, size_t len)
+{
+ struct spi_transfer t = {
+ .tx_buf = spidev->tx_buffer,
+ .len = len,
+ .speed_hz = spidev->speed_hz,
+ };
+ struct spi_message m;
+
+ spi_message_init(&m);
+ spi_message_add_tail(&t, &m);
+ return spidev_sync(spidev, &m);
+}
+
+static inline ssize_t
+spidev_sync_read(struct spidev_data *spidev, size_t len)
+{
+ struct spi_transfer t = {
+ .rx_buf = spidev->rx_buffer,
+ .len = len,
+ .speed_hz = spidev->speed_hz,
+ };
+ struct spi_message m;
+
+ spi_message_init(&m);
+ spi_message_add_tail(&t, &m);
+ return spidev_sync(spidev, &m);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* Read-only message with current device setup */
+static ssize_t
+spidev_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
+{
+ struct spidev_data *spidev;
+ ssize_t status;
+
+ /* chipselect only toggles at start or end of operation */
+ if (count > bufsiz)
+ return -EMSGSIZE;
+
+ spidev = filp->private_data;
+
+ mutex_lock(&spidev->buf_lock);
+ status = spidev_sync_read(spidev, count);
+ if (status > 0) {
+ unsigned long missing;
+
+ missing = copy_to_user(buf, spidev->rx_buffer, status);
+ if (missing == status)
+ status = -EFAULT;
+ else
+ status = status - missing;
+ }
+ mutex_unlock(&spidev->buf_lock);
+
+ return status;
+}
+
+/* Write-only message with current device setup */
+static ssize_t
+spidev_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ struct spidev_data *spidev;
+ ssize_t status;
+ unsigned long missing;
+
+ /* chipselect only toggles at start or end of operation */
+ if (count > bufsiz)
+ return -EMSGSIZE;
+
+ spidev = filp->private_data;
+
+ mutex_lock(&spidev->buf_lock);
+ missing = copy_from_user(spidev->tx_buffer, buf, count);
+ if (missing == 0)
+ status = spidev_sync_write(spidev, count);
+ else
+ status = -EFAULT;
+ mutex_unlock(&spidev->buf_lock);
+
+ return status;
+}
+
+static int spidev_message(struct spidev_data *spidev,
+ struct spi_ioc_transfer *u_xfers, unsigned n_xfers)
+{
+ struct spi_message msg;
+ struct spi_transfer *k_xfers;
+ struct spi_transfer *k_tmp;
+ struct spi_ioc_transfer *u_tmp;
+ unsigned n, total, tx_total, rx_total;
+ u8 *tx_buf, *rx_buf;
+ int status = -EFAULT;
+
+ spi_message_init(&msg);
+ k_xfers = kcalloc(n_xfers, sizeof(*k_tmp), GFP_KERNEL);
+ if (k_xfers == NULL)
+ return -ENOMEM;
+
+ /* Construct spi_message, copying any tx data to bounce buffer.
+ * We walk the array of user-provided transfers, using each one
+ * to initialize a kernel version of the same transfer.
+ */
+ tx_buf = spidev->tx_buffer;
+ rx_buf = spidev->rx_buffer;
+ total = 0;
+ tx_total = 0;
+ rx_total = 0;
+ for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers;
+ n;
+ n--, k_tmp++, u_tmp++) {
+ /* Ensure that subsequent allocations from rx_buf/tx_buf also meet
+ * DMA alignment requirements.
+ */
+ unsigned int len_aligned = ALIGN(u_tmp->len, ARCH_KMALLOC_MINALIGN);
+
+ k_tmp->len = u_tmp->len;
+
+ total += k_tmp->len;
+ /* Since the function returns the total length of transfers
+ * on success, restrict the total to positive int values to
+ * avoid the return value looking like an error. Also check
+ * each transfer length to avoid arithmetic overflow.
+ */
+ if (total > INT_MAX || k_tmp->len > INT_MAX) {
+ status = -EMSGSIZE;
+ goto done;
+ }
+
+ if (u_tmp->rx_buf) {
+ /* this transfer needs space in RX bounce buffer */
+ rx_total += len_aligned;
+ if (rx_total > bufsiz) {
+ status = -EMSGSIZE;
+ goto done;
+ }
+ k_tmp->rx_buf = rx_buf;
+ rx_buf += len_aligned;
+ }
+ if (u_tmp->tx_buf) {
+ /* this transfer needs space in TX bounce buffer */
+ tx_total += len_aligned;
+ if (tx_total > bufsiz) {
+ status = -EMSGSIZE;
+ goto done;
+ }
+ k_tmp->tx_buf = tx_buf;
+ if (copy_from_user(tx_buf, (const u8 __user *)
+ (uintptr_t) u_tmp->tx_buf,
+ u_tmp->len))
+ goto done;
+ tx_buf += len_aligned;
+ }
+
+ k_tmp->cs_change = !!u_tmp->cs_change;
+ k_tmp->tx_nbits = u_tmp->tx_nbits;
+ k_tmp->rx_nbits = u_tmp->rx_nbits;
+ k_tmp->bits_per_word = u_tmp->bits_per_word;
+ k_tmp->delay.value = u_tmp->delay_usecs;
+ k_tmp->delay.unit = SPI_DELAY_UNIT_USECS;
+ k_tmp->speed_hz = u_tmp->speed_hz;
+ k_tmp->word_delay.value = u_tmp->word_delay_usecs;
+ k_tmp->word_delay.unit = SPI_DELAY_UNIT_USECS;
+ if (!k_tmp->speed_hz)
+ k_tmp->speed_hz = spidev->speed_hz;
+#ifdef VERBOSE
+ dev_dbg(&spidev->spi->dev,
+ " xfer len %u %s%s%s%dbits %u usec %u usec %uHz\n",
+ k_tmp->len,
+ k_tmp->rx_buf ? "rx " : "",
+ k_tmp->tx_buf ? "tx " : "",
+ k_tmp->cs_change ? "cs " : "",
+ k_tmp->bits_per_word ? : spidev->spi->bits_per_word,
+ k_tmp->delay.value,
+ k_tmp->word_delay.value,
+ k_tmp->speed_hz ? : spidev->spi->max_speed_hz);
+#endif
+ spi_message_add_tail(k_tmp, &msg);
+ }
+
+ status = spidev_sync(spidev, &msg);
+ if (status < 0)
+ goto done;
+
+ /* copy any rx data out of bounce buffer */
+ for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers;
+ n;
+ n--, k_tmp++, u_tmp++) {
+ if (u_tmp->rx_buf) {
+ if (copy_to_user((u8 __user *)
+ (uintptr_t) u_tmp->rx_buf, k_tmp->rx_buf,
+ u_tmp->len)) {
+ status = -EFAULT;
+ goto done;
+ }
+ }
+ }
+ status = total;
+
+done:
+ kfree(k_xfers);
+ return status;
+}
+
+static struct spi_ioc_transfer *
+spidev_get_ioc_message(unsigned int cmd, struct spi_ioc_transfer __user *u_ioc,
+ unsigned *n_ioc)
+{
+ u32 tmp;
+
+ /* Check type, command number and direction */
+ if (_IOC_TYPE(cmd) != SPI_IOC_MAGIC
+ || _IOC_NR(cmd) != _IOC_NR(SPI_IOC_MESSAGE(0))
+ || _IOC_DIR(cmd) != _IOC_WRITE)
+ return ERR_PTR(-ENOTTY);
+
+ tmp = _IOC_SIZE(cmd);
+ if ((tmp % sizeof(struct spi_ioc_transfer)) != 0)
+ return ERR_PTR(-EINVAL);
+ *n_ioc = tmp / sizeof(struct spi_ioc_transfer);
+ if (*n_ioc == 0)
+ return NULL;
+
+ /* copy into scratch area */
+ return memdup_user(u_ioc, tmp);
+}
+
+static long
+spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ int retval = 0;
+ struct spidev_data *spidev;
+ struct spi_device *spi;
+ u32 tmp;
+ unsigned n_ioc;
+ struct spi_ioc_transfer *ioc;
+
+ /* Check type and command number */
+ if (_IOC_TYPE(cmd) != SPI_IOC_MAGIC)
+ return -ENOTTY;
+
+ /* guard against device removal before, or while,
+ * we issue this ioctl.
+ */
+ spidev = filp->private_data;
+ spin_lock_irq(&spidev->spi_lock);
+ spi = spi_dev_get(spidev->spi);
+ spin_unlock_irq(&spidev->spi_lock);
+
+ if (spi == NULL)
+ return -ESHUTDOWN;
+
+ /* use the buffer lock here for triple duty:
+ * - prevent I/O (from us) so calling spi_setup() is safe;
+ * - prevent concurrent SPI_IOC_WR_* from morphing
+ * data fields while SPI_IOC_RD_* reads them;
+ * - SPI_IOC_MESSAGE needs the buffer locked "normally".
+ */
+ mutex_lock(&spidev->buf_lock);
+
+ switch (cmd) {
+ /* read requests */
+ case SPI_IOC_RD_MODE:
+ case SPI_IOC_RD_MODE32:
+ tmp = spi->mode;
+
+ {
+ struct spi_controller *ctlr = spi->controller;
+
+ if (ctlr->use_gpio_descriptors && ctlr->cs_gpiods &&
+ ctlr->cs_gpiods[spi->chip_select])
+ tmp &= ~SPI_CS_HIGH;
+ }
+
+ if (cmd == SPI_IOC_RD_MODE)
+ retval = put_user(tmp & SPI_MODE_MASK,
+ (__u8 __user *)arg);
+ else
+ retval = put_user(tmp & SPI_MODE_MASK,
+ (__u32 __user *)arg);
+ break;
+ case SPI_IOC_RD_LSB_FIRST:
+ retval = put_user((spi->mode & SPI_LSB_FIRST) ? 1 : 0,
+ (__u8 __user *)arg);
+ break;
+ case SPI_IOC_RD_BITS_PER_WORD:
+ retval = put_user(spi->bits_per_word, (__u8 __user *)arg);
+ break;
+ case SPI_IOC_RD_MAX_SPEED_HZ:
+ retval = put_user(spidev->speed_hz, (__u32 __user *)arg);
+ break;
+
+ /* write requests */
+ case SPI_IOC_WR_MODE:
+ case SPI_IOC_WR_MODE32:
+ if (cmd == SPI_IOC_WR_MODE)
+ retval = get_user(tmp, (u8 __user *)arg);
+ else
+ retval = get_user(tmp, (u32 __user *)arg);
+ if (retval == 0) {
+ struct spi_controller *ctlr = spi->controller;
+ u32 save = spi->mode;
+
+ if (tmp & ~SPI_MODE_MASK) {
+ retval = -EINVAL;
+ break;
+ }
+
+ if (ctlr->use_gpio_descriptors && ctlr->cs_gpiods &&
+ ctlr->cs_gpiods[spi->chip_select])
+ tmp |= SPI_CS_HIGH;
+
+ tmp |= spi->mode & ~SPI_MODE_MASK;
+ spi->mode = tmp & SPI_MODE_USER_MASK;
+ retval = spi_setup(spi);
+ if (retval < 0)
+ spi->mode = save;
+ else
+ dev_dbg(&spi->dev, "spi mode %x\n", tmp);
+ }
+ break;
+ case SPI_IOC_WR_LSB_FIRST:
+ retval = get_user(tmp, (__u8 __user *)arg);
+ if (retval == 0) {
+ u32 save = spi->mode;
+
+ if (tmp)
+ spi->mode |= SPI_LSB_FIRST;
+ else
+ spi->mode &= ~SPI_LSB_FIRST;
+ retval = spi_setup(spi);
+ if (retval < 0)
+ spi->mode = save;
+ else
+ dev_dbg(&spi->dev, "%csb first\n",
+ tmp ? 'l' : 'm');
+ }
+ break;
+ case SPI_IOC_WR_BITS_PER_WORD:
+ retval = get_user(tmp, (__u8 __user *)arg);
+ if (retval == 0) {
+ u8 save = spi->bits_per_word;
+
+ spi->bits_per_word = tmp;
+ retval = spi_setup(spi);
+ if (retval < 0)
+ spi->bits_per_word = save;
+ else
+ dev_dbg(&spi->dev, "%d bits per word\n", tmp);
+ }
+ break;
+ case SPI_IOC_WR_MAX_SPEED_HZ: {
+ u32 save;
+
+ retval = get_user(tmp, (__u32 __user *)arg);
+ if (retval)
+ break;
+ if (tmp == 0) {
+ retval = -EINVAL;
+ break;
+ }
+
+ save = spi->max_speed_hz;
+
+ spi->max_speed_hz = tmp;
+ retval = spi_setup(spi);
+ if (retval == 0) {
+ spidev->speed_hz = tmp;
+ dev_dbg(&spi->dev, "%d Hz (max)\n", spidev->speed_hz);
+ }
+
+ spi->max_speed_hz = save;
+ break;
+ }
+ default:
+ /* segmented and/or full-duplex I/O request */
+ /* Check message and copy into scratch area */
+ ioc = spidev_get_ioc_message(cmd,
+ (struct spi_ioc_transfer __user *)arg, &n_ioc);
+ if (IS_ERR(ioc)) {
+ retval = PTR_ERR(ioc);
+ break;
+ }
+ if (!ioc)
+ break; /* n_ioc is also 0 */
+
+ /* translate to spi_message, execute */
+ retval = spidev_message(spidev, ioc, n_ioc);
+ kfree(ioc);
+ break;
+ }
+
+ mutex_unlock(&spidev->buf_lock);
+ spi_dev_put(spi);
+ return retval;
+}
+
+#ifdef CONFIG_COMPAT
+static long
+spidev_compat_ioc_message(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct spi_ioc_transfer __user *u_ioc;
+ int retval = 0;
+ struct spidev_data *spidev;
+ struct spi_device *spi;
+ unsigned n_ioc, n;
+ struct spi_ioc_transfer *ioc;
+
+ u_ioc = (struct spi_ioc_transfer __user *) compat_ptr(arg);
+
+ /* guard against device removal before, or while,
+ * we issue this ioctl.
+ */
+ spidev = filp->private_data;
+ spin_lock_irq(&spidev->spi_lock);
+ spi = spi_dev_get(spidev->spi);
+ spin_unlock_irq(&spidev->spi_lock);
+
+ if (spi == NULL)
+ return -ESHUTDOWN;
+
+ /* SPI_IOC_MESSAGE needs the buffer locked "normally" */
+ mutex_lock(&spidev->buf_lock);
+
+ /* Check message and copy into scratch area */
+ ioc = spidev_get_ioc_message(cmd, u_ioc, &n_ioc);
+ if (IS_ERR(ioc)) {
+ retval = PTR_ERR(ioc);
+ goto done;
+ }
+ if (!ioc)
+ goto done; /* n_ioc is also 0 */
+
+ /* Convert buffer pointers */
+ for (n = 0; n < n_ioc; n++) {
+ ioc[n].rx_buf = (uintptr_t) compat_ptr(ioc[n].rx_buf);
+ ioc[n].tx_buf = (uintptr_t) compat_ptr(ioc[n].tx_buf);
+ }
+
+ /* translate to spi_message, execute */
+ retval = spidev_message(spidev, ioc, n_ioc);
+ kfree(ioc);
+
+done:
+ mutex_unlock(&spidev->buf_lock);
+ spi_dev_put(spi);
+ return retval;
+}
+
+static long
+spidev_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ if (_IOC_TYPE(cmd) == SPI_IOC_MAGIC
+ && _IOC_NR(cmd) == _IOC_NR(SPI_IOC_MESSAGE(0))
+ && _IOC_DIR(cmd) == _IOC_WRITE)
+ return spidev_compat_ioc_message(filp, cmd, arg);
+
+ return spidev_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
+}
+#else
+#define spidev_compat_ioctl NULL
+#endif /* CONFIG_COMPAT */
+
+static int spidev_open(struct inode *inode, struct file *filp)
+{
+ struct spidev_data *spidev = NULL, *iter;
+ int status = -ENXIO;
+
+ mutex_lock(&device_list_lock);
+
+ list_for_each_entry(iter, &device_list, device_entry) {
+ if (iter->devt == inode->i_rdev) {
+ status = 0;
+ spidev = iter;
+ break;
+ }
+ }
+
+ if (!spidev) {
+ pr_debug("spidev: nothing for minor %d\n", iminor(inode));
+ goto err_find_dev;
+ }
+
+ if (!spidev->tx_buffer) {
+ spidev->tx_buffer = kmalloc(bufsiz, GFP_KERNEL);
+ if (!spidev->tx_buffer) {
+ status = -ENOMEM;
+ goto err_find_dev;
+ }
+ }
+
+ if (!spidev->rx_buffer) {
+ spidev->rx_buffer = kmalloc(bufsiz, GFP_KERNEL);
+ if (!spidev->rx_buffer) {
+ status = -ENOMEM;
+ goto err_alloc_rx_buf;
+ }
+ }
+
+ spidev->users++;
+ filp->private_data = spidev;
+ stream_open(inode, filp);
+
+ mutex_unlock(&device_list_lock);
+ return 0;
+
+err_alloc_rx_buf:
+ kfree(spidev->tx_buffer);
+ spidev->tx_buffer = NULL;
+err_find_dev:
+ mutex_unlock(&device_list_lock);
+ return status;
+}
+
+static int spidev_release(struct inode *inode, struct file *filp)
+{
+ struct spidev_data *spidev;
+ int dofree;
+
+ mutex_lock(&device_list_lock);
+ spidev = filp->private_data;
+ filp->private_data = NULL;
+
+ spin_lock_irq(&spidev->spi_lock);
+ /* ... after we unbound from the underlying device? */
+ dofree = (spidev->spi == NULL);
+ spin_unlock_irq(&spidev->spi_lock);
+
+ /* last close? */
+ spidev->users--;
+ if (!spidev->users) {
+
+ kfree(spidev->tx_buffer);
+ spidev->tx_buffer = NULL;
+
+ kfree(spidev->rx_buffer);
+ spidev->rx_buffer = NULL;
+
+ if (dofree)
+ kfree(spidev);
+ else
+ spidev->speed_hz = spidev->spi->max_speed_hz;
+ }
+#ifdef CONFIG_SPI_SLAVE
+ if (!dofree)
+ spi_slave_abort(spidev->spi);
+#endif
+ mutex_unlock(&device_list_lock);
+
+ return 0;
+}
+
+static const struct file_operations spidev_fops = {
+ .owner = THIS_MODULE,
+ /* REVISIT switch to aio primitives, so that userspace
+ * gets more complete API coverage. It'll simplify things
+ * too, except for the locking.
+ */
+ .write = spidev_write,
+ .read = spidev_read,
+ .unlocked_ioctl = spidev_ioctl,
+ .compat_ioctl = spidev_compat_ioctl,
+ .open = spidev_open,
+ .release = spidev_release,
+ .llseek = no_llseek,
+};
+
+/*-------------------------------------------------------------------------*/
+
+/* The main reason to have this class is to make mdev/udev create the
+ * /dev/spidevB.C character device nodes exposing our userspace API.
+ * It also simplifies memory management.
+ */
+
+static struct class *spidev_class;
+
+static const struct spi_device_id spidev_spi_ids[] = {
+ { .name = "dh2228fv" },
+ { .name = "ltc2488" },
+ { .name = "sx1301" },
+ { .name = "bk4" },
+ { .name = "dhcom-board" },
+ { .name = "m53cpld" },
+ { .name = "spi-petra" },
+ { .name = "spi-authenta" },
+ {},
+};
+MODULE_DEVICE_TABLE(spi, spidev_spi_ids);
+
+/*
+ * spidev should never be referenced in DT without a specific compatible string;
+ * it is a Linux implementation detail rather than a description of the hardware.
+ */
+static int spidev_of_check(struct device *dev)
+{
+ if (device_property_match_string(dev, "compatible", "spidev") < 0)
+ return 0;
+
+ dev_err(dev, "spidev listed directly in DT is not supported\n");
+ return -EINVAL;
+}
+
+static const struct of_device_id spidev_dt_ids[] = {
+ { .compatible = "rohm,dh2228fv", .data = &spidev_of_check },
+ { .compatible = "lineartechnology,ltc2488", .data = &spidev_of_check },
+ { .compatible = "semtech,sx1301", .data = &spidev_of_check },
+ { .compatible = "lwn,bk4", .data = &spidev_of_check },
+ { .compatible = "dh,dhcom-board", .data = &spidev_of_check },
+ { .compatible = "menlo,m53cpld", .data = &spidev_of_check },
+ { .compatible = "cisco,spi-petra", .data = &spidev_of_check },
+ { .compatible = "micron,spi-authenta", .data = &spidev_of_check },
+ {},
+};
+MODULE_DEVICE_TABLE(of, spidev_dt_ids);
+
+/* Dummy SPI devices not to be used in production systems */
+static int spidev_acpi_check(struct device *dev)
+{
+ dev_warn(dev, "do not use this driver in production systems!\n");
+ return 0;
+}
+
+static const struct acpi_device_id spidev_acpi_ids[] = {
+ /*
+ * The ACPI SPT000* devices are only meant for development and
+ * testing. Systems used in production should have a proper ACPI
+ * description of the connected peripheral and they should also use
+ * a proper driver instead of poking directly to the SPI bus.
+ */
+ { "SPT0001", (kernel_ulong_t)&spidev_acpi_check },
+ { "SPT0002", (kernel_ulong_t)&spidev_acpi_check },
+ { "SPT0003", (kernel_ulong_t)&spidev_acpi_check },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, spidev_acpi_ids);
+
+/*-------------------------------------------------------------------------*/
+
+static int spidev_probe(struct spi_device *spi)
+{
+ int (*match)(struct device *dev);
+ struct spidev_data *spidev;
+ int status;
+ unsigned long minor;
+
+ match = device_get_match_data(&spi->dev);
+ if (match) {
+ status = match(&spi->dev);
+ if (status)
+ return status;
+ }
+
+ /* Allocate driver data */
+ spidev = kzalloc(sizeof(*spidev), GFP_KERNEL);
+ if (!spidev)
+ return -ENOMEM;
+
+ /* Initialize the driver data */
+ spidev->spi = spi;
+ spin_lock_init(&spidev->spi_lock);
+ mutex_init(&spidev->buf_lock);
+
+ INIT_LIST_HEAD(&spidev->device_entry);
+
+ /* If we can allocate a minor number, hook up this device.
+ * Reusing minors is fine so long as udev or mdev is working.
+ */
+ mutex_lock(&device_list_lock);
+ minor = find_first_zero_bit(minors, N_SPI_MINORS);
+ if (minor < N_SPI_MINORS) {
+ struct device *dev;
+
+ spidev->devt = MKDEV(SPIDEV_MAJOR, minor);
+ dev = device_create(spidev_class, &spi->dev, spidev->devt,
+ spidev, "spidev%d.%d",
+ spi->master->bus_num, spi->chip_select);
+ status = PTR_ERR_OR_ZERO(dev);
+ } else {
+ dev_dbg(&spi->dev, "no minor number available!\n");
+ status = -ENODEV;
+ }
+ if (status == 0) {
+ set_bit(minor, minors);
+ list_add(&spidev->device_entry, &device_list);
+ }
+ mutex_unlock(&device_list_lock);
+
+ spidev->speed_hz = spi->max_speed_hz;
+
+ if (status == 0)
+ spi_set_drvdata(spi, spidev);
+ else
+ kfree(spidev);
+
+ return status;
+}
+
+static void spidev_remove(struct spi_device *spi)
+{
+ struct spidev_data *spidev = spi_get_drvdata(spi);
+
+ /* prevent new opens */
+ mutex_lock(&device_list_lock);
+ /* make sure ops on existing fds can abort cleanly */
+ spin_lock_irq(&spidev->spi_lock);
+ spidev->spi = NULL;
+ spin_unlock_irq(&spidev->spi_lock);
+
+ list_del(&spidev->device_entry);
+ device_destroy(spidev_class, spidev->devt);
+ clear_bit(MINOR(spidev->devt), minors);
+ if (spidev->users == 0)
+ kfree(spidev);
+ mutex_unlock(&device_list_lock);
+}
+
+static struct spi_driver spidev_spi_driver = {
+ .driver = {
+ .name = "spidev",
+ .of_match_table = spidev_dt_ids,
+ .acpi_match_table = spidev_acpi_ids,
+ },
+ .probe = spidev_probe,
+ .remove = spidev_remove,
+ .id_table = spidev_spi_ids,
+
+ /* NOTE: suspend/resume methods are not necessary here.
+ * We don't do anything except pass the requests to/from
+ * the underlying controller. The refrigerator handles
+ * most issues; the controller driver handles the rest.
+ */
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int __init spidev_init(void)
+{
+ int status;
+
+ /* Claim our 256 reserved device numbers. Then register a class
+ * that will key udev/mdev to add/remove /dev nodes. Last, register
+ * the driver which manages those device numbers.
+ */
+ status = register_chrdev(SPIDEV_MAJOR, "spi", &spidev_fops);
+ if (status < 0)
+ return status;
+
+ spidev_class = class_create(THIS_MODULE, "spidev");
+ if (IS_ERR(spidev_class)) {
+ unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
+ return PTR_ERR(spidev_class);
+ }
+
+ status = spi_register_driver(&spidev_spi_driver);
+ if (status < 0) {
+ class_destroy(spidev_class);
+ unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
+ }
+ return status;
+}
+module_init(spidev_init);
+
+static void __exit spidev_exit(void)
+{
+ spi_unregister_driver(&spidev_spi_driver);
+ class_destroy(spidev_class);
+ unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
+}
+module_exit(spidev_exit);
+
+MODULE_AUTHOR("Andrea Paterniani, <a.paterniani@swapp-eng.it>");
+MODULE_DESCRIPTION("User mode SPI device interface");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("spi:spidev");