author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
commit     5d1646d90e1f2cceb9f0828f4b28318cd0ec7744 (patch)
tree       a94efe259b9009378be6d90eb30d2b019d95c194 /drivers/mtd/nand/raw
parent     Initial commit. (diff)
download   linux-5d1646d90e1f2cceb9f0828f4b28318cd0ec7744.tar.xz
           linux-5d1646d90e1f2cceb9f0828f4b28318cd0ec7744.zip
Adding upstream version 5.10.209. (tag: upstream/5.10.209, branch: upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/mtd/nand/raw')
-rw-r--r-- | drivers/mtd/nand/raw/Kconfig | 559
-rw-r--r-- | drivers/mtd/nand/raw/Makefile | 71
-rw-r--r-- | drivers/mtd/nand/raw/ams-delta.c | 449
-rw-r--r-- | drivers/mtd/nand/raw/arasan-nand-controller.c | 1372
-rw-r--r-- | drivers/mtd/nand/raw/atmel/Makefile | 5
-rw-r--r-- | drivers/mtd/nand/raw/atmel/nand-controller.c | 2668
-rw-r--r-- | drivers/mtd/nand/raw/atmel/pmecc.c | 1018
-rw-r--r-- | drivers/mtd/nand/raw/atmel/pmecc.h | 70
-rw-r--r-- | drivers/mtd/nand/raw/au1550nd.c | 367
-rw-r--r-- | drivers/mtd/nand/raw/bcm47xxnflash/Makefile | 5
-rw-r--r-- | drivers/mtd/nand/raw/bcm47xxnflash/bcm47xxnflash.h | 26
-rw-r--r-- | drivers/mtd/nand/raw/bcm47xxnflash/main.c | 81
-rw-r--r-- | drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c | 451
-rw-r--r-- | drivers/mtd/nand/raw/brcmnand/Makefile | 8
-rw-r--r-- | drivers/mtd/nand/raw/brcmnand/bcm63138_nand.c | 101
-rw-r--r-- | drivers/mtd/nand/raw/brcmnand/bcm6368_nand.c | 134
-rw-r--r-- | drivers/mtd/nand/raw/brcmnand/brcmnand.c | 3242
-rw-r--r-- | drivers/mtd/nand/raw/brcmnand/brcmnand.h | 95
-rw-r--r-- | drivers/mtd/nand/raw/brcmnand/brcmstb_nand.c | 37
-rw-r--r-- | drivers/mtd/nand/raw/brcmnand/iproc_nand.c | 152
-rw-r--r-- | drivers/mtd/nand/raw/cadence-nand-controller.c | 3038
-rw-r--r-- | drivers/mtd/nand/raw/cafe_nand.c | 887
-rw-r--r-- | drivers/mtd/nand/raw/cs553x_nand.c | 427
-rw-r--r-- | drivers/mtd/nand/raw/davinci_nand.c | 924
-rw-r--r-- | drivers/mtd/nand/raw/denali.c | 1381
-rw-r--r-- | drivers/mtd/nand/raw/denali.h | 398
-rw-r--r-- | drivers/mtd/nand/raw/denali_dt.c | 265
-rw-r--r-- | drivers/mtd/nand/raw/denali_pci.c | 139
-rw-r--r-- | drivers/mtd/nand/raw/diskonchip.c | 1579
-rw-r--r-- | drivers/mtd/nand/raw/fsl_elbc_nand.c | 1003
-rw-r--r-- | drivers/mtd/nand/raw/fsl_ifc_nand.c | 1142
-rw-r--r-- | drivers/mtd/nand/raw/fsl_upm.c | 273
-rw-r--r-- | drivers/mtd/nand/raw/fsmc_nand.c | 1232
-rw-r--r-- | drivers/mtd/nand/raw/gpio.c | 409
-rw-r--r-- | drivers/mtd/nand/raw/gpmi-nand/Makefile | 3
-rw-r--r-- | drivers/mtd/nand/raw/gpmi-nand/bch-regs.h | 115
-rw-r--r-- | drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c | 2667
-rw-r--r-- | drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h | 175
-rw-r--r-- | drivers/mtd/nand/raw/gpmi-nand/gpmi-regs.h | 179
-rw-r--r-- | drivers/mtd/nand/raw/hisi504_nand.c | 876
-rw-r--r-- | drivers/mtd/nand/raw/ingenic/Kconfig | 45
-rw-r--r-- | drivers/mtd/nand/raw/ingenic/Makefile | 9
-rw-r--r-- | drivers/mtd/nand/raw/ingenic/ingenic_ecc.c | 155
-rw-r--r-- | drivers/mtd/nand/raw/ingenic/ingenic_ecc.h | 83
-rw-r--r-- | drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c | 573
-rw-r--r-- | drivers/mtd/nand/raw/ingenic/jz4725b_bch.c | 295
-rw-r--r-- | drivers/mtd/nand/raw/ingenic/jz4740_ecc.c | 197
-rw-r--r-- | drivers/mtd/nand/raw/ingenic/jz4780_bch.c | 271
-rw-r--r-- | drivers/mtd/nand/raw/internals.h | 169
-rw-r--r-- | drivers/mtd/nand/raw/lpc32xx_mlc.c | 910
-rw-r--r-- | drivers/mtd/nand/raw/lpc32xx_slc.c | 1038
-rw-r--r-- | drivers/mtd/nand/raw/marvell_nand.c | 3176
-rw-r--r-- | drivers/mtd/nand/raw/meson_nand.c | 1481
-rw-r--r-- | drivers/mtd/nand/raw/mpc5121_nfc.c | 859
-rw-r--r-- | drivers/mtd/nand/raw/mtk_ecc.c | 601
-rw-r--r-- | drivers/mtd/nand/raw/mtk_ecc.h | 47
-rw-r--r-- | drivers/mtd/nand/raw/mtk_nand.c | 1682
-rw-r--r-- | drivers/mtd/nand/raw/mxc_nand.c | 1941
-rw-r--r-- | drivers/mtd/nand/raw/mxic_nand.c | 588
-rw-r--r-- | drivers/mtd/nand/raw/nand_amd.c | 53
-rw-r--r-- | drivers/mtd/nand/raw/nand_base.c | 5987
-rw-r--r-- | drivers/mtd/nand/raw/nand_bbt.c | 1455
-rw-r--r-- | drivers/mtd/nand/raw/nand_bch.c | 219
-rw-r--r-- | drivers/mtd/nand/raw/nand_ecc.c | 484
-rw-r--r-- | drivers/mtd/nand/raw/nand_esmt.c | 59
-rw-r--r-- | drivers/mtd/nand/raw/nand_hynix.c | 721
-rw-r--r-- | drivers/mtd/nand/raw/nand_ids.c | 207
-rw-r--r-- | drivers/mtd/nand/raw/nand_jedec.c | 139
-rw-r--r-- | drivers/mtd/nand/raw/nand_legacy.c | 643
-rw-r--r-- | drivers/mtd/nand/raw/nand_macronix.c | 334
-rw-r--r-- | drivers/mtd/nand/raw/nand_micron.c | 599
-rw-r--r-- | drivers/mtd/nand/raw/nand_onfi.c | 334
-rw-r--r-- | drivers/mtd/nand/raw/nand_samsung.c | 139
-rw-r--r-- | drivers/mtd/nand/raw/nand_timings.c | 642
-rw-r--r-- | drivers/mtd/nand/raw/nand_toshiba.c | 300
-rw-r--r-- | drivers/mtd/nand/raw/nandsim.c | 2457
-rw-r--r-- | drivers/mtd/nand/raw/ndfc.c | 278
-rw-r--r-- | drivers/mtd/nand/raw/omap2.c | 2319
-rw-r--r-- | drivers/mtd/nand/raw/omap_elm.c | 573
-rw-r--r-- | drivers/mtd/nand/raw/orion_nand.c | 254
-rw-r--r-- | drivers/mtd/nand/raw/oxnas_nand.c | 213
-rw-r--r-- | drivers/mtd/nand/raw/pasemi_nand.c | 243
-rw-r--r-- | drivers/mtd/nand/raw/plat_nand.c | 163
-rw-r--r-- | drivers/mtd/nand/raw/qcom_nandc.c | 3083
-rw-r--r-- | drivers/mtd/nand/raw/r852.c | 1093
-rw-r--r-- | drivers/mtd/nand/raw/r852.h | 155
-rw-r--r-- | drivers/mtd/nand/raw/s3c2410.c | 1294
-rw-r--r-- | drivers/mtd/nand/raw/sh_flctl.c | 1234
-rw-r--r-- | drivers/mtd/nand/raw/sharpsl.c | 248
-rw-r--r-- | drivers/mtd/nand/raw/sm_common.c | 210
-rw-r--r-- | drivers/mtd/nand/raw/sm_common.h | 58
-rw-r--r-- | drivers/mtd/nand/raw/socrates_nand.c | 242
-rw-r--r-- | drivers/mtd/nand/raw/stm32_fmc2_nand.c | 2084
-rw-r--r-- | drivers/mtd/nand/raw/sunxi_nand.c | 2236
-rw-r--r-- | drivers/mtd/nand/raw/tango_nand.c | 727
-rw-r--r-- | drivers/mtd/nand/raw/tegra_nand.c | 1249
-rw-r--r-- | drivers/mtd/nand/raw/tmio_nand.c | 531
-rw-r--r-- | drivers/mtd/nand/raw/txx9ndfmc.c | 424
-rw-r--r-- | drivers/mtd/nand/raw/vf610_nfc.c | 967
-rw-r--r-- | drivers/mtd/nand/raw/xway_nand.c | 270
100 files changed, 77063 insertions, 0 deletions
diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
new file mode 100644
index 000000000..6c46f25b5
--- /dev/null
+++ b/drivers/mtd/nand/raw/Kconfig
@@ -0,0 +1,559 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config MTD_NAND_ECC_SW_HAMMING
+ tristate
+
+config MTD_NAND_ECC_SW_HAMMING_SMC
+ bool "NAND ECC Smart Media byte order"
+ depends on MTD_NAND_ECC_SW_HAMMING
+ default n
+ help
+ Software ECC according to the Smart Media Specification.
+	  The original Linux implementation had bytes 0 and 1 swapped.
+
+menuconfig MTD_RAW_NAND
+ tristate "Raw/Parallel NAND Device Support"
+ select MTD_NAND_CORE
+ select MTD_NAND_ECC
+ select MTD_NAND_ECC_SW_HAMMING
+ help
+	  This enables support for accessing all types of raw/parallel
+ NAND flash devices. For further information see
+ <http://www.linux-mtd.infradead.org/doc/nand.html>.
+
+if MTD_RAW_NAND
+
+config MTD_NAND_ECC_SW_BCH
+ bool "Support software BCH ECC"
+ select BCH
+ default n
+ help
+ This enables support for software BCH error correction. Binary BCH
+	  codes are more powerful and more CPU-intensive than traditional Hamming
+ ECC codes. They are used with NAND devices requiring more than 1 bit
+ of error correction.
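
	  For illustration, a minimal sketch of such software BCH correction
	  with the kernel's <linux/bch.h> API (the same calls this option
	  selects), assuming a field order m = 13 and strength t = 4 over a
	  512-byte ECC step:

		#include <linux/bch.h>
		#include <linux/bitops.h>

		static int sw_bch_fix_step(u8 *step, u8 *read_ecc)
		{
			/* GF(2^13) is wide enough for 512 data bytes plus ECC */
			struct bch_control *bch = bch_init(13, 4, 0, false);
			unsigned int errloc[4];
			int nerr, i;

			if (!bch)
				return -ENOMEM;

			/* returns the number of bitflips, < 0 if uncorrectable */
			nerr = bch_decode(bch, step, 512, read_ecc, NULL, NULL, errloc);
			for (i = 0; i < nerr; i++)
				if (errloc[i] < 512 * 8)	/* only patch data bits */
					step[errloc[i] >> 3] ^= BIT(errloc[i] & 7);

			bch_free(bch);
			return nerr;
		}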
+
+comment "Raw/parallel NAND flash controllers"
+
+config MTD_NAND_DENALI
+ tristate
+
+config MTD_NAND_DENALI_PCI
+ tristate "Denali NAND controller on Intel Moorestown"
+ select MTD_NAND_DENALI
+ depends on PCI
+ help
+ Enable the driver for NAND flash on Intel Moorestown, using the
+ Denali NAND controller core.
+
+config MTD_NAND_DENALI_DT
+ tristate "Denali NAND controller as a DT device"
+ select MTD_NAND_DENALI
+ depends on HAS_DMA && HAVE_CLK && OF
+ help
+ Enable the driver for NAND flash on platforms using a Denali NAND
+ controller as a DT device.
+
+config MTD_NAND_AMS_DELTA
+ tristate "Amstrad E3 NAND controller"
+ depends on MACH_AMS_DELTA || COMPILE_TEST
+ default y
+ help
+ Support for NAND flash on Amstrad E3 (Delta).
+
+config MTD_NAND_OMAP2
+ tristate "OMAP2, OMAP3, OMAP4 and Keystone NAND controller"
+ depends on ARCH_OMAP2PLUS || ARCH_KEYSTONE || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ Support for NAND flash on Texas Instruments OMAP2, OMAP3, OMAP4
+ and Keystone platforms.
+
+config MTD_NAND_OMAP_BCH
+ depends on MTD_NAND_OMAP2
+ bool "Support hardware based BCH error correction"
+ default n
+ select BCH
+ help
+	  This config enables the ELM hardware engine, which can be used to
+	  locate and correct errors when using the BCH ECC scheme. This
+	  offloads the CPU from doing ECC error searching and correction.
+	  However, some legacy OMAP families such as OMAP2xxx and OMAP3xxx
+	  do not have an ELM engine, so this is optional for them.
+
+config MTD_NAND_OMAP_BCH_BUILD
+ def_tristate MTD_NAND_OMAP2 && MTD_NAND_OMAP_BCH
+
+config MTD_NAND_AU1550
+ tristate "Au1550/1200 NAND support"
+ depends on MIPS_ALCHEMY
+ help
+ This enables the driver for the NAND flash controller on the
+ AMD/Alchemy 1550 SOC.
+
+config MTD_NAND_NDFC
+ tristate "IBM/MCC 4xx NAND controller"
+ depends on 4xx
+ select MTD_NAND_ECC_SW_HAMMING_SMC
+ help
+	  NDFC NAND Flash Controllers are integrated in IBM/AMCC's 4xx SoCs.
+
+config MTD_NAND_S3C2410
+ tristate "Samsung S3C NAND controller"
+ depends on ARCH_S3C24XX || ARCH_S3C64XX
+ help
+ This enables the NAND flash controller on the S3C24xx and S3C64xx
+ SoCs
+
+ No board specific support is done by this driver, each board
+ must advertise a platform_device for the driver to attach.
+
+config MTD_NAND_S3C2410_DEBUG
+ bool "Samsung S3C NAND controller debug"
+ depends on MTD_NAND_S3C2410
+ help
+ Enable debugging of the S3C NAND driver
+
+config MTD_NAND_S3C2410_CLKSTOP
+ bool "Samsung S3C NAND IDLE clock stop"
+ depends on MTD_NAND_S3C2410
+ default n
+ help
+	  Stop the clock to the NAND controller when there is no chip
+	  selected to save power. This will mean there is a small delay
+	  when the NAND chip is selected or released, but will save
+	  approximately 5 mA of power when there is nothing happening.
+
+config MTD_NAND_TANGO
+ tristate "Tango NAND controller"
+ depends on ARCH_TANGO || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ Enables the NAND Flash controller on Tango chips.
+
+config MTD_NAND_SHARPSL
+ tristate "Sharp SL Series (C7xx + others) NAND controller"
+ depends on ARCH_PXA || COMPILE_TEST
+ depends on HAS_IOMEM
+
+config MTD_NAND_CAFE
+ tristate "OLPC CAFÉ NAND controller"
+ depends on PCI
+ select REED_SOLOMON
+ select REED_SOLOMON_DEC16
+ help
+ Use NAND flash attached to the CAFÉ chip designed for the OLPC
+ laptop.
+
+config MTD_NAND_CS553X
+ tristate "CS5535/CS5536 (AMD Geode companion) NAND controller"
+ depends on X86_32
+ depends on !UML && HAS_IOMEM
+ help
+ The CS553x companion chips for the AMD Geode processor
+ include NAND flash controllers with built-in hardware ECC
+ capabilities; enabling this option will allow you to use
+ these. The driver will check the MSRs to verify that the
+ controller is enabled for NAND, and currently requires that
+ the controller be in MMIO mode.
+
+ If you say "m", the module will be called cs553x_nand.
+
+config MTD_NAND_ATMEL
+ tristate "Atmel AT91 NAND Flash/SmartMedia NAND controller"
+ depends on ARCH_AT91 || COMPILE_TEST
+ depends on HAS_IOMEM
+ select GENERIC_ALLOCATOR
+ select MFD_ATMEL_SMC
+ help
+ Enables support for NAND Flash / Smart Media Card interface
+ on Atmel AT91 processors.
+
+config MTD_NAND_ORION
+ tristate "Marvell Orion NAND controller"
+ depends on PLAT_ORION
+ help
+ This enables the NAND flash controller on Orion machines.
+
+ No board specific support is done by this driver, each board
+ must advertise a platform_device for the driver to attach.
+
+config MTD_NAND_MARVELL
+ tristate "Marvell EBU NAND controller"
+ depends on PXA3xx || ARCH_MMP || PLAT_ORION || ARCH_MVEBU || \
+ COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ This enables the NAND flash controller driver for Marvell boards,
+ including:
+ - PXA3xx processors (NFCv1)
+ - 32-bit Armada platforms (XP, 37x, 38x, 39x) (NFCv2)
+	    - 64-bit Armada platforms (7k, 8k) (NFCv2)
+
+config MTD_NAND_SLC_LPC32XX
+ tristate "NXP LPC32xx SLC NAND controller"
+ depends on ARCH_LPC32XX || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ Enables support for NXP's LPC32XX SLC (i.e. for Single Level Cell
+ chips) NAND controller. This is the default for the PHYTEC 3250
+ reference board which contains a NAND256R3A2CZA6 chip.
+
+ Please check the actual NAND chip connected and its support
+ by the SLC NAND controller.
+
+config MTD_NAND_MLC_LPC32XX
+ tristate "NXP LPC32xx MLC NAND controller"
+ depends on ARCH_LPC32XX || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ Uses the LPC32XX MLC (i.e. for Multi Level Cell chips) NAND
+ controller. This is the default for the WORK92105 controller
+ board.
+
+ Please check the actual NAND chip connected and its support
+ by the MLC NAND controller.
+
+config MTD_NAND_PASEMI
+ tristate "PA Semi PWRficient NAND controller"
+ depends on PPC_PASEMI
+ help
+ Enables support for NAND Flash interface on PA Semi PWRficient
+ based boards
+
+config MTD_NAND_TMIO
+ tristate "Toshiba Mobile IO NAND controller"
+ depends on MFD_TMIO
+ help
+ Support for NAND flash connected to a Toshiba Mobile IO
+ Controller in some PDAs, including the Sharp SL6000x.
+
+config MTD_NAND_BRCMNAND
+ tristate "Broadcom STB NAND controller"
+ depends on ARM || ARM64 || MIPS || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ Enables the Broadcom NAND controller driver. The controller was
+ originally designed for Set-Top Box but is used on various BCM7xxx,
+ BCM3xxx, BCM63xxx, iProc/Cygnus and more.
+
+config MTD_NAND_BCM47XXNFLASH
+ tristate "BCM4706 BCMA NAND controller"
+ depends on BCMA_NFLASH
+ depends on BCMA
+ help
+	  The BCMA bus can have various flash memories attached; they are
+	  registered by bcma as platform devices. This enables the driver for
+	  NAND flash memories. For now only BCM4706 is supported.
+
+config MTD_NAND_OXNAS
+ tristate "Oxford Semiconductor NAND controller"
+ depends on ARCH_OXNAS || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ This enables the NAND flash controller on Oxford Semiconductor SoCs.
+
+config MTD_NAND_MPC5121_NFC
+ tristate "MPC5121 NAND controller"
+ depends on PPC_MPC512x
+ help
+ This enables the driver for the NAND flash controller on the
+ MPC5121 SoC.
+
+config MTD_NAND_GPMI_NAND
+ tristate "Freescale GPMI NAND controller"
+ depends on MXS_DMA
+ help
+	  Enables NAND Flash support for IMX23, IMX28 or IMX6.
+	  The GPMI controller is very powerful: with the help of the BCH
+	  module, it can do hardware ECC. The GPMI supports several
+	  NAND flash chips at the same time.
+
+config MTD_NAND_FSL_ELBC
+ tristate "Freescale eLBC NAND controller"
+ depends on FSL_SOC
+ select FSL_LBC
+ help
+ Various Freescale chips, including the 8313, include a NAND Flash
+ Controller Module with built-in hardware ECC capabilities.
+ Enabling this option will enable you to use this to control
+ external NAND devices.
+
+config MTD_NAND_FSL_IFC
+ tristate "Freescale IFC NAND controller"
+ depends on FSL_SOC || ARCH_LAYERSCAPE || SOC_LS1021A || COMPILE_TEST
+ depends on HAS_IOMEM
+ select FSL_IFC
+ select MEMORY
+ help
+	  Various Freescale chips, e.g. the P1010, include a NAND Flash machine
+ with built-in hardware ECC capabilities.
+ Enabling this option will enable you to use this to control
+ external NAND devices.
+
+config MTD_NAND_FSL_UPM
+ tristate "Freescale UPM NAND controller"
+ depends on PPC_83xx || PPC_85xx
+ select FSL_LBC
+ help
+ Enables support for NAND Flash chips wired onto Freescale PowerPC
+ processor localbus with User-Programmable Machine support.
+
+config MTD_NAND_VF610_NFC
+ tristate "Freescale VF610/MPC5125 NAND controller"
+ depends on (SOC_VF610 || COMPILE_TEST)
+ depends on HAS_IOMEM
+ help
+ Enables support for NAND Flash Controller on some Freescale
+ processors like the VF610, MPC5125, MCF54418 or Kinetis K70.
+ The driver supports a maximum 2k page size. With 2k pages and
+ 64 bytes or more of OOB, hardware ECC with up to 32-bit error
+ correction is supported. Hardware ECC is only enabled through
+ device tree.
+
+config MTD_NAND_MXC
+ tristate "Freescale MXC NAND controller"
+ depends on ARCH_MXC || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ This enables the driver for the NAND flash controller on the
+ MXC processors.
+
+config MTD_NAND_SH_FLCTL
+ tristate "Renesas SuperH FLCTL NAND controller"
+ depends on SUPERH || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+	  Several Renesas SuperH CPUs have an FLCTL. This option enables
+	  support for NAND Flash using the FLCTL.
+
+config MTD_NAND_DAVINCI
+ tristate "DaVinci/Keystone NAND controller"
+ depends on ARCH_DAVINCI || (ARCH_KEYSTONE && TI_AEMIF) || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ Enable the driver for NAND flash chips on Texas Instruments
+ DaVinci/Keystone processors.
+
+config MTD_NAND_TXX9NDFMC
+ tristate "TXx9 NAND controller"
+ depends on SOC_TX4938 || SOC_TX4939 || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ This enables the NAND flash controller on the TXx9 SoCs.
+
+config MTD_NAND_SOCRATES
+ tristate "Socrates NAND controller"
+ depends on SOCRATES
+ help
+ Enables support for NAND Flash chips wired onto Socrates board.
+
+source "drivers/mtd/nand/raw/ingenic/Kconfig"
+
+config MTD_NAND_FSMC
+	tristate "ST Micro's FSMC NAND controller"
+ depends on OF && HAS_IOMEM
+ depends on PLAT_SPEAR || ARCH_NOMADIK || ARCH_U8500 || MACH_U300 || \
+ COMPILE_TEST
+ help
+ Enables support for NAND Flash chips on the ST Microelectronics
+ Flexible Static Memory Controller (FSMC)
+
+config MTD_NAND_XWAY
+ bool "Lantiq XWAY NAND controller"
+ depends on LANTIQ && SOC_TYPE_XWAY
+ help
+ Enables support for NAND Flash chips on Lantiq XWAY SoCs. NAND is attached
+ to the External Bus Unit (EBU).
+
+config MTD_NAND_SUNXI
+ tristate "Allwinner NAND controller"
+ depends on ARCH_SUNXI || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ Enables support for NAND Flash chips on Allwinner SoCs.
+
+config MTD_NAND_HISI504
+ tristate "Hisilicon Hip04 NAND controller"
+ depends on ARCH_HISI || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ Enables support for NAND controller on Hisilicon SoC Hip04.
+
+config MTD_NAND_QCOM
+ tristate "QCOM NAND controller"
+ depends on ARCH_QCOM || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ Enables support for NAND flash chips on SoCs containing the EBI2 NAND
+ controller. This controller is found on IPQ806x SoC.
+
+config MTD_NAND_MTK
+ tristate "MTK NAND controller"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ Enables support for NAND controller on MTK SoCs.
+ This controller is found on mt27xx, mt81xx, mt65xx SoCs.
+
+config MTD_NAND_MXIC
+ tristate "Macronix raw NAND controller"
+ depends on HAS_IOMEM || COMPILE_TEST
+ help
+ This selects the Macronix raw NAND controller driver.
+
+config MTD_NAND_TEGRA
+ tristate "NVIDIA Tegra NAND controller"
+ depends on ARCH_TEGRA || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ Enables support for NAND flash controller on NVIDIA Tegra SoC.
+ The driver has been developed and tested on a Tegra 2 SoC. DMA
+ support, raw read/write page as well as HW ECC read/write page
+ is supported. Extra OOB bytes when using HW ECC are currently
+ not supported.
+
+config MTD_NAND_STM32_FMC2
+ tristate "Support for NAND controller on STM32MP SoCs"
+ depends on MACH_STM32MP157 || COMPILE_TEST
+ select MFD_SYSCON
+ help
+ Enables support for NAND Flash chips on SoCs containing the FMC2
+ NAND controller. This controller is found on STM32MP SoCs.
+	  The controller supports a maximum 8k page size and up to 8 bits
+	  of error correction per 512-byte sector.
+
+config MTD_NAND_MESON
+ tristate "Support for NAND controller on Amlogic's Meson SoCs"
+ depends on ARCH_MESON || COMPILE_TEST
+ select MFD_SYSCON
+ help
+	  Enables support for the NAND controller found on Amlogic's Meson
+	  SoCs.
+
+config MTD_NAND_GPIO
+ tristate "GPIO assisted NAND controller"
+ depends on GPIOLIB || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ This enables a NAND flash driver where control signals are
+ connected to GPIO pins, and commands and data are communicated
+ via a memory mapped interface.
+
+config MTD_NAND_PLATFORM
+ tristate "Generic NAND controller"
+ depends on HAS_IOMEM
+ help
+	  This implements a generic NAND driver for on-SoC platform
+ devices. You will need to provide platform-specific functions
+ via platform_data.
+
+config MTD_NAND_CADENCE
+ tristate "Support Cadence NAND (HPNFC) controller"
+ depends on (OF || COMPILE_TEST) && HAS_IOMEM
+ help
+ Enable the driver for NAND flash on platforms using a Cadence NAND
+ controller.
+
+config MTD_NAND_ARASAN
+ tristate "Support for Arasan NAND flash controller"
+ depends on HAS_IOMEM && HAS_DMA
+ select BCH
+ help
+ Enables the driver for the Arasan NAND flash controller on
+ Zynq Ultrascale+ MPSoC.
+
+comment "Misc"
+
+config MTD_SM_COMMON
+ tristate
+ default n
+
+config MTD_NAND_NANDSIM
+ tristate "Support for NAND Flash Simulator"
+ help
+ The simulator may simulate various NAND flash chips for the
+	  MTD NAND layer.
+
+config MTD_NAND_RICOH
+ tristate "Ricoh xD card reader"
+ default n
+ depends on PCI
+ select MTD_SM_COMMON
+ help
+	  Enable support for the Ricoh R5C852 xD card reader.
+	  You also need to enable either the
+	  'NAND SSFDC (SmartMedia) read only translation layer' or the new,
+	  experimental, read-write
+	  'SmartMedia/xD new translation layer'.
+
+config MTD_NAND_DISKONCHIP
+ tristate "DiskOnChip 2000, Millennium and Millennium Plus (NAND reimplementation)"
+ depends on HAS_IOMEM
+ select REED_SOLOMON
+ select REED_SOLOMON_DEC16
+ help
+ This is a reimplementation of M-Systems DiskOnChip 2000,
+ Millennium and Millennium Plus as a standard NAND device driver,
+ as opposed to the earlier self-contained MTD device drivers.
+ This should enable, among other things, proper JFFS2 operation on
+ these devices.
+
+config MTD_NAND_DISKONCHIP_PROBE_ADVANCED
+ bool "Advanced detection options for DiskOnChip"
+ depends on MTD_NAND_DISKONCHIP
+ help
+	  This option allows you to specify a nonstandard address at which to
+ probe for a DiskOnChip, or to change the detection options. You
+ are unlikely to need any of this unless you are using LinuxBIOS.
+ Say 'N'.
+
+config MTD_NAND_DISKONCHIP_PROBE_ADDRESS
+ hex "Physical address of DiskOnChip" if MTD_NAND_DISKONCHIP_PROBE_ADVANCED
+ depends on MTD_NAND_DISKONCHIP
+ default "0"
+ help
+ By default, the probe for DiskOnChip devices will look for a
+ DiskOnChip at every multiple of 0x2000 between 0xC8000 and 0xEE000.
+ This option allows you to specify a single address at which to probe
+ for the device, which is useful if you have other devices in that
+ range which get upset when they are probed.
+
+ (Note that on PowerPC, the normal probe will only check at
+ 0xE4000000.)
+
+ Normally, you should leave this set to zero, to allow the probe at
+ the normal addresses.
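
	  For illustration, a minimal sketch of the default scan described
	  above, assuming a hypothetical probe helper (the name doc_probe()
	  is illustrative, not necessarily the driver's actual symbol):

		unsigned long off;

		/* try each 8 KiB slot of the legacy ISA window */
		for (off = 0xC8000; off <= 0xEE000; off += 0x2000)
			doc_probe(off);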
+
+config MTD_NAND_DISKONCHIP_PROBE_HIGH
+ bool "Probe high addresses"
+ depends on MTD_NAND_DISKONCHIP_PROBE_ADVANCED
+ help
+ By default, the probe for DiskOnChip devices will look for a
+ DiskOnChip at every multiple of 0x2000 between 0xC8000 and 0xEE000.
+	  This option changes the probe range to 0xFFFC8000 through
+	  0xFFFEE000. Unless you are using LinuxBIOS, this is unlikely to be
+ useful to you. Say 'N'.
+
+config MTD_NAND_DISKONCHIP_BBTWRITE
+ bool "Allow BBT writes on DiskOnChip Millennium and 2000TSOP"
+ depends on MTD_NAND_DISKONCHIP
+ help
+ On DiskOnChip devices shipped with the INFTL filesystem (Millennium
+ and 2000 TSOP/Alon), Linux reserves some space at the end of the
+ device for the Bad Block Table (BBT). If you have existing INFTL
+ data on your device (created by non-Linux tools such as M-Systems'
+ DOS drivers), your data might overlap the area Linux wants to use for
+ the BBT. If this is a concern for you, leave this option disabled and
+ Linux will not write BBT data into this area.
+ The downside of leaving this option disabled is that if bad blocks
+ are detected by Linux, they will not be recorded in the BBT, which
+ could cause future problems.
+ Once you enable this option, new filesystems (INFTL or others, created
+ in Linux or other operating systems) will not use the reserved area.
+ The only reason not to enable this option is to prevent damage to
+ preexisting filesystems.
+ Even if you leave this disabled, you can enable BBT writes at module
+ load time (assuming you build diskonchip as a module) with the module
+ parameter "inftl_bbt_write=1".
+
+endif # MTD_RAW_NAND
diff --git a/drivers/mtd/nand/raw/Makefile b/drivers/mtd/nand/raw/Makefile
new file mode 100644
index 000000000..2930f5b90
--- /dev/null
+++ b/drivers/mtd/nand/raw/Makefile
@@ -0,0 +1,71 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_MTD_RAW_NAND) += nand.o
+obj-$(CONFIG_MTD_NAND_ECC_SW_HAMMING) += nand_ecc.o
+nand-$(CONFIG_MTD_NAND_ECC_SW_BCH) += nand_bch.o
+obj-$(CONFIG_MTD_SM_COMMON) += sm_common.o
+
+obj-$(CONFIG_MTD_NAND_CAFE) += cafe_nand.o
+obj-$(CONFIG_MTD_NAND_AMS_DELTA) += ams-delta.o
+obj-$(CONFIG_MTD_NAND_DENALI) += denali.o
+obj-$(CONFIG_MTD_NAND_DENALI_PCI) += denali_pci.o
+obj-$(CONFIG_MTD_NAND_DENALI_DT) += denali_dt.o
+obj-$(CONFIG_MTD_NAND_AU1550) += au1550nd.o
+obj-$(CONFIG_MTD_NAND_S3C2410) += s3c2410.o
+obj-$(CONFIG_MTD_NAND_TANGO) += tango_nand.o
+obj-$(CONFIG_MTD_NAND_DAVINCI) += davinci_nand.o
+obj-$(CONFIG_MTD_NAND_DISKONCHIP) += diskonchip.o
+obj-$(CONFIG_MTD_NAND_FSMC) += fsmc_nand.o
+obj-$(CONFIG_MTD_NAND_SHARPSL) += sharpsl.o
+obj-$(CONFIG_MTD_NAND_NANDSIM) += nandsim.o
+obj-$(CONFIG_MTD_NAND_CS553X) += cs553x_nand.o
+obj-$(CONFIG_MTD_NAND_NDFC) += ndfc.o
+obj-$(CONFIG_MTD_NAND_ATMEL) += atmel/
+obj-$(CONFIG_MTD_NAND_GPIO) += gpio.o
+omap2_nand-objs := omap2.o
+obj-$(CONFIG_MTD_NAND_OMAP2) += omap2_nand.o
+obj-$(CONFIG_MTD_NAND_OMAP_BCH_BUILD) += omap_elm.o
+obj-$(CONFIG_MTD_NAND_MARVELL) += marvell_nand.o
+obj-$(CONFIG_MTD_NAND_TMIO) += tmio_nand.o
+obj-$(CONFIG_MTD_NAND_PLATFORM) += plat_nand.o
+obj-$(CONFIG_MTD_NAND_PASEMI) += pasemi_nand.o
+obj-$(CONFIG_MTD_NAND_ORION) += orion_nand.o
+obj-$(CONFIG_MTD_NAND_OXNAS) += oxnas_nand.o
+obj-$(CONFIG_MTD_NAND_FSL_ELBC) += fsl_elbc_nand.o
+obj-$(CONFIG_MTD_NAND_FSL_IFC) += fsl_ifc_nand.o
+obj-$(CONFIG_MTD_NAND_FSL_UPM) += fsl_upm.o
+obj-$(CONFIG_MTD_NAND_SLC_LPC32XX) += lpc32xx_slc.o
+obj-$(CONFIG_MTD_NAND_MLC_LPC32XX) += lpc32xx_mlc.o
+obj-$(CONFIG_MTD_NAND_SH_FLCTL) += sh_flctl.o
+obj-$(CONFIG_MTD_NAND_MXC) += mxc_nand.o
+obj-$(CONFIG_MTD_NAND_SOCRATES) += socrates_nand.o
+obj-$(CONFIG_MTD_NAND_TXX9NDFMC) += txx9ndfmc.o
+obj-$(CONFIG_MTD_NAND_MPC5121_NFC) += mpc5121_nfc.o
+obj-$(CONFIG_MTD_NAND_VF610_NFC) += vf610_nfc.o
+obj-$(CONFIG_MTD_NAND_RICOH) += r852.o
+obj-y += ingenic/
+obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi-nand/
+obj-$(CONFIG_MTD_NAND_XWAY) += xway_nand.o
+obj-$(CONFIG_MTD_NAND_BCM47XXNFLASH) += bcm47xxnflash/
+obj-$(CONFIG_MTD_NAND_SUNXI) += sunxi_nand.o
+obj-$(CONFIG_MTD_NAND_HISI504) += hisi504_nand.o
+obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand/
+obj-$(CONFIG_MTD_NAND_QCOM) += qcom_nandc.o
+obj-$(CONFIG_MTD_NAND_MTK) += mtk_ecc.o mtk_nand.o
+obj-$(CONFIG_MTD_NAND_MXIC) += mxic_nand.o
+obj-$(CONFIG_MTD_NAND_TEGRA) += tegra_nand.o
+obj-$(CONFIG_MTD_NAND_STM32_FMC2) += stm32_fmc2_nand.o
+obj-$(CONFIG_MTD_NAND_MESON) += meson_nand.o
+obj-$(CONFIG_MTD_NAND_CADENCE) += cadence-nand-controller.o
+obj-$(CONFIG_MTD_NAND_ARASAN) += arasan-nand-controller.o
+
+nand-objs := nand_base.o nand_legacy.o nand_bbt.o nand_timings.o nand_ids.o
+nand-objs += nand_onfi.o
+nand-objs += nand_jedec.o
+nand-objs += nand_amd.o
+nand-objs += nand_esmt.o
+nand-objs += nand_hynix.o
+nand-objs += nand_macronix.o
+nand-objs += nand_micron.o
+nand-objs += nand_samsung.o
+nand-objs += nand_toshiba.o
diff --git a/drivers/mtd/nand/raw/ams-delta.c b/drivers/mtd/nand/raw/ams-delta.c
new file mode 100644
index 000000000..13de39aa3
--- /dev/null
+++ b/drivers/mtd/nand/raw/ams-delta.c
@@ -0,0 +1,449 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2006 Jonathan McDowell <noodles@earth.li>
+ *
+ * Derived from drivers/mtd/nand/toto.c (removed in v2.6.28)
+ * Copyright (c) 2003 Texas Instruments
+ * Copyright (c) 2002 Thomas Gleixner <tglx@linutronix.de>
+ *
+ * Converted to platform driver by Janusz Krzysztofik <jkrzyszt@tis.icnet.pl>
+ * Partially stolen from plat_nand.c
+ *
+ * Overview:
+ * This is a device driver for the NAND flash device found on the
+ * Amstrad E3 (Delta).
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand-gpio.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/sizes.h>
+
+/*
+ * MTD structure for E3 (Delta)
+ */
+struct gpio_nand {
+ struct nand_controller base;
+ struct nand_chip nand_chip;
+ struct gpio_desc *gpiod_rdy;
+ struct gpio_desc *gpiod_nce;
+ struct gpio_desc *gpiod_nre;
+ struct gpio_desc *gpiod_nwp;
+ struct gpio_desc *gpiod_nwe;
+ struct gpio_desc *gpiod_ale;
+ struct gpio_desc *gpiod_cle;
+ struct gpio_descs *data_gpiods;
+ bool data_in;
+ unsigned int tRP;
+ unsigned int tWP;
+ u8 (*io_read)(struct gpio_nand *this);
+ void (*io_write)(struct gpio_nand *this, u8 byte);
+};
+
+static void gpio_nand_write_commit(struct gpio_nand *priv)
+{
+ gpiod_set_value(priv->gpiod_nwe, 1);
+ ndelay(priv->tWP);
+ gpiod_set_value(priv->gpiod_nwe, 0);
+}
+
+static void gpio_nand_io_write(struct gpio_nand *priv, u8 byte)
+{
+ struct gpio_descs *data_gpiods = priv->data_gpiods;
+ DECLARE_BITMAP(values, BITS_PER_TYPE(byte)) = { byte, };
+
+ gpiod_set_raw_array_value(data_gpiods->ndescs, data_gpiods->desc,
+ data_gpiods->info, values);
+
+ gpio_nand_write_commit(priv);
+}
+
+static void gpio_nand_dir_output(struct gpio_nand *priv, u8 byte)
+{
+ struct gpio_descs *data_gpiods = priv->data_gpiods;
+ DECLARE_BITMAP(values, BITS_PER_TYPE(byte)) = { byte, };
+ int i;
+
+ for (i = 0; i < data_gpiods->ndescs; i++)
+ gpiod_direction_output_raw(data_gpiods->desc[i],
+ test_bit(i, values));
+
+ gpio_nand_write_commit(priv);
+
+ priv->data_in = false;
+}
+
+static u8 gpio_nand_io_read(struct gpio_nand *priv)
+{
+ u8 res;
+ struct gpio_descs *data_gpiods = priv->data_gpiods;
+ DECLARE_BITMAP(values, BITS_PER_TYPE(res)) = { 0, };
+
+ gpiod_set_value(priv->gpiod_nre, 1);
+ ndelay(priv->tRP);
+
+ gpiod_get_raw_array_value(data_gpiods->ndescs, data_gpiods->desc,
+ data_gpiods->info, values);
+
+ gpiod_set_value(priv->gpiod_nre, 0);
+
+ res = values[0];
+ return res;
+}
+
+static void gpio_nand_dir_input(struct gpio_nand *priv)
+{
+ struct gpio_descs *data_gpiods = priv->data_gpiods;
+ int i;
+
+ for (i = 0; i < data_gpiods->ndescs; i++)
+ gpiod_direction_input(data_gpiods->desc[i]);
+
+ priv->data_in = true;
+}
+
+static void gpio_nand_write_buf(struct gpio_nand *priv, const u8 *buf, int len)
+{
+ int i = 0;
+
+ if (len > 0 && priv->data_in)
+ gpio_nand_dir_output(priv, buf[i++]);
+
+ while (i < len)
+ priv->io_write(priv, buf[i++]);
+}
+
+static void gpio_nand_read_buf(struct gpio_nand *priv, u8 *buf, int len)
+{
+ int i;
+
+ if (priv->data_gpiods && !priv->data_in)
+ gpio_nand_dir_input(priv);
+
+ for (i = 0; i < len; i++)
+ buf[i] = priv->io_read(priv);
+}
+
+static void gpio_nand_ctrl_cs(struct gpio_nand *priv, bool assert)
+{
+ gpiod_set_value(priv->gpiod_nce, assert);
+}
+
+static int gpio_nand_exec_op(struct nand_chip *this,
+ const struct nand_operation *op, bool check_only)
+{
+ struct gpio_nand *priv = nand_get_controller_data(this);
+ const struct nand_op_instr *instr;
+ int ret = 0;
+
+ if (check_only)
+ return 0;
+
+ gpio_nand_ctrl_cs(priv, 1);
+
+ for (instr = op->instrs; instr < op->instrs + op->ninstrs; instr++) {
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ gpiod_set_value(priv->gpiod_cle, 1);
+ gpio_nand_write_buf(priv, &instr->ctx.cmd.opcode, 1);
+ gpiod_set_value(priv->gpiod_cle, 0);
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ gpiod_set_value(priv->gpiod_ale, 1);
+ gpio_nand_write_buf(priv, instr->ctx.addr.addrs,
+ instr->ctx.addr.naddrs);
+ gpiod_set_value(priv->gpiod_ale, 0);
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ gpio_nand_read_buf(priv, instr->ctx.data.buf.in,
+ instr->ctx.data.len);
+ break;
+
+ case NAND_OP_DATA_OUT_INSTR:
+ gpio_nand_write_buf(priv, instr->ctx.data.buf.out,
+ instr->ctx.data.len);
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ ret = priv->gpiod_rdy ?
+ nand_gpio_waitrdy(this, priv->gpiod_rdy,
+ instr->ctx.waitrdy.timeout_ms) :
+ nand_soft_waitrdy(this,
+ instr->ctx.waitrdy.timeout_ms);
+ break;
+ }
+
+ if (ret)
+ break;
+ }
+
+ gpio_nand_ctrl_cs(priv, 0);
+
+ return ret;
+}
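+
+/*
+ * For reference, a typical page read dispatched through
+ * gpio_nand_exec_op() arrives from the rawnand core as a sequence such
+ * as CMD(READ0) -> ADDR cycles -> WAITRDY -> DATA_IN (page + OOB), each
+ * instruction toggling the GPIO lines as in the switch cases above.
+ */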
+
+static int gpio_nand_setup_interface(struct nand_chip *this, int csline,
+ const struct nand_interface_config *cf)
+{
+ struct gpio_nand *priv = nand_get_controller_data(this);
+ const struct nand_sdr_timings *sdr = nand_get_sdr_timings(cf);
+ struct device *dev = &nand_to_mtd(this)->dev;
+
+ if (IS_ERR(sdr))
+ return PTR_ERR(sdr);
+
+ if (csline == NAND_DATA_IFACE_CHECK_ONLY)
+ return 0;
+
+ if (priv->gpiod_nre) {
+ priv->tRP = DIV_ROUND_UP(sdr->tRP_min, 1000);
+ dev_dbg(dev, "using %u ns read pulse width\n", priv->tRP);
+ }
+
+ priv->tWP = DIV_ROUND_UP(sdr->tWP_min, 1000);
+ dev_dbg(dev, "using %u ns write pulse width\n", priv->tWP);
+
+ return 0;
+}
+
+static int gpio_nand_attach_chip(struct nand_chip *chip)
+{
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
+ chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN)
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+
+ return 0;
+}
+
+static const struct nand_controller_ops gpio_nand_ops = {
+ .exec_op = gpio_nand_exec_op,
+ .attach_chip = gpio_nand_attach_chip,
+ .setup_interface = gpio_nand_setup_interface,
+};
+
+/*
+ * Main initialization routine
+ */
+static int gpio_nand_probe(struct platform_device *pdev)
+{
+ struct gpio_nand_platdata *pdata = dev_get_platdata(&pdev->dev);
+ const struct mtd_partition *partitions = NULL;
+ int num_partitions = 0;
+ struct gpio_nand *priv;
+ struct nand_chip *this;
+ struct mtd_info *mtd;
+ int (*probe)(struct platform_device *pdev, struct gpio_nand *priv);
+ int err = 0;
+
+ if (pdata) {
+ partitions = pdata->parts;
+ num_partitions = pdata->num_parts;
+ }
+
+ /* Allocate memory for MTD device structure and private data */
+ priv = devm_kzalloc(&pdev->dev, sizeof(struct gpio_nand),
+ GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ this = &priv->nand_chip;
+
+ mtd = nand_to_mtd(this);
+ mtd->dev.parent = &pdev->dev;
+
+ nand_set_controller_data(this, priv);
+ nand_set_flash_node(this, pdev->dev.of_node);
+
+ priv->gpiod_rdy = devm_gpiod_get_optional(&pdev->dev, "rdy", GPIOD_IN);
+ if (IS_ERR(priv->gpiod_rdy)) {
+ err = PTR_ERR(priv->gpiod_rdy);
+ dev_warn(&pdev->dev, "RDY GPIO request failed (%d)\n", err);
+ return err;
+ }
+
+ platform_set_drvdata(pdev, priv);
+
+ /* Set chip enabled but write protected */
+ priv->gpiod_nwp = devm_gpiod_get_optional(&pdev->dev, "nwp",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(priv->gpiod_nwp)) {
+ err = PTR_ERR(priv->gpiod_nwp);
+ dev_err(&pdev->dev, "NWP GPIO request failed (%d)\n", err);
+ return err;
+ }
+
+ priv->gpiod_nce = devm_gpiod_get_optional(&pdev->dev, "nce",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(priv->gpiod_nce)) {
+ err = PTR_ERR(priv->gpiod_nce);
+ dev_err(&pdev->dev, "NCE GPIO request failed (%d)\n", err);
+ return err;
+ }
+
+ priv->gpiod_nre = devm_gpiod_get_optional(&pdev->dev, "nre",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(priv->gpiod_nre)) {
+ err = PTR_ERR(priv->gpiod_nre);
+ dev_err(&pdev->dev, "NRE GPIO request failed (%d)\n", err);
+ return err;
+ }
+
+ priv->gpiod_nwe = devm_gpiod_get_optional(&pdev->dev, "nwe",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(priv->gpiod_nwe)) {
+ err = PTR_ERR(priv->gpiod_nwe);
+ dev_err(&pdev->dev, "NWE GPIO request failed (%d)\n", err);
+ return err;
+ }
+
+ priv->gpiod_ale = devm_gpiod_get(&pdev->dev, "ale", GPIOD_OUT_LOW);
+ if (IS_ERR(priv->gpiod_ale)) {
+ err = PTR_ERR(priv->gpiod_ale);
+ dev_err(&pdev->dev, "ALE GPIO request failed (%d)\n", err);
+ return err;
+ }
+
+ priv->gpiod_cle = devm_gpiod_get(&pdev->dev, "cle", GPIOD_OUT_LOW);
+ if (IS_ERR(priv->gpiod_cle)) {
+ err = PTR_ERR(priv->gpiod_cle);
+ dev_err(&pdev->dev, "CLE GPIO request failed (%d)\n", err);
+ return err;
+ }
+
+ /* Request array of data pins, initialize them as input */
+ priv->data_gpiods = devm_gpiod_get_array_optional(&pdev->dev, "data",
+ GPIOD_IN);
+ if (IS_ERR(priv->data_gpiods)) {
+ err = PTR_ERR(priv->data_gpiods);
+ dev_err(&pdev->dev, "data GPIO request failed: %d\n", err);
+ return err;
+ }
+ if (priv->data_gpiods) {
+ if (!priv->gpiod_nwe) {
+ dev_err(&pdev->dev,
+ "mandatory NWE pin not provided by platform\n");
+ return -ENODEV;
+ }
+
+ priv->io_read = gpio_nand_io_read;
+ priv->io_write = gpio_nand_io_write;
+ priv->data_in = true;
+ }
+
+ if (pdev->id_entry)
+ probe = (void *) pdev->id_entry->driver_data;
+ else
+ probe = of_device_get_match_data(&pdev->dev);
+ if (probe)
+ err = probe(pdev, priv);
+ if (err)
+ return err;
+
+ if (!priv->io_read || !priv->io_write) {
+ dev_err(&pdev->dev, "incomplete device configuration\n");
+ return -ENODEV;
+ }
+
+ /* Initialize the NAND controller object embedded in gpio_nand. */
+ priv->base.ops = &gpio_nand_ops;
+ nand_controller_init(&priv->base);
+ this->controller = &priv->base;
+
+ /*
+ * FIXME: We should release write protection only after nand_scan() to
+ * be on the safe side but we can't do that until we have a generic way
+ * to assert/deassert WP from the core. Even if the core shouldn't
+ * write things in the nand_scan() path, it should have control on this
+ * pin just in case we ever need to disable write protection during
+ * chip detection/initialization.
+ */
+ /* Release write protection */
+ gpiod_set_value(priv->gpiod_nwp, 0);
+
+ /*
+ * This driver assumes that the default ECC engine should be TYPE_SOFT.
+ * Set ->engine_type before registering the NAND devices in order to
+ * provide a driver specific default value.
+ */
+ this->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+
+ /* Scan to find existence of the device */
+ err = nand_scan(this, 1);
+ if (err)
+ return err;
+
+ /* Register the partitions */
+ err = mtd_device_register(mtd, partitions, num_partitions);
+ if (err)
+ goto err_nand_cleanup;
+
+ return 0;
+
+err_nand_cleanup:
+ nand_cleanup(this);
+
+ return err;
+}
+
+/*
+ * Clean up routine
+ */
+static int gpio_nand_remove(struct platform_device *pdev)
+{
+ struct gpio_nand *priv = platform_get_drvdata(pdev);
+ struct mtd_info *mtd = nand_to_mtd(&priv->nand_chip);
+ int ret;
+
+ /* Apply write protection */
+ gpiod_set_value(priv->gpiod_nwp, 1);
+
+ /* Unregister device */
+ ret = mtd_device_unregister(mtd);
+ WARN_ON(ret);
+ nand_cleanup(mtd_to_nand(mtd));
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id gpio_nand_of_id_table[] = {
+ {
+ /* sentinel */
+ },
+};
+MODULE_DEVICE_TABLE(of, gpio_nand_of_id_table);
+#endif
+
+static const struct platform_device_id gpio_nand_plat_id_table[] = {
+ {
+ .name = "ams-delta-nand",
+ }, {
+ /* sentinel */
+ },
+};
+MODULE_DEVICE_TABLE(platform, gpio_nand_plat_id_table);
+
+static struct platform_driver gpio_nand_driver = {
+ .probe = gpio_nand_probe,
+ .remove = gpio_nand_remove,
+ .id_table = gpio_nand_plat_id_table,
+ .driver = {
+ .name = "ams-delta-nand",
+ .of_match_table = of_match_ptr(gpio_nand_of_id_table),
+ },
+};
+
+module_platform_driver(gpio_nand_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jonathan McDowell <noodles@earth.li>");
+MODULE_DESCRIPTION("Glue layer for NAND flash on Amstrad E3 (Delta)");
diff --git a/drivers/mtd/nand/raw/arasan-nand-controller.c b/drivers/mtd/nand/raw/arasan-nand-controller.c
new file mode 100644
index 000000000..ef062b5ea
--- /dev/null
+++ b/drivers/mtd/nand/raw/arasan-nand-controller.c
@@ -0,0 +1,1372 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Arasan NAND Flash Controller Driver
+ *
+ * Copyright (C) 2014 - 2020 Xilinx, Inc.
+ * Author:
+ * Miquel Raynal <miquel.raynal@bootlin.com>
+ * Original work (fully rewritten):
+ * Punnaiah Choudary Kalluri <punnaia@xilinx.com>
+ * Naga Sureshkumar Relli <nagasure@xilinx.com>
+ */
+
+#include <linux/bch.h>
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define PKT_REG 0x00
+#define PKT_SIZE(x) FIELD_PREP(GENMASK(10, 0), (x))
+#define PKT_STEPS(x) FIELD_PREP(GENMASK(23, 12), (x))
+
+#define MEM_ADDR1_REG 0x04
+
+#define MEM_ADDR2_REG 0x08
+#define ADDR2_STRENGTH(x) FIELD_PREP(GENMASK(27, 25), (x))
+#define ADDR2_CS(x) FIELD_PREP(GENMASK(31, 30), (x))
+
+#define CMD_REG 0x0C
+#define CMD_1(x) FIELD_PREP(GENMASK(7, 0), (x))
+#define CMD_2(x) FIELD_PREP(GENMASK(15, 8), (x))
+#define CMD_PAGE_SIZE(x) FIELD_PREP(GENMASK(25, 23), (x))
+#define CMD_DMA_ENABLE BIT(27)
+#define CMD_NADDRS(x) FIELD_PREP(GENMASK(30, 28), (x))
+#define CMD_ECC_ENABLE BIT(31)
+
+#define PROG_REG 0x10
+#define PROG_PGRD BIT(0)
+#define PROG_ERASE BIT(2)
+#define PROG_STATUS BIT(3)
+#define PROG_PGPROG BIT(4)
+#define PROG_RDID BIT(6)
+#define PROG_RDPARAM BIT(7)
+#define PROG_RST BIT(8)
+#define PROG_GET_FEATURE BIT(9)
+#define PROG_SET_FEATURE BIT(10)
+
+#define INTR_STS_EN_REG 0x14
+#define INTR_SIG_EN_REG 0x18
+#define INTR_STS_REG 0x1C
+#define WRITE_READY BIT(0)
+#define READ_READY BIT(1)
+#define XFER_COMPLETE BIT(2)
+#define DMA_BOUNDARY BIT(6)
+#define EVENT_MASK GENMASK(7, 0)
+
+#define READY_STS_REG 0x20
+
+#define DMA_ADDR0_REG 0x50
+#define DMA_ADDR1_REG 0x24
+
+#define FLASH_STS_REG 0x28
+
+#define DATA_PORT_REG 0x30
+
+#define ECC_CONF_REG 0x34
+#define ECC_CONF_COL(x) FIELD_PREP(GENMASK(15, 0), (x))
+#define ECC_CONF_LEN(x) FIELD_PREP(GENMASK(26, 16), (x))
+#define ECC_CONF_BCH_EN BIT(27)
+
+#define ECC_ERR_CNT_REG 0x38
+#define GET_PKT_ERR_CNT(x) FIELD_GET(GENMASK(7, 0), (x))
+#define GET_PAGE_ERR_CNT(x) FIELD_GET(GENMASK(16, 8), (x))
+
+#define ECC_SP_REG 0x3C
+#define ECC_SP_CMD1(x) FIELD_PREP(GENMASK(7, 0), (x))
+#define ECC_SP_CMD2(x) FIELD_PREP(GENMASK(15, 8), (x))
+#define ECC_SP_ADDRS(x) FIELD_PREP(GENMASK(30, 28), (x))
+
+#define ECC_1ERR_CNT_REG 0x40
+#define ECC_2ERR_CNT_REG 0x44
+
+#define DATA_INTERFACE_REG 0x6C
+#define DIFACE_SDR_MODE(x) FIELD_PREP(GENMASK(2, 0), (x))
+#define DIFACE_DDR_MODE(x) FIELD_PREP(GENMASK(5, 3), (x))
+#define DIFACE_SDR 0
+#define DIFACE_NVDDR BIT(9)
+
+#define ANFC_MAX_CS 2
+#define ANFC_DFLT_TIMEOUT_US 1000000
+#define ANFC_MAX_CHUNK_SIZE SZ_1M
+#define ANFC_MAX_PARAM_SIZE SZ_4K
+#define ANFC_MAX_STEPS SZ_2K
+#define ANFC_MAX_PKT_SIZE (SZ_2K - 1)
+#define ANFC_MAX_ADDR_CYC 5U
+#define ANFC_RSVD_ECC_BYTES 21
+
+#define ANFC_XLNX_SDR_DFLT_CORE_CLK 100000000
+#define ANFC_XLNX_SDR_HS_CORE_CLK 80000000
+
+/**
+ * struct anfc_op - Defines how to execute an operation
+ * @pkt_reg: Packet register
+ * @addr1_reg: Memory address 1 register
+ * @addr2_reg: Memory address 2 register
+ * @cmd_reg: Command register
+ * @prog_reg: Program register
+ * @steps: Number of "packets" to read/write
+ * @rdy_timeout_ms: Timeout for waits on Ready/Busy pin
+ * @len: Data transfer length
+ * @read: Data transfer direction from the controller point of view
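+ * @buf: Data buffer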
+ */
+struct anfc_op {
+ u32 pkt_reg;
+ u32 addr1_reg;
+ u32 addr2_reg;
+ u32 cmd_reg;
+ u32 prog_reg;
+ int steps;
+ unsigned int rdy_timeout_ms;
+ unsigned int len;
+ bool read;
+ u8 *buf;
+};
+
+/**
+ * struct anand - Defines the NAND chip related information
+ * @node: Used to store NAND chips into a list
+ * @chip: NAND chip information structure
+ * @cs: Chip select line
+ * @rb: Ready-busy line
+ * @page_sz: Register value of the page_sz field to use
+ * @clk: Expected clock frequency to use
+ * @timings: Data interface timing mode to use
+ * @ecc_conf: Hardware ECC configuration value
+ * @strength: Register value of the ECC strength
+ * @raddr_cycles: Row address cycle information
+ * @caddr_cycles: Column address cycle information
+ * @ecc_bits: Exact number of ECC bits per syndrome
+ * @ecc_total: Total number of ECC bytes
+ * @errloc: Array of errors located with soft BCH
+ * @hw_ecc: Buffer to store syndromes computed by hardware
+ * @bch: BCH structure
+ */
+struct anand {
+ struct list_head node;
+ struct nand_chip chip;
+ unsigned int cs;
+ unsigned int rb;
+ unsigned int page_sz;
+ unsigned long clk;
+ u32 timings;
+ u32 ecc_conf;
+ u32 strength;
+ u16 raddr_cycles;
+ u16 caddr_cycles;
+ unsigned int ecc_bits;
+ unsigned int ecc_total;
+ unsigned int *errloc;
+ u8 *hw_ecc;
+ struct bch_control *bch;
+};
+
+/**
+ * struct arasan_nfc - Defines the Arasan NAND flash controller driver instance
+ * @dev: Pointer to the device structure
+ * @base: Remapped register area
+ * @controller_clk: Pointer to the system clock
+ * @bus_clk: Pointer to the flash clock
+ * @controller: Base controller structure
+ * @chips: List of all NAND chips attached to the controller
+ * @assigned_cs: Bitmask describing already assigned CS lines
+ * @cur_clk: Current clock rate
+ */
+struct arasan_nfc {
+ struct device *dev;
+ void __iomem *base;
+ struct clk *controller_clk;
+ struct clk *bus_clk;
+ struct nand_controller controller;
+ struct list_head chips;
+ unsigned long assigned_cs;
+ unsigned int cur_clk;
+};
+
+static struct anand *to_anand(struct nand_chip *nand)
+{
+ return container_of(nand, struct anand, chip);
+}
+
+static struct arasan_nfc *to_anfc(struct nand_controller *ctrl)
+{
+ return container_of(ctrl, struct arasan_nfc, controller);
+}
+
+static int anfc_wait_for_event(struct arasan_nfc *nfc, unsigned int event)
+{
+ u32 val;
+ int ret;
+
+ ret = readl_relaxed_poll_timeout(nfc->base + INTR_STS_REG, val,
+ val & event, 0,
+ ANFC_DFLT_TIMEOUT_US);
+ if (ret) {
+ dev_err(nfc->dev, "Timeout waiting for event 0x%x\n", event);
+ return -ETIMEDOUT;
+ }
+
+ writel_relaxed(event, nfc->base + INTR_STS_REG);
+
+ return 0;
+}
+
+static int anfc_wait_for_rb(struct arasan_nfc *nfc, struct nand_chip *chip,
+ unsigned int timeout_ms)
+{
+ struct anand *anand = to_anand(chip);
+ u32 val;
+ int ret;
+
+ /* There is no R/B interrupt, we must poll a register */
+ ret = readl_relaxed_poll_timeout(nfc->base + READY_STS_REG, val,
+ val & BIT(anand->rb),
+ 1, timeout_ms * 1000);
+ if (ret) {
+ dev_err(nfc->dev, "Timeout waiting for R/B 0x%x\n",
+ readl_relaxed(nfc->base + READY_STS_REG));
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void anfc_trigger_op(struct arasan_nfc *nfc, struct anfc_op *nfc_op)
+{
+ writel_relaxed(nfc_op->pkt_reg, nfc->base + PKT_REG);
+ writel_relaxed(nfc_op->addr1_reg, nfc->base + MEM_ADDR1_REG);
+ writel_relaxed(nfc_op->addr2_reg, nfc->base + MEM_ADDR2_REG);
+ writel_relaxed(nfc_op->cmd_reg, nfc->base + CMD_REG);
+ writel_relaxed(nfc_op->prog_reg, nfc->base + PROG_REG);
+}
+
+static int anfc_pkt_len_config(unsigned int len, unsigned int *steps,
+ unsigned int *pktsize)
+{
+ unsigned int nb, sz;
+
+ for (nb = 1; nb < ANFC_MAX_STEPS; nb *= 2) {
+ sz = len / nb;
+ if (sz <= ANFC_MAX_PKT_SIZE)
+ break;
+ }
+
+ if (sz * nb != len)
+ return -ENOTSUPP;
+
+ if (steps)
+ *steps = nb;
+
+ if (pktsize)
+ *pktsize = sz;
+
+ return 0;
+}
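+
+/*
+ * Worked example: with ANFC_MAX_PKT_SIZE = 2047, a 2048-byte transfer
+ * tries nb = 1 (sz = 2048, too big), then nb = 2 (sz = 1024, fits), so
+ * it is programmed as 2 packets of 1024 bytes. A length such as 2049
+ * can never satisfy sz * nb == len with nb a power of two, so it is
+ * rejected with -ENOTSUPP.
+ */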
+
+static int anfc_select_target(struct nand_chip *chip, int target)
+{
+ struct anand *anand = to_anand(chip);
+ struct arasan_nfc *nfc = to_anfc(chip->controller);
+ int ret;
+
+ /* Update the controller timings and the potential ECC configuration */
+ writel_relaxed(anand->timings, nfc->base + DATA_INTERFACE_REG);
+
+ /* Update clock frequency */
+ if (nfc->cur_clk != anand->clk) {
+ clk_disable_unprepare(nfc->bus_clk);
+ ret = clk_set_rate(nfc->bus_clk, anand->clk);
+ if (ret) {
+ dev_err(nfc->dev, "Failed to change clock rate\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(nfc->bus_clk);
+ if (ret) {
+ dev_err(nfc->dev,
+ "Failed to re-enable the bus clock\n");
+ return ret;
+ }
+
+ nfc->cur_clk = anand->clk;
+ }
+
+ return 0;
+}
+
+/*
+ * When using the embedded hardware ECC engine, the controller is in charge of
+ * feeding the engine with, first, the ECC residue present in the data array.
+ * A typical read operation is:
+ * 1/ Assert the read operation by sending the relevant command/address cycles
+ * but targeting the column of the first ECC bytes in the OOB area instead of
+ * the main data directly.
+ * 2/ After having read the relevant number of ECC bytes, the controller uses
+ * the RNDOUT/RNDSTART commands which are set into the "ECC Spare Command
+ * Register" to move the pointer back at the beginning of the main data.
+ * 3/ It will read the content of the main area for a given size (pktsize) and
+ * will feed the ECC engine with this buffer again.
+ * 4/ The ECC engine derives the ECC bytes for the given data and compares
+ *    them with the ones already received. It eventually triggers status
+ *    flags and then sets the "Buffer Read Ready" flag.
+ * 5/ The corrected data is then available for reading from the data port
+ * register.
+ *
+ * The hardware BCH ECC engine is known to be inconsistent in BCH mode and never
+ * reports uncorrectable errors. Because of this bug, we have to use the
+ * software BCH implementation in the read path.
+ */
+static int anfc_read_page_hw_ecc(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ struct arasan_nfc *nfc = to_anfc(chip->controller);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct anand *anand = to_anand(chip);
+ unsigned int len = mtd->writesize + (oob_required ? mtd->oobsize : 0);
+ unsigned int max_bitflips = 0;
+ dma_addr_t dma_addr;
+ int step, ret;
+ struct anfc_op nfc_op = {
+ .pkt_reg =
+ PKT_SIZE(chip->ecc.size) |
+ PKT_STEPS(chip->ecc.steps),
+ .addr1_reg =
+ (page & 0xFF) << (8 * (anand->caddr_cycles)) |
+ (((page >> 8) & 0xFF) << (8 * (1 + anand->caddr_cycles))),
+ .addr2_reg =
+ ((page >> 16) & 0xFF) |
+ ADDR2_STRENGTH(anand->strength) |
+ ADDR2_CS(anand->cs),
+ .cmd_reg =
+ CMD_1(NAND_CMD_READ0) |
+ CMD_2(NAND_CMD_READSTART) |
+ CMD_PAGE_SIZE(anand->page_sz) |
+ CMD_DMA_ENABLE |
+ CMD_NADDRS(anand->caddr_cycles +
+ anand->raddr_cycles),
+ .prog_reg = PROG_PGRD,
+ };
+
+ dma_addr = dma_map_single(nfc->dev, (void *)buf, len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(nfc->dev, dma_addr)) {
+ dev_err(nfc->dev, "Buffer mapping error");
+ return -EIO;
+ }
+
+ writel_relaxed(lower_32_bits(dma_addr), nfc->base + DMA_ADDR0_REG);
+ writel_relaxed(upper_32_bits(dma_addr), nfc->base + DMA_ADDR1_REG);
+
+ anfc_trigger_op(nfc, &nfc_op);
+
+ ret = anfc_wait_for_event(nfc, XFER_COMPLETE);
+ dma_unmap_single(nfc->dev, dma_addr, len, DMA_FROM_DEVICE);
+ if (ret) {
+ dev_err(nfc->dev, "Error reading page %d\n", page);
+ return ret;
+ }
+
+ /* Store the raw OOB bytes as well */
+ ret = nand_change_read_column_op(chip, mtd->writesize, chip->oob_poi,
+ mtd->oobsize, 0);
+ if (ret)
+ return ret;
+
+	/*
+	 * For each step, compute the BCH syndrome over the raw data in
+	 * software and compare the number of errors found with the
+	 * hardware engine's feedback.
+	 */
+ for (step = 0; step < chip->ecc.steps; step++) {
+ u8 *raw_buf = &buf[step * chip->ecc.size];
+ unsigned int bit, byte;
+ int bf, i;
+
+ /* Extract the syndrome, it is not necessarily aligned */
+ memset(anand->hw_ecc, 0, chip->ecc.bytes);
+ nand_extract_bits(anand->hw_ecc, 0,
+ &chip->oob_poi[mtd->oobsize - anand->ecc_total],
+ anand->ecc_bits * step, anand->ecc_bits);
+
+ bf = bch_decode(anand->bch, raw_buf, chip->ecc.size,
+ anand->hw_ecc, NULL, NULL, anand->errloc);
+ if (!bf) {
+ continue;
+ } else if (bf > 0) {
+ for (i = 0; i < bf; i++) {
+ /* Only correct the data, not the syndrome */
+ if (anand->errloc[i] < (chip->ecc.size * 8)) {
+ bit = BIT(anand->errloc[i] & 7);
+ byte = anand->errloc[i] >> 3;
+ raw_buf[byte] ^= bit;
+ }
+ }
+
+ mtd->ecc_stats.corrected += bf;
+ max_bitflips = max_t(unsigned int, max_bitflips, bf);
+
+ continue;
+ }
+
+ bf = nand_check_erased_ecc_chunk(raw_buf, chip->ecc.size,
+ NULL, 0, NULL, 0,
+ chip->ecc.strength);
+ if (bf > 0) {
+ mtd->ecc_stats.corrected += bf;
+ max_bitflips = max_t(unsigned int, max_bitflips, bf);
+ memset(raw_buf, 0xFF, chip->ecc.size);
+ } else if (bf < 0) {
+ mtd->ecc_stats.failed++;
+ }
+ }
+
+ return 0;
+}
+
+static int anfc_sel_read_page_hw_ecc(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ int ret;
+
+ ret = anfc_select_target(chip, chip->cur_cs);
+ if (ret)
+ return ret;
+
+ return anfc_read_page_hw_ecc(chip, buf, oob_required, page);
+}
+
+static int anfc_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
+ int oob_required, int page)
+{
+ struct anand *anand = to_anand(chip);
+ struct arasan_nfc *nfc = to_anfc(chip->controller);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned int len = mtd->writesize + (oob_required ? mtd->oobsize : 0);
+ dma_addr_t dma_addr;
+ u8 status;
+ int ret;
+ struct anfc_op nfc_op = {
+ .pkt_reg =
+ PKT_SIZE(chip->ecc.size) |
+ PKT_STEPS(chip->ecc.steps),
+ .addr1_reg =
+ (page & 0xFF) << (8 * (anand->caddr_cycles)) |
+ (((page >> 8) & 0xFF) << (8 * (1 + anand->caddr_cycles))),
+ .addr2_reg =
+ ((page >> 16) & 0xFF) |
+ ADDR2_STRENGTH(anand->strength) |
+ ADDR2_CS(anand->cs),
+ .cmd_reg =
+ CMD_1(NAND_CMD_SEQIN) |
+ CMD_2(NAND_CMD_PAGEPROG) |
+ CMD_PAGE_SIZE(anand->page_sz) |
+ CMD_DMA_ENABLE |
+ CMD_NADDRS(anand->caddr_cycles +
+ anand->raddr_cycles) |
+ CMD_ECC_ENABLE,
+ .prog_reg = PROG_PGPROG,
+ };
+
+ writel_relaxed(anand->ecc_conf, nfc->base + ECC_CONF_REG);
+ writel_relaxed(ECC_SP_CMD1(NAND_CMD_RNDIN) |
+ ECC_SP_ADDRS(anand->caddr_cycles),
+ nfc->base + ECC_SP_REG);
+
+ dma_addr = dma_map_single(nfc->dev, (void *)buf, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(nfc->dev, dma_addr)) {
+ dev_err(nfc->dev, "Buffer mapping error");
+ return -EIO;
+ }
+
+ writel_relaxed(lower_32_bits(dma_addr), nfc->base + DMA_ADDR0_REG);
+ writel_relaxed(upper_32_bits(dma_addr), nfc->base + DMA_ADDR1_REG);
+
+ anfc_trigger_op(nfc, &nfc_op);
+ ret = anfc_wait_for_event(nfc, XFER_COMPLETE);
+ dma_unmap_single(nfc->dev, dma_addr, len, DMA_TO_DEVICE);
+ if (ret) {
+ dev_err(nfc->dev, "Error writing page %d\n", page);
+ return ret;
+ }
+
+ /* Spare data is not protected */
+ if (oob_required) {
+ ret = nand_write_oob_std(chip, page);
+ if (ret)
+ return ret;
+ }
+
+ /* Check write status on the chip side */
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
+ return 0;
+}
+
+static int anfc_sel_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
+ int oob_required, int page)
+{
+ int ret;
+
+ ret = anfc_select_target(chip, chip->cur_cs);
+ if (ret)
+ return ret;
+
+ return anfc_write_page_hw_ecc(chip, buf, oob_required, page);
+}
+
+/* NAND framework ->exec_op() hooks and related helpers */
+static int anfc_parse_instructions(struct nand_chip *chip,
+ const struct nand_subop *subop,
+ struct anfc_op *nfc_op)
+{
+ struct anand *anand = to_anand(chip);
+ const struct nand_op_instr *instr = NULL;
+ bool first_cmd = true;
+ unsigned int op_id;
+ int ret, i;
+
+ memset(nfc_op, 0, sizeof(*nfc_op));
+ nfc_op->addr2_reg = ADDR2_CS(anand->cs);
+ nfc_op->cmd_reg = CMD_PAGE_SIZE(anand->page_sz);
+
+ for (op_id = 0; op_id < subop->ninstrs; op_id++) {
+ unsigned int offset, naddrs, pktsize;
+ const u8 *addrs;
+ u8 *buf;
+
+ instr = &subop->instrs[op_id];
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ if (first_cmd)
+ nfc_op->cmd_reg |= CMD_1(instr->ctx.cmd.opcode);
+ else
+ nfc_op->cmd_reg |= CMD_2(instr->ctx.cmd.opcode);
+
+ first_cmd = false;
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ offset = nand_subop_get_addr_start_off(subop, op_id);
+ naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
+ addrs = &instr->ctx.addr.addrs[offset];
+ nfc_op->cmd_reg |= CMD_NADDRS(naddrs);
+
+ for (i = 0; i < min(ANFC_MAX_ADDR_CYC, naddrs); i++) {
+ if (i < 4)
+ nfc_op->addr1_reg |= (u32)addrs[i] << i * 8;
+ else
+ nfc_op->addr2_reg |= addrs[i];
+ }
+
+ break;
+ case NAND_OP_DATA_IN_INSTR:
+ nfc_op->read = true;
+ fallthrough;
+ case NAND_OP_DATA_OUT_INSTR:
+ offset = nand_subop_get_data_start_off(subop, op_id);
+ buf = instr->ctx.data.buf.in;
+ nfc_op->buf = &buf[offset];
+ nfc_op->len = nand_subop_get_data_len(subop, op_id);
+ ret = anfc_pkt_len_config(nfc_op->len, &nfc_op->steps,
+ &pktsize);
+ if (ret)
+ return ret;
+
+ /*
+ * Number of DATA cycles must be aligned on 4, this
+ * means the controller might read/write more than
+ * requested. This is harmless most of the time as extra
+ * DATA are discarded in the write path and read pointer
+ * adjusted in the read path.
+ *
+ * FIXME: The core should mark operations where
+ * reading/writing more is allowed so the exec_op()
+ * implementation can take the right decision when the
+ * alignment constraint is not met: adjust the number of
+ * DATA cycles when it's allowed, reject the operation
+ * otherwise.
+ */
+ nfc_op->pkt_reg |= PKT_SIZE(round_up(pktsize, 4)) |
+ PKT_STEPS(nfc_op->steps);
+ break;
+ case NAND_OP_WAITRDY_INSTR:
+ nfc_op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int anfc_rw_pio_op(struct arasan_nfc *nfc, struct anfc_op *nfc_op)
+{
+ unsigned int dwords = (nfc_op->len / 4) / nfc_op->steps;
+ unsigned int last_len = nfc_op->len % 4;
+ unsigned int offset, dir;
+ u8 *buf = nfc_op->buf;
+ int ret, i;
+
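+ /*
+ * The transfer is split in (steps) packets: the controller raises
+ * READ_READY/WRITE_READY before each packet, which is then moved
+ * through the data port as (dwords) 32-bit accesses.
+ */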
+ for (i = 0; i < nfc_op->steps; i++) {
+ dir = nfc_op->read ? READ_READY : WRITE_READY;
+ ret = anfc_wait_for_event(nfc, dir);
+ if (ret) {
+ dev_err(nfc->dev, "PIO %s ready signal not received\n",
+ nfc_op->read ? "Read" : "Write");
+ return ret;
+ }
+
+ offset = i * (dwords * 4);
+ if (nfc_op->read)
+ ioread32_rep(nfc->base + DATA_PORT_REG, &buf[offset],
+ dwords);
+ else
+ iowrite32_rep(nfc->base + DATA_PORT_REG, &buf[offset],
+ dwords);
+ }
+
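+ /* Transfer the trailing (len % 4) bytes with a single 32-bit access. */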
+ if (last_len) {
+ u32 remainder;
+
+ offset = nfc_op->len - last_len;
+
+ if (nfc_op->read) {
+ remainder = readl_relaxed(nfc->base + DATA_PORT_REG);
+ memcpy(&buf[offset], &remainder, last_len);
+ } else {
+ memcpy(&remainder, &buf[offset], last_len);
+ writel_relaxed(remainder, nfc->base + DATA_PORT_REG);
+ }
+ }
+
+ return anfc_wait_for_event(nfc, XFER_COMPLETE);
+}
+
+static int anfc_misc_data_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop,
+ u32 prog_reg)
+{
+ struct arasan_nfc *nfc = to_anfc(chip->controller);
+ struct anfc_op nfc_op = {};
+ int ret;
+
+ ret = anfc_parse_instructions(chip, subop, &nfc_op);
+ if (ret)
+ return ret;
+
+ nfc_op.prog_reg = prog_reg;
+ anfc_trigger_op(nfc, &nfc_op);
+
+ if (nfc_op.rdy_timeout_ms) {
+ ret = anfc_wait_for_rb(nfc, chip, nfc_op.rdy_timeout_ms);
+ if (ret)
+ return ret;
+ }
+
+ return anfc_rw_pio_op(nfc, &nfc_op);
+}
+
+static int anfc_param_read_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ return anfc_misc_data_type_exec(chip, subop, PROG_RDPARAM);
+}
+
+static int anfc_data_read_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ return anfc_misc_data_type_exec(chip, subop, PROG_PGRD);
+}
+
+static int anfc_param_write_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ return anfc_misc_data_type_exec(chip, subop, PROG_SET_FEATURE);
+}
+
+static int anfc_data_write_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ return anfc_misc_data_type_exec(chip, subop, PROG_PGPROG);
+}
+
+static int anfc_misc_zerolen_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop,
+ u32 prog_reg)
+{
+ struct arasan_nfc *nfc = to_anfc(chip->controller);
+ struct anfc_op nfc_op = {};
+ int ret;
+
+ ret = anfc_parse_instructions(chip, subop, &nfc_op);
+ if (ret)
+ return ret;
+
+ nfc_op.prog_reg = prog_reg;
+ anfc_trigger_op(nfc, &nfc_op);
+
+ ret = anfc_wait_for_event(nfc, XFER_COMPLETE);
+ if (ret)
+ return ret;
+
+ if (nfc_op.rdy_timeout_ms)
+ ret = anfc_wait_for_rb(nfc, chip, nfc_op.rdy_timeout_ms);
+
+ return ret;
+}
+
+static int anfc_status_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct arasan_nfc *nfc = to_anfc(chip->controller);
+ u32 tmp;
+ int ret;
+
+ /* See anfc_check_op() for details about this constraint */
+ if (subop->instrs[0].ctx.cmd.opcode != NAND_CMD_STATUS)
+ return -ENOTSUPP;
+
+ ret = anfc_misc_zerolen_type_exec(chip, subop, PROG_STATUS);
+ if (ret)
+ return ret;
+
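+ /*
+ * The NAND status is a single byte latched by the controller in
+ * FLASH_STS, hence the one-byte copy from the 32-bit register.
+ */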
+ tmp = readl_relaxed(nfc->base + FLASH_STS_REG);
+ memcpy(subop->instrs[1].ctx.data.buf.in, &tmp, 1);
+
+ return 0;
+}
+
+static int anfc_reset_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ return anfc_misc_zerolen_type_exec(chip, subop, PROG_RST);
+}
+
+static int anfc_erase_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ return anfc_misc_zerolen_type_exec(chip, subop, PROG_ERASE);
+}
+
+static int anfc_wait_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct arasan_nfc *nfc = to_anfc(chip->controller);
+ struct anfc_op nfc_op = {};
+ int ret;
+
+ ret = anfc_parse_instructions(chip, subop, &nfc_op);
+ if (ret)
+ return ret;
+
+ return anfc_wait_for_rb(nfc, chip, nfc_op.rdy_timeout_ms);
+}
+
+static const struct nand_op_parser anfc_op_parser = NAND_OP_PARSER(
+ NAND_OP_PARSER_PATTERN(
+ anfc_param_read_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYC),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, ANFC_MAX_CHUNK_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_param_write_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYC),
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, ANFC_MAX_PARAM_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_data_read_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYC),
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, ANFC_MAX_CHUNK_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_data_write_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYC),
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, ANFC_MAX_CHUNK_SIZE),
+ NAND_OP_PARSER_PAT_CMD_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_reset_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_erase_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYC),
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_status_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, ANFC_MAX_CHUNK_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_wait_type_exec,
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ );
+
+static int anfc_check_op(struct nand_chip *chip,
+ const struct nand_operation *op)
+{
+ const struct nand_op_instr *instr;
+ int op_id;
+
+ /*
+ * The controller abstracts all the NAND operations and does
+ * not support data-only operations.
+ *
+ * TODO: The nand_op_parser framework should be extended to
+ * support custom checks on DATA instructions.
+ */
+ for (op_id = 0; op_id < op->ninstrs; op_id++) {
+ instr = &op->instrs[op_id];
+
+ switch (instr->type) {
+ case NAND_OP_ADDR_INSTR:
+ if (instr->ctx.addr.naddrs > ANFC_MAX_ADDR_CYC)
+ return -ENOTSUPP;
+
+ break;
+ case NAND_OP_DATA_IN_INSTR:
+ case NAND_OP_DATA_OUT_INSTR:
+ if (instr->ctx.data.len > ANFC_MAX_CHUNK_SIZE)
+ return -ENOTSUPP;
+
+ if (anfc_pkt_len_config(instr->ctx.data.len, NULL, NULL))
+ return -ENOTSUPP;
+
+ break;
+ default:
+ break;
+ }
+ }
+
+ /*
+ * The controller does not allow a CMD+DATA_IN cycle to be performed
+ * manually on the bus by reading data from the data register.
+ * Instead, it abstracts the status read with its own status register
+ * once a read status operation has been ordered. Hence, we cannot
+ * support any CMD+DATA_IN operation other than a READ STATUS.
+ *
+ * TODO: The nand_op_parser() framework should be extended to describe
+ * fixed patterns instead of open-coding this check here.
+ */
+ if (op->ninstrs == 2 &&
+ op->instrs[0].type == NAND_OP_CMD_INSTR &&
+ op->instrs[0].ctx.cmd.opcode != NAND_CMD_STATUS &&
+ op->instrs[1].type == NAND_OP_DATA_IN_INSTR)
+ return -ENOTSUPP;
+
+ return nand_op_parser_exec_op(chip, &anfc_op_parser, op, true);
+}
+
+static int anfc_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ int ret;
+
+ if (check_only)
+ return anfc_check_op(chip, op);
+
+ ret = anfc_select_target(chip, op->cs);
+ if (ret)
+ return ret;
+
+ return nand_op_parser_exec_op(chip, &anfc_op_parser, op, check_only);
+}
+
+static int anfc_setup_interface(struct nand_chip *chip, int target,
+ const struct nand_interface_config *conf)
+{
+ struct anand *anand = to_anand(chip);
+ struct arasan_nfc *nfc = to_anfc(chip->controller);
+ struct device_node *np = nfc->dev->of_node;
+ const struct nand_sdr_timings *sdr;
+ const struct nand_nvddr_timings *nvddr;
+
+ if (nand_interface_is_nvddr(conf)) {
+ nvddr = nand_get_nvddr_timings(conf);
+ if (IS_ERR(nvddr))
+ return PTR_ERR(nvddr);
+
+ /*
+ * The controller only supports data payload requests which are
+ * a multiple of 4. In practice, most data accesses are 4-byte
+ * aligned and this is not an issue. However, rounding up will
+ * simply be refused by the controller if we have reached the
+ * end of the device *and* we are using the NV-DDR interface(!).
+ * In this situation, unaligned data requests ending at the
+ * device boundary will confuse the controller and cannot be
+ * performed.
+ *
+ * This is something that happens in nand_read_subpage() when
+ * selecting software ECC support and must be avoided.
+ */
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT)
+ return -ENOTSUPP;
+ } else {
+ sdr = nand_get_sdr_timings(conf);
+ if (IS_ERR(sdr))
+ return PTR_ERR(sdr);
+ }
+
+ if (target < 0)
+ return 0;
+
+ if (nand_interface_is_sdr(conf))
+ anand->timings = DIFACE_SDR |
+ DIFACE_SDR_MODE(conf->timings.mode);
+ else
+ anand->timings = DIFACE_NVDDR |
+ DIFACE_DDR_MODE(conf->timings.mode);
+
+ if (nand_interface_is_sdr(conf)) {
+ anand->clk = ANFC_XLNX_SDR_DFLT_CORE_CLK;
+ } else {
+ /* ONFI timings are defined in picoseconds */
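+ /* e.g. tCK_min = 10000 ps yields a 100 MHz core clock */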
+ anand->clk = div_u64((u64)NSEC_PER_SEC * 1000,
+ conf->timings.nvddr.tCK_min);
+ }
+
+ /*
+ * Due to a hardware bug in the ZynqMP SoC, SDR timing modes 0-1 work
+ * with f > 90MHz (default clock is 100MHz) but signals are unstable
+ * with higher modes. Hence we slightly decrease the clock rate to
+ * 80MHz when using SDR modes 2-5 with this SoC.
+ */
+ if (of_device_is_compatible(np, "xlnx,zynqmp-nand-controller") &&
+ nand_interface_is_sdr(conf) && conf->timings.mode >= 2)
+ anand->clk = ANFC_XLNX_SDR_HS_CORE_CLK;
+
+ return 0;
+}
+
+static int anfc_calc_hw_ecc_bytes(int step_size, int strength)
+{
+ unsigned int bch_gf_mag, ecc_bits;
+
+ switch (step_size) {
+ case SZ_512:
+ bch_gf_mag = 13;
+ break;
+ case SZ_1K:
+ bch_gf_mag = 14;
+ break;
+ default:
+ return -EINVAL;
+ }
+
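+ /*
+ * e.g. 512-byte steps (GF(2^13)) with strength 12 need
+ * 13 * 12 = 156 ECC bits, i.e. 20 ECC bytes per step.
+ */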
+ ecc_bits = bch_gf_mag * strength;
+
+ return DIV_ROUND_UP(ecc_bits, 8);
+}
+
+static const int anfc_hw_ecc_512_strengths[] = {4, 8, 12};
+
+static const int anfc_hw_ecc_1024_strengths[] = {24};
+
+static const struct nand_ecc_step_info anfc_hw_ecc_step_infos[] = {
+ {
+ .stepsize = SZ_512,
+ .strengths = anfc_hw_ecc_512_strengths,
+ .nstrengths = ARRAY_SIZE(anfc_hw_ecc_512_strengths),
+ },
+ {
+ .stepsize = SZ_1K,
+ .strengths = anfc_hw_ecc_1024_strengths,
+ .nstrengths = ARRAY_SIZE(anfc_hw_ecc_1024_strengths),
+ },
+};
+
+static const struct nand_ecc_caps anfc_hw_ecc_caps = {
+ .stepinfos = anfc_hw_ecc_step_infos,
+ .nstepinfos = ARRAY_SIZE(anfc_hw_ecc_step_infos),
+ .calc_ecc_bytes = anfc_calc_hw_ecc_bytes,
+};
+
+static int anfc_init_hw_ecc_controller(struct arasan_nfc *nfc,
+ struct nand_chip *chip)
+{
+ struct anand *anand = to_anand(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ unsigned int bch_prim_poly = 0, bch_gf_mag = 0, ecc_offset;
+ int ret;
+
+ switch (mtd->writesize) {
+ case SZ_512:
+ case SZ_2K:
+ case SZ_4K:
+ case SZ_8K:
+ case SZ_16K:
+ break;
+ default:
+ dev_err(nfc->dev, "Unsupported page size %d\n", mtd->writesize);
+ return -EINVAL;
+ }
+
+ ret = nand_ecc_choose_conf(chip, &anfc_hw_ecc_caps, mtd->oobsize);
+ if (ret)
+ return ret;
+
+ switch (ecc->strength) {
+ case 12:
+ anand->strength = 0x1;
+ break;
+ case 8:
+ anand->strength = 0x2;
+ break;
+ case 4:
+ anand->strength = 0x3;
+ break;
+ case 24:
+ anand->strength = 0x4;
+ break;
+ default:
+ dev_err(nfc->dev, "Unsupported strength %d\n", ecc->strength);
+ return -EINVAL;
+ }
+
+ switch (ecc->size) {
+ case SZ_512:
+ bch_gf_mag = 13;
+ bch_prim_poly = 0x201b;
+ break;
+ case SZ_1K:
+ bch_gf_mag = 14;
+ bch_prim_poly = 0x4443;
+ break;
+ default:
+ dev_err(nfc->dev, "Unsupported step size %d\n", ecc->strength);
+ return -EINVAL;
+ }
+
+ mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout());
+
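+ /*
+ * Compute the ECC geometry: all the ECC bytes are packed at the very
+ * end of the OOB area, ECC_CONF_COL pointing at the first of them.
+ */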
+ ecc->steps = mtd->writesize / ecc->size;
+ ecc->algo = NAND_ECC_ALGO_BCH;
+ anand->ecc_bits = bch_gf_mag * ecc->strength;
+ ecc->bytes = DIV_ROUND_UP(anand->ecc_bits, 8);
+ anand->ecc_total = DIV_ROUND_UP(anand->ecc_bits * ecc->steps, 8);
+ ecc_offset = mtd->writesize + mtd->oobsize - anand->ecc_total;
+ anand->ecc_conf = ECC_CONF_COL(ecc_offset) |
+ ECC_CONF_LEN(anand->ecc_total) |
+ ECC_CONF_BCH_EN;
+
+ anand->errloc = devm_kmalloc_array(nfc->dev, ecc->strength,
+ sizeof(*anand->errloc), GFP_KERNEL);
+ if (!anand->errloc)
+ return -ENOMEM;
+
+ anand->hw_ecc = devm_kmalloc(nfc->dev, ecc->bytes, GFP_KERNEL);
+ if (!anand->hw_ecc)
+ return -ENOMEM;
+
+ /* Enforce bit swapping to fit the hardware */
+ anand->bch = bch_init(bch_gf_mag, ecc->strength, bch_prim_poly, true);
+ if (!anand->bch)
+ return -EINVAL;
+
+ ecc->read_page = anfc_sel_read_page_hw_ecc;
+ ecc->write_page = anfc_sel_write_page_hw_ecc;
+
+ return 0;
+}
+
+static int anfc_attach_chip(struct nand_chip *chip)
+{
+ struct anand *anand = to_anand(chip);
+ struct arasan_nfc *nfc = to_anfc(chip->controller);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret = 0;
+
+ if (mtd->writesize <= SZ_512)
+ anand->caddr_cycles = 1;
+ else
+ anand->caddr_cycles = 2;
+
+ if (chip->options & NAND_ROW_ADDR_3)
+ anand->raddr_cycles = 3;
+ else
+ anand->raddr_cycles = 2;
+
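+ /*
+ * Hardware encoding of the page size; note that the code for
+ * 1024-byte pages (5) is out of sequence.
+ */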
+ switch (mtd->writesize) {
+ case 512:
+ anand->page_sz = 0;
+ break;
+ case 1024:
+ anand->page_sz = 5;
+ break;
+ case 2048:
+ anand->page_sz = 1;
+ break;
+ case 4096:
+ anand->page_sz = 2;
+ break;
+ case 8192:
+ anand->page_sz = 3;
+ break;
+ case 16384:
+ anand->page_sz = 4;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* These hooks are valid for all ECC providers */
+ chip->ecc.read_page_raw = nand_monolithic_read_page_raw;
+ chip->ecc.write_page_raw = nand_monolithic_write_page_raw;
+
+ switch (chip->ecc.engine_type) {
+ case NAND_ECC_ENGINE_TYPE_NONE:
+ case NAND_ECC_ENGINE_TYPE_SOFT:
+ case NAND_ECC_ENGINE_TYPE_ON_DIE:
+ break;
+ case NAND_ECC_ENGINE_TYPE_ON_HOST:
+ ret = anfc_init_hw_ecc_controller(nfc, chip);
+ break;
+ default:
+ dev_err(nfc->dev, "Unsupported ECC mode: %d\n",
+ chip->ecc.engine_type);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static void anfc_detach_chip(struct nand_chip *chip)
+{
+ struct anand *anand = to_anand(chip);
+
+ if (anand->bch)
+ bch_free(anand->bch);
+}
+
+static const struct nand_controller_ops anfc_ops = {
+ .exec_op = anfc_exec_op,
+ .setup_interface = anfc_setup_interface,
+ .attach_chip = anfc_attach_chip,
+ .detach_chip = anfc_detach_chip,
+};
+
+static int anfc_chip_init(struct arasan_nfc *nfc, struct device_node *np)
+{
+ struct anand *anand;
+ struct nand_chip *chip;
+ struct mtd_info *mtd;
+ u32 cs, rb;
+ int ret;
+
+ anand = devm_kzalloc(nfc->dev, sizeof(*anand), GFP_KERNEL);
+ if (!anand)
+ return -ENOMEM;
+
+ /* We do not support multiple CS per chip yet */
+ if (of_property_count_elems_of_size(np, "reg", sizeof(u32)) != 1) {
+ dev_err(nfc->dev, "Invalid reg property\n");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(np, "reg", &cs);
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32(np, "nand-rb", &rb);
+ if (ret)
+ return ret;
+
+ if (cs >= ANFC_MAX_CS || rb >= ANFC_MAX_CS) {
+ dev_err(nfc->dev, "Wrong CS %d or RB %d\n", cs, rb);
+ return -EINVAL;
+ }
+
+ if (test_and_set_bit(cs, &nfc->assigned_cs)) {
+ dev_err(nfc->dev, "Already assigned CS %d\n", cs);
+ return -EINVAL;
+ }
+
+ anand->cs = cs;
+ anand->rb = rb;
+
+ chip = &anand->chip;
+ mtd = nand_to_mtd(chip);
+ mtd->dev.parent = nfc->dev;
+ chip->controller = &nfc->controller;
+ chip->options = NAND_BUSWIDTH_AUTO | NAND_NO_SUBPAGE_WRITE |
+ NAND_USES_DMA;
+
+ nand_set_flash_node(chip, np);
+ if (!mtd->name) {
+ dev_err(nfc->dev, "NAND label property is mandatory\n");
+ return -EINVAL;
+ }
+
+ ret = nand_scan(chip, 1);
+ if (ret) {
+ dev_err(nfc->dev, "Scan operation failed\n");
+ return ret;
+ }
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret) {
+ nand_cleanup(chip);
+ return ret;
+ }
+
+ list_add_tail(&anand->node, &nfc->chips);
+
+ return 0;
+}
+
+static void anfc_chips_cleanup(struct arasan_nfc *nfc)
+{
+ struct anand *anand, *tmp;
+ struct nand_chip *chip;
+ int ret;
+
+ list_for_each_entry_safe(anand, tmp, &nfc->chips, node) {
+ chip = &anand->chip;
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ list_del(&anand->node);
+ }
+}
+
+static int anfc_chips_init(struct arasan_nfc *nfc)
+{
+ struct device_node *np = nfc->dev->of_node, *nand_np;
+ int nchips = of_get_child_count(np);
+ int ret;
+
+ if (!nchips || nchips > ANFC_MAX_CS) {
+ dev_err(nfc->dev, "Incorrect number of NAND chips (%d)\n",
+ nchips);
+ return -EINVAL;
+ }
+
+ for_each_child_of_node(np, nand_np) {
+ ret = anfc_chip_init(nfc, nand_np);
+ if (ret) {
+ of_node_put(nand_np);
+ anfc_chips_cleanup(nfc);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static void anfc_reset(struct arasan_nfc *nfc)
+{
+ /* Disable interrupt signals */
+ writel_relaxed(0, nfc->base + INTR_SIG_EN_REG);
+
+ /* Enable interrupt status */
+ writel_relaxed(EVENT_MASK, nfc->base + INTR_STS_EN_REG);
+}
+
+static int anfc_probe(struct platform_device *pdev)
+{
+ struct arasan_nfc *nfc;
+ int ret;
+
+ nfc = devm_kzalloc(&pdev->dev, sizeof(*nfc), GFP_KERNEL);
+ if (!nfc)
+ return -ENOMEM;
+
+ nfc->dev = &pdev->dev;
+ nand_controller_init(&nfc->controller);
+ nfc->controller.ops = &anfc_ops;
+ INIT_LIST_HEAD(&nfc->chips);
+
+ nfc->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(nfc->base))
+ return PTR_ERR(nfc->base);
+
+ anfc_reset(nfc);
+
+ nfc->controller_clk = devm_clk_get(&pdev->dev, "controller");
+ if (IS_ERR(nfc->controller_clk))
+ return PTR_ERR(nfc->controller_clk);
+
+ nfc->bus_clk = devm_clk_get(&pdev->dev, "bus");
+ if (IS_ERR(nfc->bus_clk))
+ return PTR_ERR(nfc->bus_clk);
+
+ ret = clk_prepare_enable(nfc->controller_clk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(nfc->bus_clk);
+ if (ret)
+ goto disable_controller_clk;
+
+ ret = anfc_chips_init(nfc);
+ if (ret)
+ goto disable_bus_clk;
+
+ platform_set_drvdata(pdev, nfc);
+
+ return 0;
+
+disable_bus_clk:
+ clk_disable_unprepare(nfc->bus_clk);
+
+disable_controller_clk:
+ clk_disable_unprepare(nfc->controller_clk);
+
+ return ret;
+}
+
+static int anfc_remove(struct platform_device *pdev)
+{
+ struct arasan_nfc *nfc = platform_get_drvdata(pdev);
+
+ anfc_chips_cleanup(nfc);
+
+ clk_disable_unprepare(nfc->bus_clk);
+ clk_disable_unprepare(nfc->controller_clk);
+
+ return 0;
+}
+
+static const struct of_device_id anfc_ids[] = {
+ {
+ .compatible = "xlnx,zynqmp-nand-controller",
+ },
+ {
+ .compatible = "arasan,nfc-v3p10",
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, anfc_ids);
+
+static struct platform_driver anfc_driver = {
+ .driver = {
+ .name = "arasan-nand-controller",
+ .of_match_table = anfc_ids,
+ },
+ .probe = anfc_probe,
+ .remove = anfc_remove,
+};
+module_platform_driver(anfc_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Punnaiah Choudary Kalluri <punnaia@xilinx.com>");
+MODULE_AUTHOR("Naga Sureshkumar Relli <nagasure@xilinx.com>");
+MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>");
+MODULE_DESCRIPTION("Arasan NAND Flash Controller Driver");
diff --git a/drivers/mtd/nand/raw/atmel/Makefile b/drivers/mtd/nand/raw/atmel/Makefile
new file mode 100644
index 000000000..27c2dd50e
--- /dev/null
+++ b/drivers/mtd/nand/raw/atmel/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_MTD_NAND_ATMEL) += atmel-nand-controller.o atmel-pmecc.o
+
+atmel-nand-controller-objs := nand-controller.o
+atmel-pmecc-objs := pmecc.o
diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c
new file mode 100644
index 000000000..0d84f8156
--- /dev/null
+++ b/drivers/mtd/nand/raw/atmel/nand-controller.c
@@ -0,0 +1,2668 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2017 ATMEL
+ * Copyright 2017 Free Electrons
+ *
+ * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
+ *
+ * Derived from the atmel_nand.c driver which contained the following
+ * copyrights:
+ *
+ * Copyright 2003 Rick Bronson
+ *
+ * Derived from drivers/mtd/nand/autcpu12.c (removed in v3.8)
+ * Copyright 2001 Thomas Gleixner (gleixner@autronix.de)
+ *
+ * Derived from drivers/mtd/spia.c (removed in v3.8)
+ * Copyright 2000 Steven J. Hill (sjhill@cotw.com)
+ *
+ *
+ * Add Hardware ECC support for AT91SAM9260 / AT91SAM9263
+ * Richard Genoud (richard.genoud@gmail.com), Adeneo Copyright 2007
+ *
+ * Derived from Das U-Boot source code
+ * (u-boot-1.1.5/board/atmel/at91sam9263ek/nand.c)
+ * Copyright 2006 ATMEL Rousset, Lacressonniere Nicolas
+ *
+ * Add Programmable Multibit ECC support for various AT91 SoC
+ * Copyright 2012 ATMEL, Hong Xu
+ *
+ * Add Nand Flash Controller support for SAMA5 SoC
+ * Copyright 2013 ATMEL, Josh Wu (josh.wu@atmel.com)
+ *
+ * A few words about the naming convention in this file. This convention
+ * applies to structure and function names.
+ *
+ * Prefixes:
+ *
+ * - atmel_nand_: all generic structures/functions
+ * - atmel_smc_nand_: all structures/functions specific to the SMC interface
+ * (at91sam9 and avr32 SoCs)
+ * - atmel_hsmc_nand_: all structures/functions specific to the HSMC interface
+ * (sama5 SoCs and later)
+ * - atmel_nfc_: all structures/functions used to manipulate the NFC sub-block
+ * that is available in the HSMC block
+ * - <soc>_nand_: all SoC specific structures/functions
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/genalloc.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/atmel-matrix.h>
+#include <linux/mfd/syscon/atmel-smc.h>
+#include <linux/module.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/iopoll.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <soc/at91/atmel-sfr.h>
+
+#include "pmecc.h"
+
+#define ATMEL_HSMC_NFC_CFG 0x0
+#define ATMEL_HSMC_NFC_CFG_SPARESIZE(x) (((x) / 4) << 24)
+#define ATMEL_HSMC_NFC_CFG_SPARESIZE_MASK GENMASK(30, 24)
+#define ATMEL_HSMC_NFC_CFG_DTO(cyc, mul) (((cyc) << 16) | ((mul) << 20))
+#define ATMEL_HSMC_NFC_CFG_DTO_MAX GENMASK(22, 16)
+#define ATMEL_HSMC_NFC_CFG_RBEDGE BIT(13)
+#define ATMEL_HSMC_NFC_CFG_FALLING_EDGE BIT(12)
+#define ATMEL_HSMC_NFC_CFG_RSPARE BIT(9)
+#define ATMEL_HSMC_NFC_CFG_WSPARE BIT(8)
+#define ATMEL_HSMC_NFC_CFG_PAGESIZE_MASK GENMASK(2, 0)
+#define ATMEL_HSMC_NFC_CFG_PAGESIZE(x) (fls((x) / 512) - 1)
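+/* e.g. ATMEL_HSMC_NFC_CFG_PAGESIZE(2048) = fls(4) - 1 = 2 */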
+
+#define ATMEL_HSMC_NFC_CTRL 0x4
+#define ATMEL_HSMC_NFC_CTRL_EN BIT(0)
+#define ATMEL_HSMC_NFC_CTRL_DIS BIT(1)
+
+#define ATMEL_HSMC_NFC_SR 0x8
+#define ATMEL_HSMC_NFC_IER 0xc
+#define ATMEL_HSMC_NFC_IDR 0x10
+#define ATMEL_HSMC_NFC_IMR 0x14
+#define ATMEL_HSMC_NFC_SR_ENABLED BIT(1)
+#define ATMEL_HSMC_NFC_SR_RB_RISE BIT(4)
+#define ATMEL_HSMC_NFC_SR_RB_FALL BIT(5)
+#define ATMEL_HSMC_NFC_SR_BUSY BIT(8)
+#define ATMEL_HSMC_NFC_SR_WR BIT(11)
+#define ATMEL_HSMC_NFC_SR_CSID GENMASK(14, 12)
+#define ATMEL_HSMC_NFC_SR_XFRDONE BIT(16)
+#define ATMEL_HSMC_NFC_SR_CMDDONE BIT(17)
+#define ATMEL_HSMC_NFC_SR_DTOE BIT(20)
+#define ATMEL_HSMC_NFC_SR_UNDEF BIT(21)
+#define ATMEL_HSMC_NFC_SR_AWB BIT(22)
+#define ATMEL_HSMC_NFC_SR_NFCASE BIT(23)
+#define ATMEL_HSMC_NFC_SR_ERRORS (ATMEL_HSMC_NFC_SR_DTOE | \
+ ATMEL_HSMC_NFC_SR_UNDEF | \
+ ATMEL_HSMC_NFC_SR_AWB | \
+ ATMEL_HSMC_NFC_SR_NFCASE)
+#define ATMEL_HSMC_NFC_SR_RBEDGE(x) BIT((x) + 24)
+
+#define ATMEL_HSMC_NFC_ADDR 0x18
+#define ATMEL_HSMC_NFC_BANK 0x1c
+
+#define ATMEL_NFC_MAX_RB_ID 7
+
+#define ATMEL_NFC_SRAM_SIZE 0x2400
+
+#define ATMEL_NFC_CMD(pos, cmd) ((cmd) << (((pos) * 8) + 2))
+#define ATMEL_NFC_VCMD2 BIT(18)
+#define ATMEL_NFC_ACYCLE(naddrs) ((naddrs) << 19)
+#define ATMEL_NFC_CSID(cs) ((cs) << 22)
+#define ATMEL_NFC_DATAEN BIT(25)
+#define ATMEL_NFC_NFCWR BIT(26)
+
+#define ATMEL_NFC_MAX_ADDR_CYCLES 5
+
+#define ATMEL_NAND_ALE_OFFSET BIT(21)
+#define ATMEL_NAND_CLE_OFFSET BIT(22)
+
+#define DEFAULT_TIMEOUT_MS 1000
+#define MIN_DMA_LEN 128
+
+static bool atmel_nand_avoid_dma __read_mostly;
+
+MODULE_PARM_DESC(avoiddma, "Avoid using DMA");
+module_param_named(avoiddma, atmel_nand_avoid_dma, bool, 0400);
+
+enum atmel_nand_rb_type {
+ ATMEL_NAND_NO_RB,
+ ATMEL_NAND_NATIVE_RB,
+ ATMEL_NAND_GPIO_RB,
+};
+
+struct atmel_nand_rb {
+ enum atmel_nand_rb_type type;
+ union {
+ struct gpio_desc *gpio;
+ int id;
+ };
+};
+
+struct atmel_nand_cs {
+ int id;
+ struct atmel_nand_rb rb;
+ struct gpio_desc *csgpio;
+ struct {
+ void __iomem *virt;
+ dma_addr_t dma;
+ } io;
+
+ struct atmel_smc_cs_conf smcconf;
+};
+
+struct atmel_nand {
+ struct list_head node;
+ struct device *dev;
+ struct nand_chip base;
+ struct atmel_nand_cs *activecs;
+ struct atmel_pmecc_user *pmecc;
+ struct gpio_desc *cdgpio;
+ int numcs;
+ struct atmel_nand_cs cs[];
+};
+
+static inline struct atmel_nand *to_atmel_nand(struct nand_chip *chip)
+{
+ return container_of(chip, struct atmel_nand, base);
+}
+
+enum atmel_nfc_data_xfer {
+ ATMEL_NFC_NO_DATA,
+ ATMEL_NFC_READ_DATA,
+ ATMEL_NFC_WRITE_DATA,
+};
+
+struct atmel_nfc_op {
+ u8 cs;
+ u8 ncmds;
+ u8 cmds[2];
+ u8 naddrs;
+ u8 addrs[5];
+ enum atmel_nfc_data_xfer data;
+ u32 wait;
+ u32 errors;
+};
+
+struct atmel_nand_controller;
+struct atmel_nand_controller_caps;
+
+struct atmel_nand_controller_ops {
+ int (*probe)(struct platform_device *pdev,
+ const struct atmel_nand_controller_caps *caps);
+ int (*remove)(struct atmel_nand_controller *nc);
+ void (*nand_init)(struct atmel_nand_controller *nc,
+ struct atmel_nand *nand);
+ int (*ecc_init)(struct nand_chip *chip);
+ int (*setup_interface)(struct atmel_nand *nand, int csline,
+ const struct nand_interface_config *conf);
+ int (*exec_op)(struct atmel_nand *nand,
+ const struct nand_operation *op, bool check_only);
+};
+
+struct atmel_nand_controller_caps {
+ bool has_dma;
+ bool legacy_of_bindings;
+ u32 ale_offs;
+ u32 cle_offs;
+ const char *ebi_csa_regmap_name;
+ const struct atmel_nand_controller_ops *ops;
+};
+
+struct atmel_nand_controller {
+ struct nand_controller base;
+ const struct atmel_nand_controller_caps *caps;
+ struct device *dev;
+ struct regmap *smc;
+ struct dma_chan *dmac;
+ struct atmel_pmecc *pmecc;
+ struct list_head chips;
+ struct clk *mck;
+};
+
+static inline struct atmel_nand_controller *
+to_nand_controller(struct nand_controller *ctl)
+{
+ return container_of(ctl, struct atmel_nand_controller, base);
+}
+
+struct atmel_smc_nand_ebi_csa_cfg {
+ u32 offs;
+ u32 nfd0_on_d16;
+};
+
+struct atmel_smc_nand_controller {
+ struct atmel_nand_controller base;
+ struct regmap *ebi_csa_regmap;
+ struct atmel_smc_nand_ebi_csa_cfg *ebi_csa;
+};
+
+static inline struct atmel_smc_nand_controller *
+to_smc_nand_controller(struct nand_controller *ctl)
+{
+ return container_of(to_nand_controller(ctl),
+ struct atmel_smc_nand_controller, base);
+}
+
+struct atmel_hsmc_nand_controller {
+ struct atmel_nand_controller base;
+ struct {
+ struct gen_pool *pool;
+ void __iomem *virt;
+ dma_addr_t dma;
+ } sram;
+ const struct atmel_hsmc_reg_layout *hsmc_layout;
+ struct regmap *io;
+ struct atmel_nfc_op op;
+ struct completion complete;
+ u32 cfg;
+ int irq;
+
+ /* Only used when instantiating from legacy DT bindings. */
+ struct clk *clk;
+};
+
+static inline struct atmel_hsmc_nand_controller *
+to_hsmc_nand_controller(struct nand_controller *ctl)
+{
+ return container_of(to_nand_controller(ctl),
+ struct atmel_hsmc_nand_controller, base);
+}
+
+static bool atmel_nfc_op_done(struct atmel_nfc_op *op, u32 status)
+{
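+ /*
+ * Latch any error flag and clear the wait bits that were just
+ * received: the operation is over once all awaited events have
+ * arrived or as soon as an error is reported.
+ */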
+ op->errors |= status & ATMEL_HSMC_NFC_SR_ERRORS;
+ op->wait ^= status & op->wait;
+
+ return !op->wait || op->errors;
+}
+
+static irqreturn_t atmel_nfc_interrupt(int irq, void *data)
+{
+ struct atmel_hsmc_nand_controller *nc = data;
+ u32 sr, rcvd;
+ bool done;
+
+ regmap_read(nc->base.smc, ATMEL_HSMC_NFC_SR, &sr);
+
+ rcvd = sr & (nc->op.wait | ATMEL_HSMC_NFC_SR_ERRORS);
+ done = atmel_nfc_op_done(&nc->op, sr);
+
+ if (rcvd)
+ regmap_write(nc->base.smc, ATMEL_HSMC_NFC_IDR, rcvd);
+
+ if (done)
+ complete(&nc->complete);
+
+ return rcvd ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static int atmel_nfc_wait(struct atmel_hsmc_nand_controller *nc, bool poll,
+ unsigned int timeout_ms)
+{
+ int ret;
+
+ if (!timeout_ms)
+ timeout_ms = DEFAULT_TIMEOUT_MS;
+
+ if (poll) {
+ u32 status;
+
+ ret = regmap_read_poll_timeout(nc->base.smc,
+ ATMEL_HSMC_NFC_SR, status,
+ atmel_nfc_op_done(&nc->op,
+ status),
+ 0, timeout_ms * 1000);
+ } else {
+ init_completion(&nc->complete);
+ regmap_write(nc->base.smc, ATMEL_HSMC_NFC_IER,
+ nc->op.wait | ATMEL_HSMC_NFC_SR_ERRORS);
+ ret = wait_for_completion_timeout(&nc->complete,
+ msecs_to_jiffies(timeout_ms));
+ if (!ret)
+ ret = -ETIMEDOUT;
+ else
+ ret = 0;
+
+ regmap_write(nc->base.smc, ATMEL_HSMC_NFC_IDR, 0xffffffff);
+ }
+
+ if (nc->op.errors & ATMEL_HSMC_NFC_SR_DTOE) {
+ dev_err(nc->base.dev, "Waiting NAND R/B Timeout\n");
+ ret = -ETIMEDOUT;
+ }
+
+ if (nc->op.errors & ATMEL_HSMC_NFC_SR_UNDEF) {
+ dev_err(nc->base.dev, "Access to an undefined area\n");
+ ret = -EIO;
+ }
+
+ if (nc->op.errors & ATMEL_HSMC_NFC_SR_AWB) {
+ dev_err(nc->base.dev, "Access while busy\n");
+ ret = -EIO;
+ }
+
+ if (nc->op.errors & ATMEL_HSMC_NFC_SR_NFCASE) {
+ dev_err(nc->base.dev, "Wrong access size\n");
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+static void atmel_nand_dma_transfer_finished(void *data)
+{
+ struct completion *finished = data;
+
+ complete(finished);
+}
+
+static int atmel_nand_dma_transfer(struct atmel_nand_controller *nc,
+ void *buf, dma_addr_t dev_dma, size_t len,
+ enum dma_data_direction dir)
+{
+ DECLARE_COMPLETION_ONSTACK(finished);
+ dma_addr_t src_dma, dst_dma, buf_dma;
+ struct dma_async_tx_descriptor *tx;
+ dma_cookie_t cookie;
+
+ buf_dma = dma_map_single(nc->dev, buf, len, dir);
+ if (dma_mapping_error(nc->dev, buf_dma)) {
+ dev_err(nc->dev,
+ "Failed to prepare a buffer for DMA access\n");
+ goto err;
+ }
+
+ if (dir == DMA_FROM_DEVICE) {
+ src_dma = dev_dma;
+ dst_dma = buf_dma;
+ } else {
+ src_dma = buf_dma;
+ dst_dma = dev_dma;
+ }
+
+ tx = dmaengine_prep_dma_memcpy(nc->dmac, dst_dma, src_dma, len,
+ DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+ if (!tx) {
+ dev_err(nc->dev, "Failed to prepare DMA memcpy\n");
+ goto err_unmap;
+ }
+
+ tx->callback = atmel_nand_dma_transfer_finished;
+ tx->callback_param = &finished;
+
+ cookie = dmaengine_submit(tx);
+ if (dma_submit_error(cookie)) {
+ dev_err(nc->dev, "Failed to do DMA tx_submit\n");
+ goto err_unmap;
+ }
+
+ dma_async_issue_pending(nc->dmac);
+ wait_for_completion(&finished);
+ dma_unmap_single(nc->dev, buf_dma, len, dir);
+
+ return 0;
+
+err_unmap:
+ dma_unmap_single(nc->dev, buf_dma, len, dir);
+
+err:
+ dev_dbg(nc->dev, "Fall back to CPU I/O\n");
+
+ return -EIO;
+}
+
+static int atmel_nfc_exec_op(struct atmel_hsmc_nand_controller *nc, bool poll)
+{
+ u8 *addrs = nc->op.addrs;
+ unsigned int op = 0;
+ u32 addr, val;
+ int i, ret;
+
+ nc->op.wait = ATMEL_HSMC_NFC_SR_CMDDONE;
+
+ for (i = 0; i < nc->op.ncmds; i++)
+ op |= ATMEL_NFC_CMD(i, nc->op.cmds[i]);
+
+ if (nc->op.naddrs == ATMEL_NFC_MAX_ADDR_CYCLES)
+ regmap_write(nc->base.smc, ATMEL_HSMC_NFC_ADDR, *addrs++);
+
+ op |= ATMEL_NFC_CSID(nc->op.cs) |
+ ATMEL_NFC_ACYCLE(nc->op.naddrs);
+
+ if (nc->op.ncmds > 1)
+ op |= ATMEL_NFC_VCMD2;
+
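+ /*
+ * Up to four remaining address cycles are packed in the value
+ * written to the NFC command space; a fifth cycle, if any, was
+ * already written to the NFC_ADDR register above.
+ */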
+ addr = addrs[0] | (addrs[1] << 8) | (addrs[2] << 16) |
+ (addrs[3] << 24);
+
+ if (nc->op.data != ATMEL_NFC_NO_DATA) {
+ op |= ATMEL_NFC_DATAEN;
+ nc->op.wait |= ATMEL_HSMC_NFC_SR_XFRDONE;
+
+ if (nc->op.data == ATMEL_NFC_WRITE_DATA)
+ op |= ATMEL_NFC_NFCWR;
+ }
+
+ /* Clear all flags. */
+ regmap_read(nc->base.smc, ATMEL_HSMC_NFC_SR, &val);
+
+ /* Send the command. */
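+ /* (the command is encoded in the offset, the address cycles in the value) */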
+ regmap_write(nc->io, op, addr);
+
+ ret = atmel_nfc_wait(nc, poll, 0);
+ if (ret)
+ dev_err(nc->base.dev,
+ "Failed to send NAND command (err = %d)!",
+ ret);
+
+ /* Reset the op state. */
+ memset(&nc->op, 0, sizeof(nc->op));
+
+ return ret;
+}
+
+static void atmel_nand_data_in(struct atmel_nand *nand, void *buf,
+ unsigned int len, bool force_8bit)
+{
+ struct atmel_nand_controller *nc;
+
+ nc = to_nand_controller(nand->base.controller);
+
+ /*
+ * If the controller supports DMA, the buffer address is DMA-able
+ * and len is long enough to make a DMA transfer profitable, trigger
+ * a DMA transfer. If it fails, fall back to PIO mode.
+ */
+ if (nc->dmac && virt_addr_valid(buf) &&
+ len >= MIN_DMA_LEN && !force_8bit &&
+ !atmel_nand_dma_transfer(nc, buf, nand->activecs->io.dma, len,
+ DMA_FROM_DEVICE))
+ return;
+
+ if ((nand->base.options & NAND_BUSWIDTH_16) && !force_8bit)
+ ioread16_rep(nand->activecs->io.virt, buf, len / 2);
+ else
+ ioread8_rep(nand->activecs->io.virt, buf, len);
+}
+
+static void atmel_nand_data_out(struct atmel_nand *nand, const void *buf,
+ unsigned int len, bool force_8bit)
+{
+ struct atmel_nand_controller *nc;
+
+ nc = to_nand_controller(nand->base.controller);
+
+ /*
+ * If the controller supports DMA, the buffer address is DMA-able
+ * and len is long enough to make a DMA transfer profitable, trigger
+ * a DMA transfer. If it fails, fall back to PIO mode.
+ */
+ if (nc->dmac && virt_addr_valid(buf) &&
+ len >= MIN_DMA_LEN && !force_8bit &&
+ !atmel_nand_dma_transfer(nc, (void *)buf, nand->activecs->io.dma,
+ len, DMA_TO_DEVICE))
+ return;
+
+ if ((nand->base.options & NAND_BUSWIDTH_16) && !force_8bit)
+ iowrite16_rep(nand->activecs->io.virt, buf, len / 2);
+ else
+ iowrite8_rep(nand->activecs->io.virt, buf, len);
+}
+
+static int atmel_nand_waitrdy(struct atmel_nand *nand, unsigned int timeout_ms)
+{
+ if (nand->activecs->rb.type == ATMEL_NAND_NO_RB)
+ return nand_soft_waitrdy(&nand->base, timeout_ms);
+
+ return nand_gpio_waitrdy(&nand->base, nand->activecs->rb.gpio,
+ timeout_ms);
+}
+
+static int atmel_hsmc_nand_waitrdy(struct atmel_nand *nand,
+ unsigned int timeout_ms)
+{
+ struct atmel_hsmc_nand_controller *nc;
+ u32 status, mask;
+
+ if (nand->activecs->rb.type != ATMEL_NAND_NATIVE_RB)
+ return atmel_nand_waitrdy(nand, timeout_ms);
+
+ nc = to_hsmc_nand_controller(nand->base.controller);
+ mask = ATMEL_HSMC_NFC_SR_RBEDGE(nand->activecs->rb.id);
+ return regmap_read_poll_timeout_atomic(nc->base.smc, ATMEL_HSMC_NFC_SR,
+ status, status & mask,
+ 10, timeout_ms * 1000);
+}
+
+static void atmel_nand_select_target(struct atmel_nand *nand,
+ unsigned int cs)
+{
+ nand->activecs = &nand->cs[cs];
+}
+
+static void atmel_hsmc_nand_select_target(struct atmel_nand *nand,
+ unsigned int cs)
+{
+ struct mtd_info *mtd = nand_to_mtd(&nand->base);
+ struct atmel_hsmc_nand_controller *nc;
+ u32 cfg = ATMEL_HSMC_NFC_CFG_PAGESIZE(mtd->writesize) |
+ ATMEL_HSMC_NFC_CFG_SPARESIZE(mtd->oobsize) |
+ ATMEL_HSMC_NFC_CFG_RSPARE;
+
+ nand->activecs = &nand->cs[cs];
+ nc = to_hsmc_nand_controller(nand->base.controller);
+ if (nc->cfg == cfg)
+ return;
+
+ regmap_update_bits(nc->base.smc, ATMEL_HSMC_NFC_CFG,
+ ATMEL_HSMC_NFC_CFG_PAGESIZE_MASK |
+ ATMEL_HSMC_NFC_CFG_SPARESIZE_MASK |
+ ATMEL_HSMC_NFC_CFG_RSPARE |
+ ATMEL_HSMC_NFC_CFG_WSPARE,
+ cfg);
+ nc->cfg = cfg;
+}
+
+static int atmel_smc_nand_exec_instr(struct atmel_nand *nand,
+ const struct nand_op_instr *instr)
+{
+ struct atmel_nand_controller *nc;
+ unsigned int i;
+
+ nc = to_nand_controller(nand->base.controller);
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ writeb(instr->ctx.cmd.opcode,
+ nand->activecs->io.virt + nc->caps->cle_offs);
+ return 0;
+ case NAND_OP_ADDR_INSTR:
+ for (i = 0; i < instr->ctx.addr.naddrs; i++)
+ writeb(instr->ctx.addr.addrs[i],
+ nand->activecs->io.virt + nc->caps->ale_offs);
+ return 0;
+ case NAND_OP_DATA_IN_INSTR:
+ atmel_nand_data_in(nand, instr->ctx.data.buf.in,
+ instr->ctx.data.len,
+ instr->ctx.data.force_8bit);
+ return 0;
+ case NAND_OP_DATA_OUT_INSTR:
+ atmel_nand_data_out(nand, instr->ctx.data.buf.out,
+ instr->ctx.data.len,
+ instr->ctx.data.force_8bit);
+ return 0;
+ case NAND_OP_WAITRDY_INSTR:
+ return atmel_nand_waitrdy(nand,
+ instr->ctx.waitrdy.timeout_ms);
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int atmel_smc_nand_exec_op(struct atmel_nand *nand,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ unsigned int i;
+ int ret = 0;
+
+ if (check_only)
+ return 0;
+
+ atmel_nand_select_target(nand, op->cs);
+ gpiod_set_value(nand->activecs->csgpio, 0);
+ for (i = 0; i < op->ninstrs; i++) {
+ ret = atmel_smc_nand_exec_instr(nand, &op->instrs[i]);
+ if (ret)
+ break;
+ }
+ gpiod_set_value(nand->activecs->csgpio, 1);
+
+ return ret;
+}
+
+static int atmel_hsmc_exec_cmd_addr(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct atmel_nand *nand = to_atmel_nand(chip);
+ struct atmel_hsmc_nand_controller *nc;
+ unsigned int i, j;
+
+ nc = to_hsmc_nand_controller(chip->controller);
+
+ nc->op.cs = nand->activecs->id;
+ for (i = 0; i < subop->ninstrs; i++) {
+ const struct nand_op_instr *instr = &subop->instrs[i];
+
+ if (instr->type == NAND_OP_CMD_INSTR) {
+ nc->op.cmds[nc->op.ncmds++] = instr->ctx.cmd.opcode;
+ continue;
+ }
+
+ for (j = nand_subop_get_addr_start_off(subop, i);
+ j < nand_subop_get_num_addr_cyc(subop, i); j++) {
+ nc->op.addrs[nc->op.naddrs] = instr->ctx.addr.addrs[j];
+ nc->op.naddrs++;
+ }
+ }
+
+ return atmel_nfc_exec_op(nc, true);
+}
+
+static int atmel_hsmc_exec_rw(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ const struct nand_op_instr *instr = subop->instrs;
+ struct atmel_nand *nand = to_atmel_nand(chip);
+
+ if (instr->type == NAND_OP_DATA_IN_INSTR)
+ atmel_nand_data_in(nand, instr->ctx.data.buf.in,
+ instr->ctx.data.len,
+ instr->ctx.data.force_8bit);
+ else
+ atmel_nand_data_out(nand, instr->ctx.data.buf.out,
+ instr->ctx.data.len,
+ instr->ctx.data.force_8bit);
+
+ return 0;
+}
+
+static int atmel_hsmc_exec_waitrdy(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ const struct nand_op_instr *instr = subop->instrs;
+ struct atmel_nand *nand = to_atmel_nand(chip);
+
+ return atmel_hsmc_nand_waitrdy(nand, instr->ctx.waitrdy.timeout_ms);
+}
+
+static const struct nand_op_parser atmel_hsmc_op_parser = NAND_OP_PARSER(
+ NAND_OP_PARSER_PATTERN(atmel_hsmc_exec_cmd_addr,
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(true, 5),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true)),
+ NAND_OP_PARSER_PATTERN(atmel_hsmc_exec_rw,
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 0)),
+ NAND_OP_PARSER_PATTERN(atmel_hsmc_exec_rw,
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 0)),
+ NAND_OP_PARSER_PATTERN(atmel_hsmc_exec_waitrdy,
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+);
+
+static int atmel_hsmc_nand_exec_op(struct atmel_nand *nand,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ int ret;
+
+ if (check_only)
+ return nand_op_parser_exec_op(&nand->base,
+ &atmel_hsmc_op_parser, op, true);
+
+ atmel_hsmc_nand_select_target(nand, op->cs);
+ ret = nand_op_parser_exec_op(&nand->base, &atmel_hsmc_op_parser, op,
+ false);
+
+ return ret;
+}
+
+static void atmel_nfc_copy_to_sram(struct nand_chip *chip, const u8 *buf,
+ bool oob_required)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct atmel_hsmc_nand_controller *nc;
+ int ret = -EIO;
+
+ nc = to_hsmc_nand_controller(chip->controller);
+
+ if (nc->base.dmac)
+ ret = atmel_nand_dma_transfer(&nc->base, (void *)buf,
+ nc->sram.dma, mtd->writesize,
+ DMA_TO_DEVICE);
+
+ /* Falling back to CPU copy. */
+ if (ret)
+ memcpy_toio(nc->sram.virt, buf, mtd->writesize);
+
+ if (oob_required)
+ memcpy_toio(nc->sram.virt + mtd->writesize, chip->oob_poi,
+ mtd->oobsize);
+}
+
+static void atmel_nfc_copy_from_sram(struct nand_chip *chip, u8 *buf,
+ bool oob_required)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct atmel_hsmc_nand_controller *nc;
+ int ret = -EIO;
+
+ nc = to_hsmc_nand_controller(chip->controller);
+
+ if (nc->base.dmac)
+ ret = atmel_nand_dma_transfer(&nc->base, buf, nc->sram.dma,
+ mtd->writesize, DMA_FROM_DEVICE);
+
+ /* Falling back to CPU copy. */
+ if (ret)
+ memcpy_fromio(buf, nc->sram.virt, mtd->writesize);
+
+ if (oob_required)
+ memcpy_fromio(chip->oob_poi, nc->sram.virt + mtd->writesize,
+ mtd->oobsize);
+}
+
+static void atmel_nfc_set_op_addr(struct nand_chip *chip, int page, int column)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct atmel_hsmc_nand_controller *nc;
+
+ nc = to_hsmc_nand_controller(chip->controller);
+
+ if (column >= 0) {
+ nc->op.addrs[nc->op.naddrs++] = column;
+
+ /*
+ * 2 address cycles for the column offset on large page NANDs.
+ */
+ if (mtd->writesize > 512)
+ nc->op.addrs[nc->op.naddrs++] = column >> 8;
+ }
+
+ if (page >= 0) {
+ nc->op.addrs[nc->op.naddrs++] = page;
+ nc->op.addrs[nc->op.naddrs++] = page >> 8;
+
+ if (chip->options & NAND_ROW_ADDR_3)
+ nc->op.addrs[nc->op.naddrs++] = page >> 16;
+ }
+}
+
+static int atmel_nand_pmecc_enable(struct nand_chip *chip, int op, bool raw)
+{
+ struct atmel_nand *nand = to_atmel_nand(chip);
+ struct atmel_nand_controller *nc;
+ int ret;
+
+ nc = to_nand_controller(chip->controller);
+
+ if (raw)
+ return 0;
+
+ ret = atmel_pmecc_enable(nand->pmecc, op);
+ if (ret)
+ dev_err(nc->dev,
+ "Failed to enable ECC engine (err = %d)\n", ret);
+
+ return ret;
+}
+
+static void atmel_nand_pmecc_disable(struct nand_chip *chip, bool raw)
+{
+ struct atmel_nand *nand = to_atmel_nand(chip);
+
+ if (!raw)
+ atmel_pmecc_disable(nand->pmecc);
+}
+
+static int atmel_nand_pmecc_generate_eccbytes(struct nand_chip *chip, bool raw)
+{
+ struct atmel_nand *nand = to_atmel_nand(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct atmel_nand_controller *nc;
+ struct mtd_oob_region oobregion;
+ void *eccbuf;
+ int ret, i;
+
+ nc = to_nand_controller(chip->controller);
+
+ if (raw)
+ return 0;
+
+ ret = atmel_pmecc_wait_rdy(nand->pmecc);
+ if (ret) {
+ dev_err(nc->dev,
+ "Failed to transfer NAND page data (err = %d)\n",
+ ret);
+ return ret;
+ }
+
+ mtd_ooblayout_ecc(mtd, 0, &oobregion);
+ eccbuf = chip->oob_poi + oobregion.offset;
+
+ for (i = 0; i < chip->ecc.steps; i++) {
+ atmel_pmecc_get_generated_eccbytes(nand->pmecc, i,
+ eccbuf);
+ eccbuf += chip->ecc.bytes;
+ }
+
+ return 0;
+}
+
+static int atmel_nand_pmecc_correct_data(struct nand_chip *chip, void *buf,
+ bool raw)
+{
+ struct atmel_nand *nand = to_atmel_nand(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct atmel_nand_controller *nc;
+ struct mtd_oob_region oobregion;
+ int ret, i, max_bitflips = 0;
+ void *databuf, *eccbuf;
+
+ nc = to_nand_controller(chip->controller);
+
+ if (raw)
+ return 0;
+
+ ret = atmel_pmecc_wait_rdy(nand->pmecc);
+ if (ret) {
+ dev_err(nc->dev,
+ "Failed to read NAND page data (err = %d)\n",
+ ret);
+ return ret;
+ }
+
+ mtd_ooblayout_ecc(mtd, 0, &oobregion);
+ eccbuf = chip->oob_poi + oobregion.offset;
+ databuf = buf;
+
+ for (i = 0; i < chip->ecc.steps; i++) {
+ ret = atmel_pmecc_correct_sector(nand->pmecc, i, databuf,
+ eccbuf);
+ if (ret < 0 && !atmel_pmecc_correct_erased_chunks(nand->pmecc))
+ ret = nand_check_erased_ecc_chunk(databuf,
+ chip->ecc.size,
+ eccbuf,
+ chip->ecc.bytes,
+ NULL, 0,
+ chip->ecc.strength);
+
+ if (ret >= 0) {
+ mtd->ecc_stats.corrected += ret;
+ max_bitflips = max(ret, max_bitflips);
+ } else {
+ mtd->ecc_stats.failed++;
+ }
+
+ databuf += chip->ecc.size;
+ eccbuf += chip->ecc.bytes;
+ }
+
+ return max_bitflips;
+}
+
+static int atmel_nand_pmecc_write_pg(struct nand_chip *chip, const u8 *buf,
+ bool oob_required, int page, bool raw)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct atmel_nand *nand = to_atmel_nand(chip);
+ int ret;
+
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
+ ret = atmel_nand_pmecc_enable(chip, NAND_ECC_WRITE, raw);
+ if (ret)
+ return ret;
+
+ nand_write_data_op(chip, buf, mtd->writesize, false);
+
+ ret = atmel_nand_pmecc_generate_eccbytes(chip, raw);
+ if (ret) {
+ atmel_pmecc_disable(nand->pmecc);
+ return ret;
+ }
+
+ atmel_nand_pmecc_disable(chip, raw);
+
+ nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
+
+ return nand_prog_page_end_op(chip);
+}
+
+static int atmel_nand_pmecc_write_page(struct nand_chip *chip, const u8 *buf,
+ int oob_required, int page)
+{
+ return atmel_nand_pmecc_write_pg(chip, buf, oob_required, page, false);
+}
+
+static int atmel_nand_pmecc_write_page_raw(struct nand_chip *chip,
+ const u8 *buf, int oob_required,
+ int page)
+{
+ return atmel_nand_pmecc_write_pg(chip, buf, oob_required, page, true);
+}
+
+static int atmel_nand_pmecc_read_pg(struct nand_chip *chip, u8 *buf,
+ bool oob_required, int page, bool raw)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
+ ret = atmel_nand_pmecc_enable(chip, NAND_ECC_READ, raw);
+ if (ret)
+ return ret;
+
+ ret = nand_read_data_op(chip, buf, mtd->writesize, false, false);
+ if (ret)
+ goto out_disable;
+
+ ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false, false);
+ if (ret)
+ goto out_disable;
+
+ ret = atmel_nand_pmecc_correct_data(chip, buf, raw);
+
+out_disable:
+ atmel_nand_pmecc_disable(chip, raw);
+
+ return ret;
+}
+
+static int atmel_nand_pmecc_read_page(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ return atmel_nand_pmecc_read_pg(chip, buf, oob_required, page, false);
+}
+
+static int atmel_nand_pmecc_read_page_raw(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ return atmel_nand_pmecc_read_pg(chip, buf, oob_required, page, true);
+}
+
+static int atmel_hsmc_nand_pmecc_write_pg(struct nand_chip *chip,
+ const u8 *buf, bool oob_required,
+ int page, bool raw)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct atmel_nand *nand = to_atmel_nand(chip);
+ struct atmel_hsmc_nand_controller *nc;
+ int ret;
+
+ atmel_hsmc_nand_select_target(nand, chip->cur_cs);
+ nc = to_hsmc_nand_controller(chip->controller);
+
+ atmel_nfc_copy_to_sram(chip, buf, false);
+
+ nc->op.cmds[0] = NAND_CMD_SEQIN;
+ nc->op.ncmds = 1;
+ atmel_nfc_set_op_addr(chip, page, 0x0);
+ nc->op.cs = nand->activecs->id;
+ nc->op.data = ATMEL_NFC_WRITE_DATA;
+
+ ret = atmel_nand_pmecc_enable(chip, NAND_ECC_WRITE, raw);
+ if (ret)
+ return ret;
+
+ ret = atmel_nfc_exec_op(nc, false);
+ if (ret) {
+ atmel_nand_pmecc_disable(chip, raw);
+ dev_err(nc->base.dev,
+ "Failed to transfer NAND page data (err = %d)\n",
+ ret);
+ return ret;
+ }
+
+ ret = atmel_nand_pmecc_generate_eccbytes(chip, raw);
+
+ atmel_nand_pmecc_disable(chip, raw);
+
+ if (ret)
+ return ret;
+
+ nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
+
+ return nand_prog_page_end_op(chip);
+}
+
+static int atmel_hsmc_nand_pmecc_write_page(struct nand_chip *chip,
+ const u8 *buf, int oob_required,
+ int page)
+{
+ return atmel_hsmc_nand_pmecc_write_pg(chip, buf, oob_required, page,
+ false);
+}
+
+static int atmel_hsmc_nand_pmecc_write_page_raw(struct nand_chip *chip,
+ const u8 *buf,
+ int oob_required, int page)
+{
+ return atmel_hsmc_nand_pmecc_write_pg(chip, buf, oob_required, page,
+ true);
+}
+
+static int atmel_hsmc_nand_pmecc_read_pg(struct nand_chip *chip, u8 *buf,
+ bool oob_required, int page,
+ bool raw)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct atmel_nand *nand = to_atmel_nand(chip);
+ struct atmel_hsmc_nand_controller *nc;
+ int ret;
+
+ atmel_hsmc_nand_select_target(nand, chip->cur_cs);
+ nc = to_hsmc_nand_controller(chip->controller);
+
+ /*
+ * Optimized read page accessors only work when the NAND R/B pin is
+ * connected to a native SoC R/B pin. If that's not the case, fall
+ * back to the non-optimized one.
+ */
+ if (nand->activecs->rb.type != ATMEL_NAND_NATIVE_RB)
+ return atmel_nand_pmecc_read_pg(chip, buf, oob_required, page,
+ raw);
+
+ nc->op.cmds[nc->op.ncmds++] = NAND_CMD_READ0;
+
+ if (mtd->writesize > 512)
+ nc->op.cmds[nc->op.ncmds++] = NAND_CMD_READSTART;
+
+ atmel_nfc_set_op_addr(chip, page, 0x0);
+ nc->op.cs = nand->activecs->id;
+ nc->op.data = ATMEL_NFC_READ_DATA;
+
+ ret = atmel_nand_pmecc_enable(chip, NAND_ECC_READ, raw);
+ if (ret)
+ return ret;
+
+ ret = atmel_nfc_exec_op(nc, false);
+ if (ret) {
+ atmel_nand_pmecc_disable(chip, raw);
+ dev_err(nc->base.dev,
+ "Failed to load NAND page data (err = %d)\n",
+ ret);
+ return ret;
+ }
+
+ atmel_nfc_copy_from_sram(chip, buf, true);
+
+ ret = atmel_nand_pmecc_correct_data(chip, buf, raw);
+
+ atmel_nand_pmecc_disable(chip, raw);
+
+ return ret;
+}
+
+static int atmel_hsmc_nand_pmecc_read_page(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ return atmel_hsmc_nand_pmecc_read_pg(chip, buf, oob_required, page,
+ false);
+}
+
+static int atmel_hsmc_nand_pmecc_read_page_raw(struct nand_chip *chip,
+ u8 *buf, int oob_required,
+ int page)
+{
+ return atmel_hsmc_nand_pmecc_read_pg(chip, buf, oob_required, page,
+ true);
+}
+
+static int atmel_nand_pmecc_init(struct nand_chip *chip)
+{
+ const struct nand_ecc_props *requirements =
+ nanddev_get_ecc_requirements(&chip->base);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_device *nanddev = mtd_to_nanddev(mtd);
+ struct atmel_nand *nand = to_atmel_nand(chip);
+ struct atmel_nand_controller *nc;
+ struct atmel_pmecc_user_req req;
+
+ nc = to_nand_controller(chip->controller);
+
+ if (!nc->pmecc) {
+ dev_err(nc->dev, "HW ECC not supported\n");
+ return -ENOTSUPP;
+ }
+
+ if (nc->caps->legacy_of_bindings) {
+ u32 val;
+
+ if (!of_property_read_u32(nc->dev->of_node, "atmel,pmecc-cap",
+ &val))
+ chip->ecc.strength = val;
+
+ if (!of_property_read_u32(nc->dev->of_node,
+ "atmel,pmecc-sector-size",
+ &val))
+ chip->ecc.size = val;
+ }
+
+ if (nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH)
+ req.ecc.strength = ATMEL_PMECC_MAXIMIZE_ECC_STRENGTH;
+ else if (chip->ecc.strength)
+ req.ecc.strength = chip->ecc.strength;
+ else if (requirements->strength)
+ req.ecc.strength = requirements->strength;
+ else
+ req.ecc.strength = ATMEL_PMECC_MAXIMIZE_ECC_STRENGTH;
+
+ if (chip->ecc.size)
+ req.ecc.sectorsize = chip->ecc.size;
+ else if (requirements->step_size)
+ req.ecc.sectorsize = requirements->step_size;
+ else
+ req.ecc.sectorsize = ATMEL_PMECC_SECTOR_SIZE_AUTO;
+
+ req.pagesize = mtd->writesize;
+ req.oobsize = mtd->oobsize;
+
+ if (mtd->writesize <= 512) {
+ req.ecc.bytes = 4;
+ req.ecc.ooboffset = 0;
+ } else {
+ req.ecc.bytes = mtd->oobsize - 2;
+ req.ecc.ooboffset = ATMEL_PMECC_OOBOFFSET_AUTO;
+ }
+
+ nand->pmecc = atmel_pmecc_create_user(nc->pmecc, &req);
+ if (IS_ERR(nand->pmecc))
+ return PTR_ERR(nand->pmecc);
+
+ chip->ecc.algo = NAND_ECC_ALGO_BCH;
+ chip->ecc.size = req.ecc.sectorsize;
+ chip->ecc.bytes = req.ecc.bytes / req.ecc.nsectors;
+ chip->ecc.strength = req.ecc.strength;
+
+ chip->options |= NAND_NO_SUBPAGE_WRITE;
+
+ mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout());
+
+ return 0;
+}
+
+static int atmel_nand_ecc_init(struct nand_chip *chip)
+{
+ struct atmel_nand_controller *nc;
+ int ret;
+
+ nc = to_nand_controller(chip->controller);
+
+ switch (chip->ecc.engine_type) {
+ case NAND_ECC_ENGINE_TYPE_NONE:
+ case NAND_ECC_ENGINE_TYPE_SOFT:
+ /*
+ * Nothing to do, the core will initialize everything for us.
+ */
+ break;
+
+ case NAND_ECC_ENGINE_TYPE_ON_HOST:
+ ret = atmel_nand_pmecc_init(chip);
+ if (ret)
+ return ret;
+
+ chip->ecc.read_page = atmel_nand_pmecc_read_page;
+ chip->ecc.write_page = atmel_nand_pmecc_write_page;
+ chip->ecc.read_page_raw = atmel_nand_pmecc_read_page_raw;
+ chip->ecc.write_page_raw = atmel_nand_pmecc_write_page_raw;
+ break;
+
+ default:
+ /* Other modes are not supported. */
+ dev_err(nc->dev, "Unsupported ECC mode: %d\n",
+ chip->ecc.engine_type);
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static int atmel_hsmc_nand_ecc_init(struct nand_chip *chip)
+{
+ int ret;
+
+ ret = atmel_nand_ecc_init(chip);
+ if (ret)
+ return ret;
+
+ if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
+ return 0;
+
+ /* Adjust the ECC operations for the HSMC IP. */
+ chip->ecc.read_page = atmel_hsmc_nand_pmecc_read_page;
+ chip->ecc.write_page = atmel_hsmc_nand_pmecc_write_page;
+ chip->ecc.read_page_raw = atmel_hsmc_nand_pmecc_read_page_raw;
+ chip->ecc.write_page_raw = atmel_hsmc_nand_pmecc_write_page_raw;
+
+ return 0;
+}
+
+static int atmel_smc_nand_prepare_smcconf(struct atmel_nand *nand,
+ const struct nand_interface_config *conf,
+ struct atmel_smc_cs_conf *smcconf)
+{
+ u32 ncycles, totalcycles, timeps, mckperiodps;
+ struct atmel_nand_controller *nc;
+ int ret;
+
+ nc = to_nand_controller(nand->base.controller);
+
+ /* DDR interface not supported. */
+ if (!nand_interface_is_sdr(conf))
+ return -ENOTSUPP;
+
+ /*
+ * tRC < 30ns implies EDO mode. This controller does not support this
+ * mode.
+ */
+ if (conf->timings.sdr.tRC_min < 30000)
+ return -ENOTSUPP;
+
+ atmel_smc_cs_conf_init(smcconf);
+
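+ /*
+ * Master clock period in picoseconds: all the ONFI SDR timings
+ * used below are expressed in ps.
+ */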
+ mckperiodps = NSEC_PER_SEC / clk_get_rate(nc->mck);
+ mckperiodps *= 1000;
+
+ /*
+ * Set write pulse timing. This one is easy to extract:
+ *
+ * NWE_PULSE = tWP
+ */
+ ncycles = DIV_ROUND_UP(conf->timings.sdr.tWP_min, mckperiodps);
+ totalcycles = ncycles;
+ ret = atmel_smc_cs_conf_set_pulse(smcconf, ATMEL_SMC_NWE_SHIFT,
+ ncycles);
+ if (ret)
+ return ret;
+
+ /*
+ * The write setup timing depends on the operation done on the NAND.
+ * All operations go through the same data bus, but the operation
+ * type depends on the address we are writing to (ALE/CLE address
+ * lines).
+ * Since we have no way to distinguish the different operations at
+ * the SMC level, we must consider the worst case (the biggest setup
+ * time among all operation types):
+ *
+ * NWE_SETUP = max(tCLS, tCS, tALS, tDS) - NWE_PULSE
+ */
+ timeps = max3(conf->timings.sdr.tCLS_min, conf->timings.sdr.tCS_min,
+ conf->timings.sdr.tALS_min);
+ timeps = max(timeps, conf->timings.sdr.tDS_min);
+ ncycles = DIV_ROUND_UP(timeps, mckperiodps);
+ ncycles = ncycles > totalcycles ? ncycles - totalcycles : 0;
+ totalcycles += ncycles;
+ ret = atmel_smc_cs_conf_set_setup(smcconf, ATMEL_SMC_NWE_SHIFT,
+ ncycles);
+ if (ret)
+ return ret;
+
+ /*
+ * As with the write setup timing, the write hold timing depends on
+ * the operation done on the NAND:
+ *
+ * NWE_HOLD = max(tCLH, tCH, tALH, tDH, tWH)
+ */
+ timeps = max3(conf->timings.sdr.tCLH_min, conf->timings.sdr.tCH_min,
+ conf->timings.sdr.tALH_min);
+ timeps = max3(timeps, conf->timings.sdr.tDH_min,
+ conf->timings.sdr.tWH_min);
+ ncycles = DIV_ROUND_UP(timeps, mckperiodps);
+ totalcycles += ncycles;
+
+ /*
+ * The write cycle timing directly matches tWC, but also depends on
+ * the setup and hold timings we calculated earlier, which gives:
+ *
+ * NWE_CYCLE = max(tWC, NWE_SETUP + NWE_PULSE + NWE_HOLD)
+ */
+ ncycles = DIV_ROUND_UP(conf->timings.sdr.tWC_min, mckperiodps);
+ ncycles = max(totalcycles, ncycles);
+ ret = atmel_smc_cs_conf_set_cycle(smcconf, ATMEL_SMC_NWE_SHIFT,
+ ncycles);
+ if (ret)
+ return ret;
+
+ /*
+ * We don't want the CS line to be toggled between each byte/word
+ * transfer to the NAND. The only way to guarantee that is to have the
+ * NCS_{WR,RD}_{SETUP,HOLD} timings set to 0, which in turn means:
+ *
+ * NCS_WR_PULSE = NWE_CYCLE
+ */
+ ret = atmel_smc_cs_conf_set_pulse(smcconf, ATMEL_SMC_NCS_WR_SHIFT,
+ ncycles);
+ if (ret)
+ return ret;
+
+ /*
+ * As with the write timings, the read hold timing depends on the
+ * operation done on the NAND:
+ *
+ * NRD_HOLD = max(tREH, tRHOH)
+ */
+ timeps = max(conf->timings.sdr.tREH_min, conf->timings.sdr.tRHOH_min);
+ ncycles = DIV_ROUND_UP(timeps, mckperiodps);
+ totalcycles = ncycles;
+
+ /*
+ * TDF = tRHZ - NRD_HOLD
+ */
+ ncycles = DIV_ROUND_UP(conf->timings.sdr.tRHZ_max, mckperiodps);
+ ncycles -= totalcycles;
+
+ /*
+ * In ONFI 4.0 specs, tRHZ has been increased to support EDO NANDs and
+ * we might end up with a config that does not fit in the TDF field.
+ * Just take the max value in this case and hope that the NAND is more
+ * tolerant than advertised.
+ */
+ if (ncycles > ATMEL_SMC_MODE_TDF_MAX)
+ ncycles = ATMEL_SMC_MODE_TDF_MAX;
+ else if (ncycles < ATMEL_SMC_MODE_TDF_MIN)
+ ncycles = ATMEL_SMC_MODE_TDF_MIN;
+
+ smcconf->mode |= ATMEL_SMC_MODE_TDF(ncycles) |
+ ATMEL_SMC_MODE_TDFMODE_OPTIMIZED;
+
+ /*
+ * Read pulse timing directly matches tRP:
+ *
+ * NRD_PULSE = tRP
+ */
+ ncycles = DIV_ROUND_UP(conf->timings.sdr.tRP_min, mckperiodps);
+ totalcycles += ncycles;
+ ret = atmel_smc_cs_conf_set_pulse(smcconf, ATMEL_SMC_NRD_SHIFT,
+ ncycles);
+ if (ret)
+ return ret;
+
+ /*
+ * The read cycle timing directly matches tRC, but also depends on
+ * the pulse and hold timings we calculated earlier, which gives:
+ *
+ * NRD_CYCLE = max(tRC, NRD_PULSE + NRD_HOLD)
+ *
+ * NRD_SETUP is always 0.
+ */
+ ncycles = DIV_ROUND_UP(conf->timings.sdr.tRC_min, mckperiodps);
+ ncycles = max(totalcycles, ncycles);
+ ret = atmel_smc_cs_conf_set_cycle(smcconf, ATMEL_SMC_NRD_SHIFT,
+ ncycles);
+ if (ret)
+ return ret;
+
+ /*
+ * We don't want the CS line to be toggled between each byte/word
+ * transfer from the NAND. The only way to guarantee that is to have
+ * the NCS_{WR,RD}_{SETUP,HOLD} timings set to 0, which in turn means:
+ *
+ * NCS_RD_PULSE = NRD_CYCLE
+ */
+ ret = atmel_smc_cs_conf_set_pulse(smcconf, ATMEL_SMC_NCS_RD_SHIFT,
+ ncycles);
+ if (ret)
+ return ret;
+
+	/* The Txxx timings directly match their tXXX counterparts. */
+ ncycles = DIV_ROUND_UP(conf->timings.sdr.tCLR_min, mckperiodps);
+ ret = atmel_smc_cs_conf_set_timing(smcconf,
+ ATMEL_HSMC_TIMINGS_TCLR_SHIFT,
+ ncycles);
+ if (ret)
+ return ret;
+
+ ncycles = DIV_ROUND_UP(conf->timings.sdr.tADL_min, mckperiodps);
+ ret = atmel_smc_cs_conf_set_timing(smcconf,
+ ATMEL_HSMC_TIMINGS_TADL_SHIFT,
+ ncycles);
+ /*
+ * Version 4 of the ONFI spec mandates that tADL be at least 400
+ * nanoseconds, but, depending on the master clock rate, 400 ns may not
+ * fit in the tADL field of the SMC reg. We need to relax the check and
+ * accept the -ERANGE return code.
+ *
+ * Note that previous versions of the ONFI spec had a lower tADL_min
+	 * (100 or 200 ns). It's not clear why this timing constraint was
+	 * increased, but it seems most NANDs are fine with values lower
+	 * than 400 ns, so we should be safe.
+ */
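+	/*
+	 * Example with assumed numbers: at a 133 MHz master clock
+	 * (mckperiodps ~= 7519), 400000 ps of tADL translates to
+	 * DIV_ROUND_UP(400000, 7519) = 54 cycles, too large for the narrow
+	 * TADL field, so the atmel_smc_cs_conf_set_timing() call above
+	 * returns -ERANGE.
+	 */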
+ if (ret && ret != -ERANGE)
+ return ret;
+
+ ncycles = DIV_ROUND_UP(conf->timings.sdr.tAR_min, mckperiodps);
+ ret = atmel_smc_cs_conf_set_timing(smcconf,
+ ATMEL_HSMC_TIMINGS_TAR_SHIFT,
+ ncycles);
+ if (ret)
+ return ret;
+
+ ncycles = DIV_ROUND_UP(conf->timings.sdr.tRR_min, mckperiodps);
+ ret = atmel_smc_cs_conf_set_timing(smcconf,
+ ATMEL_HSMC_TIMINGS_TRR_SHIFT,
+ ncycles);
+ if (ret)
+ return ret;
+
+ ncycles = DIV_ROUND_UP(conf->timings.sdr.tWB_max, mckperiodps);
+ ret = atmel_smc_cs_conf_set_timing(smcconf,
+ ATMEL_HSMC_TIMINGS_TWB_SHIFT,
+ ncycles);
+ if (ret)
+ return ret;
+
+ /* Attach the CS line to the NFC logic. */
+ smcconf->timings |= ATMEL_HSMC_TIMINGS_NFSEL;
+
+ /* Set the appropriate data bus width. */
+ if (nand->base.options & NAND_BUSWIDTH_16)
+ smcconf->mode |= ATMEL_SMC_MODE_DBW_16;
+
+ /* Operate in NRD/NWE READ/WRITEMODE. */
+ smcconf->mode |= ATMEL_SMC_MODE_READMODE_NRD |
+ ATMEL_SMC_MODE_WRITEMODE_NWE;
+
+ return 0;
+}
+
+static int atmel_smc_nand_setup_interface(struct atmel_nand *nand,
+ int csline,
+ const struct nand_interface_config *conf)
+{
+ struct atmel_nand_controller *nc;
+ struct atmel_smc_cs_conf smcconf;
+ struct atmel_nand_cs *cs;
+ int ret;
+
+ nc = to_nand_controller(nand->base.controller);
+
+ ret = atmel_smc_nand_prepare_smcconf(nand, conf, &smcconf);
+ if (ret)
+ return ret;
+
+ if (csline == NAND_DATA_IFACE_CHECK_ONLY)
+ return 0;
+
+ cs = &nand->cs[csline];
+ cs->smcconf = smcconf;
+ atmel_smc_cs_conf_apply(nc->smc, cs->id, &cs->smcconf);
+
+ return 0;
+}
+
+static int atmel_hsmc_nand_setup_interface(struct atmel_nand *nand,
+ int csline,
+ const struct nand_interface_config *conf)
+{
+ struct atmel_hsmc_nand_controller *nc;
+ struct atmel_smc_cs_conf smcconf;
+ struct atmel_nand_cs *cs;
+ int ret;
+
+ nc = to_hsmc_nand_controller(nand->base.controller);
+
+ ret = atmel_smc_nand_prepare_smcconf(nand, conf, &smcconf);
+ if (ret)
+ return ret;
+
+ if (csline == NAND_DATA_IFACE_CHECK_ONLY)
+ return 0;
+
+ cs = &nand->cs[csline];
+ cs->smcconf = smcconf;
+
+ if (cs->rb.type == ATMEL_NAND_NATIVE_RB)
+ cs->smcconf.timings |= ATMEL_HSMC_TIMINGS_RBNSEL(cs->rb.id);
+
+ atmel_hsmc_cs_conf_apply(nc->base.smc, nc->hsmc_layout, cs->id,
+ &cs->smcconf);
+
+ return 0;
+}
+
+static int atmel_nand_setup_interface(struct nand_chip *chip, int csline,
+ const struct nand_interface_config *conf)
+{
+ struct atmel_nand *nand = to_atmel_nand(chip);
+ struct atmel_nand_controller *nc;
+
+ nc = to_nand_controller(nand->base.controller);
+
+ if (csline >= nand->numcs ||
+ (csline < 0 && csline != NAND_DATA_IFACE_CHECK_ONLY))
+ return -EINVAL;
+
+ return nc->caps->ops->setup_interface(nand, csline, conf);
+}
+
+static int atmel_nand_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ struct atmel_nand *nand = to_atmel_nand(chip);
+ struct atmel_nand_controller *nc;
+
+ nc = to_nand_controller(nand->base.controller);
+
+ return nc->caps->ops->exec_op(nand, op, check_only);
+}
+
+static void atmel_nand_init(struct atmel_nand_controller *nc,
+ struct atmel_nand *nand)
+{
+ struct nand_chip *chip = &nand->base;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ mtd->dev.parent = nc->dev;
+ nand->base.controller = &nc->base;
+
+ if (!nc->mck || !nc->caps->ops->setup_interface)
+ chip->options |= NAND_KEEP_TIMINGS;
+
+ /*
+ * Use a bounce buffer when the buffer passed by the MTD user is not
+ * suitable for DMA.
+ */
+ if (nc->dmac)
+ chip->options |= NAND_USES_DMA;
+
+ /* Default to HW ECC if pmecc is available. */
+ if (nc->pmecc)
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+}
+
+static void atmel_smc_nand_init(struct atmel_nand_controller *nc,
+ struct atmel_nand *nand)
+{
+ struct nand_chip *chip = &nand->base;
+ struct atmel_smc_nand_controller *smc_nc;
+ int i;
+
+ atmel_nand_init(nc, nand);
+
+ smc_nc = to_smc_nand_controller(chip->controller);
+ if (!smc_nc->ebi_csa_regmap)
+ return;
+
+ /* Attach the CS to the NAND Flash logic. */
+ for (i = 0; i < nand->numcs; i++)
+ regmap_update_bits(smc_nc->ebi_csa_regmap,
+ smc_nc->ebi_csa->offs,
+ BIT(nand->cs[i].id), BIT(nand->cs[i].id));
+
+ if (smc_nc->ebi_csa->nfd0_on_d16)
+ regmap_update_bits(smc_nc->ebi_csa_regmap,
+ smc_nc->ebi_csa->offs,
+ smc_nc->ebi_csa->nfd0_on_d16,
+ smc_nc->ebi_csa->nfd0_on_d16);
+}
+
+static int atmel_nand_controller_remove_nand(struct atmel_nand *nand)
+{
+ struct nand_chip *chip = &nand->base;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ ret = mtd_device_unregister(mtd);
+ if (ret)
+ return ret;
+
+ nand_cleanup(chip);
+ list_del(&nand->node);
+
+ return 0;
+}
+
+static struct atmel_nand *atmel_nand_create(struct atmel_nand_controller *nc,
+ struct device_node *np,
+ int reg_cells)
+{
+ struct atmel_nand *nand;
+ struct gpio_desc *gpio;
+ int numcs, ret, i;
+
+ numcs = of_property_count_elems_of_size(np, "reg",
+ reg_cells * sizeof(u32));
+ if (numcs < 1) {
+ dev_err(nc->dev, "Missing or invalid reg property\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ nand = devm_kzalloc(nc->dev, struct_size(nand, cs, numcs), GFP_KERNEL);
+ if (!nand) {
+ dev_err(nc->dev, "Failed to allocate NAND object\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ nand->numcs = numcs;
+
+ gpio = devm_fwnode_gpiod_get(nc->dev, of_fwnode_handle(np),
+ "det", GPIOD_IN, "nand-det");
+ if (IS_ERR(gpio) && PTR_ERR(gpio) != -ENOENT) {
+ dev_err(nc->dev,
+ "Failed to get detect gpio (err = %ld)\n",
+ PTR_ERR(gpio));
+ return ERR_CAST(gpio);
+ }
+
+ if (!IS_ERR(gpio))
+ nand->cdgpio = gpio;
+
+ for (i = 0; i < numcs; i++) {
+ struct resource res;
+ u32 val;
+
+ ret = of_address_to_resource(np, 0, &res);
+ if (ret) {
+ dev_err(nc->dev, "Invalid reg property (err = %d)\n",
+ ret);
+ return ERR_PTR(ret);
+ }
+
+ ret = of_property_read_u32_index(np, "reg", i * reg_cells,
+ &val);
+ if (ret) {
+ dev_err(nc->dev, "Invalid reg property (err = %d)\n",
+ ret);
+ return ERR_PTR(ret);
+ }
+
+ nand->cs[i].id = val;
+
+ nand->cs[i].io.dma = res.start;
+ nand->cs[i].io.virt = devm_ioremap_resource(nc->dev, &res);
+ if (IS_ERR(nand->cs[i].io.virt))
+ return ERR_CAST(nand->cs[i].io.virt);
+
+ if (!of_property_read_u32(np, "atmel,rb", &val)) {
+ if (val > ATMEL_NFC_MAX_RB_ID)
+ return ERR_PTR(-EINVAL);
+
+ nand->cs[i].rb.type = ATMEL_NAND_NATIVE_RB;
+ nand->cs[i].rb.id = val;
+ } else {
+ gpio = devm_fwnode_gpiod_get_index(nc->dev,
+ of_fwnode_handle(np),
+ "rb", i, GPIOD_IN,
+ "nand-rb");
+ if (IS_ERR(gpio) && PTR_ERR(gpio) != -ENOENT) {
+ dev_err(nc->dev,
+ "Failed to get R/B gpio (err = %ld)\n",
+ PTR_ERR(gpio));
+ return ERR_CAST(gpio);
+ }
+
+ if (!IS_ERR(gpio)) {
+ nand->cs[i].rb.type = ATMEL_NAND_GPIO_RB;
+ nand->cs[i].rb.gpio = gpio;
+ }
+ }
+
+ gpio = devm_fwnode_gpiod_get_index(nc->dev,
+ of_fwnode_handle(np),
+ "cs", i, GPIOD_OUT_HIGH,
+ "nand-cs");
+ if (IS_ERR(gpio) && PTR_ERR(gpio) != -ENOENT) {
+ dev_err(nc->dev,
+ "Failed to get CS gpio (err = %ld)\n",
+ PTR_ERR(gpio));
+ return ERR_CAST(gpio);
+ }
+
+ if (!IS_ERR(gpio))
+ nand->cs[i].csgpio = gpio;
+ }
+
+ nand_set_flash_node(&nand->base, np);
+
+ return nand;
+}
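+
+/*
+ * For reference, a hypothetical DT child node matching what
+ * atmel_nand_create() parses; the property names come from the code above,
+ * but all values (CS id, GPIO specifier, sizes) are made up:
+ *
+ *	nand@3 {
+ *		reg = <0x3 0x0 0x800000>;
+ *		atmel,rb = <0>;
+ *		cs-gpios = <&pioD 4 GPIO_ACTIVE_HIGH>;
+ *		label = "atmel_nand";
+ *	};
+ */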
+
+static int
+atmel_nand_controller_add_nand(struct atmel_nand_controller *nc,
+ struct atmel_nand *nand)
+{
+ struct nand_chip *chip = &nand->base;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ /* No card inserted, skip this NAND. */
+ if (nand->cdgpio && gpiod_get_value(nand->cdgpio)) {
+ dev_info(nc->dev, "No SmartMedia card inserted.\n");
+ return 0;
+ }
+
+ nc->caps->ops->nand_init(nc, nand);
+
+ ret = nand_scan(chip, nand->numcs);
+ if (ret) {
+ dev_err(nc->dev, "NAND scan failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret) {
+ dev_err(nc->dev, "Failed to register mtd device: %d\n", ret);
+ nand_cleanup(chip);
+ return ret;
+ }
+
+ list_add_tail(&nand->node, &nc->chips);
+
+ return 0;
+}
+
+static int
+atmel_nand_controller_remove_nands(struct atmel_nand_controller *nc)
+{
+ struct atmel_nand *nand, *tmp;
+ int ret;
+
+ list_for_each_entry_safe(nand, tmp, &nc->chips, node) {
+ ret = atmel_nand_controller_remove_nand(nand);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+atmel_nand_controller_legacy_add_nands(struct atmel_nand_controller *nc)
+{
+ struct device *dev = nc->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct atmel_nand *nand;
+ struct gpio_desc *gpio;
+ struct resource *res;
+
+ /*
+ * Legacy bindings only allow connecting a single NAND with a unique CS
+ * line to the controller.
+ */
+ nand = devm_kzalloc(nc->dev, sizeof(*nand) + sizeof(*nand->cs),
+ GFP_KERNEL);
+ if (!nand)
+ return -ENOMEM;
+
+ nand->numcs = 1;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ nand->cs[0].io.virt = devm_ioremap_resource(dev, res);
+ if (IS_ERR(nand->cs[0].io.virt))
+ return PTR_ERR(nand->cs[0].io.virt);
+
+ nand->cs[0].io.dma = res->start;
+
+	/*
+	 * The old driver hardcoded the CS id to 3 for all sama5
+	 * controllers. Since this id is only meaningful for the sama5
+	 * controller, we can safely set it to 3 no matter the controller.
+	 * Anyone who wants to connect a NAND to a different CS line will
+	 * have to use the new bindings.
+	 */
+ nand->cs[0].id = 3;
+
+ /* R/B GPIO. */
+ gpio = devm_gpiod_get_index_optional(dev, NULL, 0, GPIOD_IN);
+ if (IS_ERR(gpio)) {
+ dev_err(dev, "Failed to get R/B gpio (err = %ld)\n",
+ PTR_ERR(gpio));
+ return PTR_ERR(gpio);
+ }
+
+ if (gpio) {
+ nand->cs[0].rb.type = ATMEL_NAND_GPIO_RB;
+ nand->cs[0].rb.gpio = gpio;
+ }
+
+ /* CS GPIO. */
+ gpio = devm_gpiod_get_index_optional(dev, NULL, 1, GPIOD_OUT_HIGH);
+ if (IS_ERR(gpio)) {
+ dev_err(dev, "Failed to get CS gpio (err = %ld)\n",
+ PTR_ERR(gpio));
+ return PTR_ERR(gpio);
+ }
+
+ nand->cs[0].csgpio = gpio;
+
+ /* Card detect GPIO. */
+ gpio = devm_gpiod_get_index_optional(nc->dev, NULL, 2, GPIOD_IN);
+ if (IS_ERR(gpio)) {
+ dev_err(dev,
+ "Failed to get detect gpio (err = %ld)\n",
+ PTR_ERR(gpio));
+ return PTR_ERR(gpio);
+ }
+
+ nand->cdgpio = gpio;
+
+ nand_set_flash_node(&nand->base, nc->dev->of_node);
+
+ return atmel_nand_controller_add_nand(nc, nand);
+}
+
+static int atmel_nand_controller_add_nands(struct atmel_nand_controller *nc)
+{
+ struct device_node *np, *nand_np;
+ struct device *dev = nc->dev;
+ int ret, reg_cells;
+ u32 val;
+
+	/* Legacy bindings describe the NAND directly on the controller node. */
+ if (nc->caps->legacy_of_bindings)
+ return atmel_nand_controller_legacy_add_nands(nc);
+
+ np = dev->of_node;
+
+ ret = of_property_read_u32(np, "#address-cells", &val);
+ if (ret) {
+ dev_err(dev, "missing #address-cells property\n");
+ return ret;
+ }
+
+ reg_cells = val;
+
+ ret = of_property_read_u32(np, "#size-cells", &val);
+ if (ret) {
+ dev_err(dev, "missing #size-cells property\n");
+ return ret;
+ }
+
+ reg_cells += val;
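+
+	/*
+	 * Example (hypothetical values): with #address-cells = <2> and
+	 * #size-cells = <1>, reg_cells = 3, i.e. each CS in a child's reg
+	 * property is described by one <cs offset size> triplet.
+	 */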
+
+ for_each_child_of_node(np, nand_np) {
+ struct atmel_nand *nand;
+
+ nand = atmel_nand_create(nc, nand_np, reg_cells);
+ if (IS_ERR(nand)) {
+ ret = PTR_ERR(nand);
+ goto err;
+ }
+
+ ret = atmel_nand_controller_add_nand(nc, nand);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ atmel_nand_controller_remove_nands(nc);
+
+ return ret;
+}
+
+static void atmel_nand_controller_cleanup(struct atmel_nand_controller *nc)
+{
+ if (nc->dmac)
+ dma_release_channel(nc->dmac);
+
+ clk_put(nc->mck);
+}
+
+static const struct atmel_smc_nand_ebi_csa_cfg at91sam9260_ebi_csa = {
+ .offs = AT91SAM9260_MATRIX_EBICSA,
+};
+
+static const struct atmel_smc_nand_ebi_csa_cfg at91sam9261_ebi_csa = {
+ .offs = AT91SAM9261_MATRIX_EBICSA,
+};
+
+static const struct atmel_smc_nand_ebi_csa_cfg at91sam9263_ebi_csa = {
+ .offs = AT91SAM9263_MATRIX_EBI0CSA,
+};
+
+static const struct atmel_smc_nand_ebi_csa_cfg at91sam9rl_ebi_csa = {
+ .offs = AT91SAM9RL_MATRIX_EBICSA,
+};
+
+static const struct atmel_smc_nand_ebi_csa_cfg at91sam9g45_ebi_csa = {
+ .offs = AT91SAM9G45_MATRIX_EBICSA,
+};
+
+static const struct atmel_smc_nand_ebi_csa_cfg at91sam9n12_ebi_csa = {
+ .offs = AT91SAM9N12_MATRIX_EBICSA,
+};
+
+static const struct atmel_smc_nand_ebi_csa_cfg at91sam9x5_ebi_csa = {
+ .offs = AT91SAM9X5_MATRIX_EBICSA,
+};
+
+static const struct atmel_smc_nand_ebi_csa_cfg sam9x60_ebi_csa = {
+ .offs = AT91_SFR_CCFG_EBICSA,
+ .nfd0_on_d16 = AT91_SFR_CCFG_NFD0_ON_D16,
+};
+
+static const struct of_device_id atmel_ebi_csa_regmap_of_ids[] = {
+ {
+ .compatible = "atmel,at91sam9260-matrix",
+ .data = &at91sam9260_ebi_csa,
+ },
+ {
+ .compatible = "atmel,at91sam9261-matrix",
+ .data = &at91sam9261_ebi_csa,
+ },
+ {
+ .compatible = "atmel,at91sam9263-matrix",
+ .data = &at91sam9263_ebi_csa,
+ },
+ {
+ .compatible = "atmel,at91sam9rl-matrix",
+ .data = &at91sam9rl_ebi_csa,
+ },
+ {
+ .compatible = "atmel,at91sam9g45-matrix",
+ .data = &at91sam9g45_ebi_csa,
+ },
+ {
+ .compatible = "atmel,at91sam9n12-matrix",
+ .data = &at91sam9n12_ebi_csa,
+ },
+ {
+ .compatible = "atmel,at91sam9x5-matrix",
+ .data = &at91sam9x5_ebi_csa,
+ },
+ {
+ .compatible = "microchip,sam9x60-sfr",
+ .data = &sam9x60_ebi_csa,
+ },
+ { /* sentinel */ },
+};
+
+static int atmel_nand_attach_chip(struct nand_chip *chip)
+{
+ struct atmel_nand_controller *nc = to_nand_controller(chip->controller);
+ struct atmel_nand *nand = to_atmel_nand(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ ret = nc->caps->ops->ecc_init(chip);
+ if (ret)
+ return ret;
+
+ if (nc->caps->legacy_of_bindings || !nc->dev->of_node) {
+ /*
+ * We keep the MTD name unchanged to avoid breaking platforms
+ * where the MTD cmdline parser is used and the bootloader
+ * has not been updated to use the new naming scheme.
+ */
+ mtd->name = "atmel_nand";
+ } else if (!mtd->name) {
+ /*
+ * If the new bindings are used and the bootloader has not been
+ * updated to pass a new mtdparts parameter on the cmdline, you
+ * should define the following property in your nand node:
+ *
+ * label = "atmel_nand";
+ *
+ * This way, mtd->name will be set by the core when
+ * nand_set_flash_node() is called.
+ */
+ mtd->name = devm_kasprintf(nc->dev, GFP_KERNEL,
+ "%s:nand.%d", dev_name(nc->dev),
+ nand->cs[0].id);
+ if (!mtd->name) {
+ dev_err(nc->dev, "Failed to allocate mtd->name\n");
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static const struct nand_controller_ops atmel_nand_controller_ops = {
+ .attach_chip = atmel_nand_attach_chip,
+ .setup_interface = atmel_nand_setup_interface,
+ .exec_op = atmel_nand_exec_op,
+};
+
+static int atmel_nand_controller_init(struct atmel_nand_controller *nc,
+ struct platform_device *pdev,
+ const struct atmel_nand_controller_caps *caps)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ int ret;
+
+ nand_controller_init(&nc->base);
+ nc->base.ops = &atmel_nand_controller_ops;
+ INIT_LIST_HEAD(&nc->chips);
+ nc->dev = dev;
+ nc->caps = caps;
+
+ platform_set_drvdata(pdev, nc);
+
+ nc->pmecc = devm_atmel_pmecc_get(dev);
+ if (IS_ERR(nc->pmecc))
+ return dev_err_probe(dev, PTR_ERR(nc->pmecc),
+ "Could not get PMECC object\n");
+
+ if (nc->caps->has_dma && !atmel_nand_avoid_dma) {
+ dma_cap_mask_t mask;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_MEMCPY, mask);
+
+ nc->dmac = dma_request_channel(mask, NULL, NULL);
+ if (!nc->dmac)
+ dev_err(nc->dev, "Failed to request DMA channel\n");
+ }
+
+ /* We do not retrieve the SMC syscon when parsing old DTs. */
+ if (nc->caps->legacy_of_bindings)
+ return 0;
+
+ nc->mck = of_clk_get(dev->parent->of_node, 0);
+ if (IS_ERR(nc->mck)) {
+ dev_err(dev, "Failed to retrieve MCK clk\n");
+ ret = PTR_ERR(nc->mck);
+ goto out_release_dma;
+ }
+
+ np = of_parse_phandle(dev->parent->of_node, "atmel,smc", 0);
+ if (!np) {
+ dev_err(dev, "Missing or invalid atmel,smc property\n");
+ ret = -EINVAL;
+ goto out_release_dma;
+ }
+
+ nc->smc = syscon_node_to_regmap(np);
+ of_node_put(np);
+ if (IS_ERR(nc->smc)) {
+ ret = PTR_ERR(nc->smc);
+ dev_err(dev, "Could not get SMC regmap (err = %d)\n", ret);
+ goto out_release_dma;
+ }
+
+ return 0;
+
+out_release_dma:
+ if (nc->dmac)
+ dma_release_channel(nc->dmac);
+
+ return ret;
+}
+
+static int
+atmel_smc_nand_controller_init(struct atmel_smc_nand_controller *nc)
+{
+ struct device *dev = nc->base.dev;
+ const struct of_device_id *match;
+ struct device_node *np;
+ int ret;
+
+ /* We do not retrieve the EBICSA regmap when parsing old DTs. */
+ if (nc->base.caps->legacy_of_bindings)
+ return 0;
+
+ np = of_parse_phandle(dev->parent->of_node,
+ nc->base.caps->ebi_csa_regmap_name, 0);
+ if (!np)
+ return 0;
+
+ match = of_match_node(atmel_ebi_csa_regmap_of_ids, np);
+ if (!match) {
+ of_node_put(np);
+ return 0;
+ }
+
+ nc->ebi_csa_regmap = syscon_node_to_regmap(np);
+ of_node_put(np);
+ if (IS_ERR(nc->ebi_csa_regmap)) {
+ ret = PTR_ERR(nc->ebi_csa_regmap);
+ dev_err(dev, "Could not get EBICSA regmap (err = %d)\n", ret);
+ return ret;
+ }
+
+ nc->ebi_csa = (struct atmel_smc_nand_ebi_csa_cfg *)match->data;
+
+	/*
+	 * The at91sam9263 has 2 EBIs. If the NAND controller is connected
+	 * to EBI1, add 4 to ->ebi_csa->offs.
+	 */
+ if (of_device_is_compatible(dev->parent->of_node,
+ "atmel,at91sam9263-ebi1"))
+ nc->ebi_csa->offs += 4;
+
+ return 0;
+}
+
+static int
+atmel_hsmc_nand_controller_legacy_init(struct atmel_hsmc_nand_controller *nc)
+{
+ struct regmap_config regmap_conf = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ };
+
+ struct device *dev = nc->base.dev;
+ struct device_node *nand_np, *nfc_np;
+ void __iomem *iomem;
+ struct resource res;
+ int ret;
+
+ nand_np = dev->of_node;
+ nfc_np = of_get_compatible_child(dev->of_node, "atmel,sama5d3-nfc");
+ if (!nfc_np) {
+ dev_err(dev, "Could not find device node for sama5d3-nfc\n");
+ return -ENODEV;
+ }
+
+ nc->clk = of_clk_get(nfc_np, 0);
+ if (IS_ERR(nc->clk)) {
+ ret = PTR_ERR(nc->clk);
+ dev_err(dev, "Failed to retrieve HSMC clock (err = %d)\n",
+ ret);
+ goto out;
+ }
+
+ ret = clk_prepare_enable(nc->clk);
+ if (ret) {
+ dev_err(dev, "Failed to enable the HSMC clock (err = %d)\n",
+ ret);
+ goto out;
+ }
+
+ nc->irq = of_irq_get(nand_np, 0);
+ if (nc->irq <= 0) {
+ ret = nc->irq ?: -ENXIO;
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get IRQ number (err = %d)\n",
+ ret);
+ goto out;
+ }
+
+ ret = of_address_to_resource(nfc_np, 0, &res);
+ if (ret) {
+ dev_err(dev, "Invalid or missing NFC IO resource (err = %d)\n",
+ ret);
+ goto out;
+ }
+
+ iomem = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(iomem)) {
+ ret = PTR_ERR(iomem);
+ goto out;
+ }
+
+ regmap_conf.name = "nfc-io";
+ regmap_conf.max_register = resource_size(&res) - 4;
+ nc->io = devm_regmap_init_mmio(dev, iomem, &regmap_conf);
+ if (IS_ERR(nc->io)) {
+ ret = PTR_ERR(nc->io);
+ dev_err(dev, "Could not create NFC IO regmap (err = %d)\n",
+ ret);
+ goto out;
+ }
+
+ ret = of_address_to_resource(nfc_np, 1, &res);
+ if (ret) {
+ dev_err(dev, "Invalid or missing HSMC resource (err = %d)\n",
+ ret);
+ goto out;
+ }
+
+ iomem = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(iomem)) {
+ ret = PTR_ERR(iomem);
+ goto out;
+ }
+
+ regmap_conf.name = "smc";
+ regmap_conf.max_register = resource_size(&res) - 4;
+ nc->base.smc = devm_regmap_init_mmio(dev, iomem, &regmap_conf);
+ if (IS_ERR(nc->base.smc)) {
+ ret = PTR_ERR(nc->base.smc);
+		dev_err(dev, "Could not create SMC regmap (err = %d)\n",
+ ret);
+ goto out;
+ }
+
+ ret = of_address_to_resource(nfc_np, 2, &res);
+ if (ret) {
+ dev_err(dev, "Invalid or missing SRAM resource (err = %d)\n",
+ ret);
+ goto out;
+ }
+
+ nc->sram.virt = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(nc->sram.virt)) {
+ ret = PTR_ERR(nc->sram.virt);
+ goto out;
+ }
+
+ nc->sram.dma = res.start;
+
+out:
+ of_node_put(nfc_np);
+
+ return ret;
+}
+
+static int
+atmel_hsmc_nand_controller_init(struct atmel_hsmc_nand_controller *nc)
+{
+ struct device *dev = nc->base.dev;
+ struct device_node *np;
+ int ret;
+
+ np = of_parse_phandle(dev->parent->of_node, "atmel,smc", 0);
+ if (!np) {
+ dev_err(dev, "Missing or invalid atmel,smc property\n");
+ return -EINVAL;
+ }
+
+ nc->hsmc_layout = atmel_hsmc_get_reg_layout(np);
+
+ nc->irq = of_irq_get(np, 0);
+ of_node_put(np);
+ if (nc->irq <= 0) {
+ ret = nc->irq ?: -ENXIO;
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get IRQ number (err = %d)\n",
+ ret);
+ return ret;
+ }
+
+ np = of_parse_phandle(dev->of_node, "atmel,nfc-io", 0);
+ if (!np) {
+ dev_err(dev, "Missing or invalid atmel,nfc-io property\n");
+ return -EINVAL;
+ }
+
+ nc->io = syscon_node_to_regmap(np);
+ of_node_put(np);
+ if (IS_ERR(nc->io)) {
+ ret = PTR_ERR(nc->io);
+ dev_err(dev, "Could not get NFC IO regmap (err = %d)\n", ret);
+ return ret;
+ }
+
+ nc->sram.pool = of_gen_pool_get(nc->base.dev->of_node,
+ "atmel,nfc-sram", 0);
+ if (!nc->sram.pool) {
+ dev_err(nc->base.dev, "Missing SRAM\n");
+ return -ENOMEM;
+ }
+
+ nc->sram.virt = (void __iomem *)gen_pool_dma_alloc(nc->sram.pool,
+ ATMEL_NFC_SRAM_SIZE,
+ &nc->sram.dma);
+ if (!nc->sram.virt) {
+ dev_err(nc->base.dev,
+ "Could not allocate memory from the NFC SRAM pool\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int
+atmel_hsmc_nand_controller_remove(struct atmel_nand_controller *nc)
+{
+ struct atmel_hsmc_nand_controller *hsmc_nc;
+ int ret;
+
+ ret = atmel_nand_controller_remove_nands(nc);
+ if (ret)
+ return ret;
+
+ hsmc_nc = container_of(nc, struct atmel_hsmc_nand_controller, base);
+ regmap_write(hsmc_nc->base.smc, ATMEL_HSMC_NFC_CTRL,
+ ATMEL_HSMC_NFC_CTRL_DIS);
+
+ if (hsmc_nc->sram.pool)
+ gen_pool_free(hsmc_nc->sram.pool,
+ (unsigned long)hsmc_nc->sram.virt,
+ ATMEL_NFC_SRAM_SIZE);
+
+ if (hsmc_nc->clk) {
+ clk_disable_unprepare(hsmc_nc->clk);
+ clk_put(hsmc_nc->clk);
+ }
+
+ atmel_nand_controller_cleanup(nc);
+
+ return 0;
+}
+
+static int atmel_hsmc_nand_controller_probe(struct platform_device *pdev,
+ const struct atmel_nand_controller_caps *caps)
+{
+ struct device *dev = &pdev->dev;
+ struct atmel_hsmc_nand_controller *nc;
+ int ret;
+
+ nc = devm_kzalloc(dev, sizeof(*nc), GFP_KERNEL);
+ if (!nc)
+ return -ENOMEM;
+
+ ret = atmel_nand_controller_init(&nc->base, pdev, caps);
+ if (ret)
+ return ret;
+
+ if (caps->legacy_of_bindings)
+ ret = atmel_hsmc_nand_controller_legacy_init(nc);
+ else
+ ret = atmel_hsmc_nand_controller_init(nc);
+
+ if (ret)
+ return ret;
+
+ /* Make sure all irqs are masked before registering our IRQ handler. */
+ regmap_write(nc->base.smc, ATMEL_HSMC_NFC_IDR, 0xffffffff);
+ ret = devm_request_irq(dev, nc->irq, atmel_nfc_interrupt,
+ IRQF_SHARED, "nfc", nc);
+ if (ret) {
+ dev_err(dev,
+			"Could not register NFC interrupt handler (err = %d)\n",
+ ret);
+ goto err;
+ }
+
+ /* Initial NFC configuration. */
+ regmap_write(nc->base.smc, ATMEL_HSMC_NFC_CFG,
+ ATMEL_HSMC_NFC_CFG_DTO_MAX);
+ regmap_write(nc->base.smc, ATMEL_HSMC_NFC_CTRL,
+ ATMEL_HSMC_NFC_CTRL_EN);
+
+ ret = atmel_nand_controller_add_nands(&nc->base);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ atmel_hsmc_nand_controller_remove(&nc->base);
+
+ return ret;
+}
+
+static const struct atmel_nand_controller_ops atmel_hsmc_nc_ops = {
+ .probe = atmel_hsmc_nand_controller_probe,
+ .remove = atmel_hsmc_nand_controller_remove,
+ .ecc_init = atmel_hsmc_nand_ecc_init,
+ .nand_init = atmel_nand_init,
+ .setup_interface = atmel_hsmc_nand_setup_interface,
+ .exec_op = atmel_hsmc_nand_exec_op,
+};
+
+static const struct atmel_nand_controller_caps atmel_sama5_nc_caps = {
+ .has_dma = true,
+ .ale_offs = BIT(21),
+ .cle_offs = BIT(22),
+ .ops = &atmel_hsmc_nc_ops,
+};
+
+/* Only used to parse old bindings. */
+static const struct atmel_nand_controller_caps atmel_sama5_nand_caps = {
+ .has_dma = true,
+ .ale_offs = BIT(21),
+ .cle_offs = BIT(22),
+ .ops = &atmel_hsmc_nc_ops,
+ .legacy_of_bindings = true,
+};
+
+static int atmel_smc_nand_controller_probe(struct platform_device *pdev,
+ const struct atmel_nand_controller_caps *caps)
+{
+ struct device *dev = &pdev->dev;
+ struct atmel_smc_nand_controller *nc;
+ int ret;
+
+ nc = devm_kzalloc(dev, sizeof(*nc), GFP_KERNEL);
+ if (!nc)
+ return -ENOMEM;
+
+ ret = atmel_nand_controller_init(&nc->base, pdev, caps);
+ if (ret)
+ return ret;
+
+ ret = atmel_smc_nand_controller_init(nc);
+ if (ret)
+ return ret;
+
+ return atmel_nand_controller_add_nands(&nc->base);
+}
+
+static int
+atmel_smc_nand_controller_remove(struct atmel_nand_controller *nc)
+{
+ int ret;
+
+ ret = atmel_nand_controller_remove_nands(nc);
+ if (ret)
+ return ret;
+
+ atmel_nand_controller_cleanup(nc);
+
+ return 0;
+}
+
+/*
+ * The SMC reg layout of the at91rm9200 is completely different, which
+ * prevents us from re-using atmel_smc_nand_setup_interface() for the
+ * ->setup_interface() hook.
+ * At this point, there's no support for the at91rm9200 SMC IP, so we leave
+ * ->setup_interface() unassigned.
+ */
+static const struct atmel_nand_controller_ops at91rm9200_nc_ops = {
+ .probe = atmel_smc_nand_controller_probe,
+ .remove = atmel_smc_nand_controller_remove,
+ .ecc_init = atmel_nand_ecc_init,
+ .nand_init = atmel_smc_nand_init,
+ .exec_op = atmel_smc_nand_exec_op,
+};
+
+static const struct atmel_nand_controller_caps atmel_rm9200_nc_caps = {
+ .ale_offs = BIT(21),
+ .cle_offs = BIT(22),
+ .ebi_csa_regmap_name = "atmel,matrix",
+ .ops = &at91rm9200_nc_ops,
+};
+
+static const struct atmel_nand_controller_ops atmel_smc_nc_ops = {
+ .probe = atmel_smc_nand_controller_probe,
+ .remove = atmel_smc_nand_controller_remove,
+ .ecc_init = atmel_nand_ecc_init,
+ .nand_init = atmel_smc_nand_init,
+ .setup_interface = atmel_smc_nand_setup_interface,
+ .exec_op = atmel_smc_nand_exec_op,
+};
+
+static const struct atmel_nand_controller_caps atmel_sam9260_nc_caps = {
+ .ale_offs = BIT(21),
+ .cle_offs = BIT(22),
+ .ebi_csa_regmap_name = "atmel,matrix",
+ .ops = &atmel_smc_nc_ops,
+};
+
+static const struct atmel_nand_controller_caps atmel_sam9261_nc_caps = {
+ .ale_offs = BIT(22),
+ .cle_offs = BIT(21),
+ .ebi_csa_regmap_name = "atmel,matrix",
+ .ops = &atmel_smc_nc_ops,
+};
+
+static const struct atmel_nand_controller_caps atmel_sam9g45_nc_caps = {
+ .has_dma = true,
+ .ale_offs = BIT(21),
+ .cle_offs = BIT(22),
+ .ebi_csa_regmap_name = "atmel,matrix",
+ .ops = &atmel_smc_nc_ops,
+};
+
+static const struct atmel_nand_controller_caps microchip_sam9x60_nc_caps = {
+ .has_dma = true,
+ .ale_offs = BIT(21),
+ .cle_offs = BIT(22),
+ .ebi_csa_regmap_name = "microchip,sfr",
+ .ops = &atmel_smc_nc_ops,
+};
+
+/* Only used to parse old bindings. */
+static const struct atmel_nand_controller_caps atmel_rm9200_nand_caps = {
+ .ale_offs = BIT(21),
+ .cle_offs = BIT(22),
+ .ops = &atmel_smc_nc_ops,
+ .legacy_of_bindings = true,
+};
+
+static const struct atmel_nand_controller_caps atmel_sam9261_nand_caps = {
+ .ale_offs = BIT(22),
+ .cle_offs = BIT(21),
+ .ops = &atmel_smc_nc_ops,
+ .legacy_of_bindings = true,
+};
+
+static const struct atmel_nand_controller_caps atmel_sam9g45_nand_caps = {
+ .has_dma = true,
+ .ale_offs = BIT(21),
+ .cle_offs = BIT(22),
+ .ops = &atmel_smc_nc_ops,
+ .legacy_of_bindings = true,
+};
+
+static const struct of_device_id atmel_nand_controller_of_ids[] = {
+ {
+ .compatible = "atmel,at91rm9200-nand-controller",
+ .data = &atmel_rm9200_nc_caps,
+ },
+ {
+ .compatible = "atmel,at91sam9260-nand-controller",
+ .data = &atmel_sam9260_nc_caps,
+ },
+ {
+ .compatible = "atmel,at91sam9261-nand-controller",
+ .data = &atmel_sam9261_nc_caps,
+ },
+ {
+ .compatible = "atmel,at91sam9g45-nand-controller",
+ .data = &atmel_sam9g45_nc_caps,
+ },
+ {
+ .compatible = "atmel,sama5d3-nand-controller",
+ .data = &atmel_sama5_nc_caps,
+ },
+ {
+ .compatible = "microchip,sam9x60-nand-controller",
+ .data = &microchip_sam9x60_nc_caps,
+ },
+ /* Support for old/deprecated bindings: */
+ {
+ .compatible = "atmel,at91rm9200-nand",
+ .data = &atmel_rm9200_nand_caps,
+ },
+ {
+ .compatible = "atmel,sama5d4-nand",
+ .data = &atmel_rm9200_nand_caps,
+ },
+ {
+ .compatible = "atmel,sama5d2-nand",
+ .data = &atmel_rm9200_nand_caps,
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, atmel_nand_controller_of_ids);
+
+static int atmel_nand_controller_probe(struct platform_device *pdev)
+{
+ const struct atmel_nand_controller_caps *caps;
+
+ if (pdev->id_entry)
+ caps = (void *)pdev->id_entry->driver_data;
+ else
+ caps = of_device_get_match_data(&pdev->dev);
+
+ if (!caps) {
+ dev_err(&pdev->dev, "Could not retrieve NFC caps\n");
+ return -EINVAL;
+ }
+
+ if (caps->legacy_of_bindings) {
+ struct device_node *nfc_node;
+ u32 ale_offs = 21;
+
+ /*
+ * If we are parsing legacy DT props and the DT contains a
+ * valid NFC node, forward the request to the sama5 logic.
+ */
+ nfc_node = of_get_compatible_child(pdev->dev.of_node,
+ "atmel,sama5d3-nfc");
+ if (nfc_node) {
+ caps = &atmel_sama5_nand_caps;
+ of_node_put(nfc_node);
+ }
+
+ /*
+		 * Even if the compatible string says we are dealing with an
+		 * at91rm9200 controller, the atmel,nand-has-dma property
+		 * specifies that this controller supports DMA, which means
+		 * we are in fact dealing with an at91sam9g45+ controller.
+ */
+ if (!caps->has_dma &&
+ of_property_read_bool(pdev->dev.of_node,
+ "atmel,nand-has-dma"))
+ caps = &atmel_sam9g45_nand_caps;
+
+ /*
+ * All SoCs except the at91sam9261 are assigning ALE to A21 and
+ * CLE to A22. If atmel,nand-addr-offset != 21 this means we're
+ * actually dealing with an at91sam9261 controller.
+ */
+ of_property_read_u32(pdev->dev.of_node,
+ "atmel,nand-addr-offset", &ale_offs);
+ if (ale_offs != 21)
+ caps = &atmel_sam9261_nand_caps;
+ }
+
+ return caps->ops->probe(pdev, caps);
+}
+
+static int atmel_nand_controller_remove(struct platform_device *pdev)
+{
+ struct atmel_nand_controller *nc = platform_get_drvdata(pdev);
+
+ return nc->caps->ops->remove(nc);
+}
+
+static __maybe_unused int atmel_nand_controller_resume(struct device *dev)
+{
+ struct atmel_nand_controller *nc = dev_get_drvdata(dev);
+ struct atmel_nand *nand;
+
+ if (nc->pmecc)
+ atmel_pmecc_reset(nc->pmecc);
+
+ list_for_each_entry(nand, &nc->chips, node) {
+ int i;
+
+ for (i = 0; i < nand->numcs; i++)
+ nand_reset(&nand->base, i);
+ }
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(atmel_nand_controller_pm_ops, NULL,
+ atmel_nand_controller_resume);
+
+static struct platform_driver atmel_nand_controller_driver = {
+ .driver = {
+ .name = "atmel-nand-controller",
+ .of_match_table = of_match_ptr(atmel_nand_controller_of_ids),
+ .pm = &atmel_nand_controller_pm_ops,
+ },
+ .probe = atmel_nand_controller_probe,
+ .remove = atmel_nand_controller_remove,
+};
+module_platform_driver(atmel_nand_controller_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
+MODULE_DESCRIPTION("NAND Flash Controller driver for Atmel SoCs");
+MODULE_ALIAS("platform:atmel-nand-controller");
diff --git a/drivers/mtd/nand/raw/atmel/pmecc.c b/drivers/mtd/nand/raw/atmel/pmecc.c
new file mode 100644
index 000000000..cbb023bf0
--- /dev/null
+++ b/drivers/mtd/nand/raw/atmel/pmecc.c
@@ -0,0 +1,1018 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2017 ATMEL
+ * Copyright 2017 Free Electrons
+ *
+ * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
+ *
+ * Derived from the atmel_nand.c driver which contained the following
+ * copyrights:
+ *
+ * Copyright 2003 Rick Bronson
+ *
+ * Derived from drivers/mtd/nand/autcpu12.c (removed in v3.8)
+ * Copyright 2001 Thomas Gleixner (gleixner@autronix.de)
+ *
+ * Derived from drivers/mtd/spia.c (removed in v3.8)
+ * Copyright 2000 Steven J. Hill (sjhill@cotw.com)
+ *
+ * Add Hardware ECC support for AT91SAM9260 / AT91SAM9263
+ * Richard Genoud (richard.genoud@gmail.com), Adeneo Copyright 2007
+ *
+ * Derived from Das U-Boot source code
+ * (u-boot-1.1.5/board/atmel/at91sam9263ek/nand.c)
+ * Copyright 2006 ATMEL Rousset, Lacressonniere Nicolas
+ *
+ * Add Programmable Multibit ECC support for various AT91 SoC
+ * Copyright 2012 ATMEL, Hong Xu
+ *
+ * Add Nand Flash Controller support for SAMA5 SoC
+ * Copyright 2013 ATMEL, Josh Wu (josh.wu@atmel.com)
+ *
+ * The PMECC is a hardware-assisted BCH engine, which means part of the
+ * ECC algorithm is left to the software. The hardware/software split
+ * is explained in the "PMECC Controller Functional Description" chapter in
+ * Atmel datasheets, and some of the functions in this file are directly
+ * implementing the algorithms described in the "Software Implementation"
+ * sub-section.
+ *
+ * TODO: it seems that the software BCH implementation in lib/bch.c is already
+ * providing some of the logic we are implementing here. It would be smart
+ * to expose the needed lib/bch.c helpers/functions and re-use them here.
+ */
+
+#include <linux/genalloc.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "pmecc.h"
+
+/* Galois field dimension */
+#define PMECC_GF_DIMENSION_13 13
+#define PMECC_GF_DIMENSION_14 14
+
+/* Primitive Polynomial used by PMECC */
+#define PMECC_GF_13_PRIMITIVE_POLY 0x201b
+#define PMECC_GF_14_PRIMITIVE_POLY 0x4443
+
+#define PMECC_LOOKUP_TABLE_SIZE_512 0x2000
+#define PMECC_LOOKUP_TABLE_SIZE_1024 0x4000
+
+/* Time out value for reading PMECC status register */
+#define PMECC_MAX_TIMEOUT_MS 100
+
+/* PMECC Register Definitions */
+#define ATMEL_PMECC_CFG 0x0
+#define PMECC_CFG_BCH_STRENGTH(x) (x)
+#define PMECC_CFG_BCH_STRENGTH_MASK GENMASK(2, 0)
+#define PMECC_CFG_SECTOR512 (0 << 4)
+#define PMECC_CFG_SECTOR1024 (1 << 4)
+#define PMECC_CFG_NSECTORS(x) ((fls(x) - 1) << 8)
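+/* e.g. PMECC_CFG_NSECTORS(8) = (fls(8) - 1) << 8 = 3 << 8 (log2-encoded) */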
+#define PMECC_CFG_READ_OP (0 << 12)
+#define PMECC_CFG_WRITE_OP (1 << 12)
+#define PMECC_CFG_SPARE_ENABLE BIT(16)
+#define PMECC_CFG_AUTO_ENABLE BIT(20)
+
+#define ATMEL_PMECC_SAREA 0x4
+#define ATMEL_PMECC_SADDR 0x8
+#define ATMEL_PMECC_EADDR 0xc
+
+#define ATMEL_PMECC_CLK 0x10
+#define PMECC_CLK_133MHZ (2 << 0)
+
+#define ATMEL_PMECC_CTRL 0x14
+#define PMECC_CTRL_RST BIT(0)
+#define PMECC_CTRL_DATA BIT(1)
+#define PMECC_CTRL_USER BIT(2)
+#define PMECC_CTRL_ENABLE BIT(4)
+#define PMECC_CTRL_DISABLE BIT(5)
+
+#define ATMEL_PMECC_SR 0x18
+#define PMECC_SR_BUSY BIT(0)
+#define PMECC_SR_ENABLE BIT(4)
+
+#define ATMEL_PMECC_IER 0x1c
+#define ATMEL_PMECC_IDR 0x20
+#define ATMEL_PMECC_IMR 0x24
+#define ATMEL_PMECC_ISR 0x28
+#define PMECC_ERROR_INT BIT(0)
+
+#define ATMEL_PMECC_ECC(sector, n) \
+ ((((sector) + 1) * 0x40) + (n))
+
+#define ATMEL_PMECC_REM(sector, n) \
+ ((((sector) + 1) * 0x40) + ((n) * 4) + 0x200)
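+
+/*
+ * E.g. ATMEL_PMECC_ECC(0, 0) = 0x40 and ATMEL_PMECC_REM(1, 2) =
+ * 0x80 + 8 + 0x200 = 0x288: each sector owns a 0x40-byte window of ECC
+ * bytes, plus a matching remainder window starting at offset 0x200.
+ */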
+
+/* PMERRLOC Register Definitions */
+#define ATMEL_PMERRLOC_ELCFG 0x0
+#define PMERRLOC_ELCFG_SECTOR_512 (0 << 0)
+#define PMERRLOC_ELCFG_SECTOR_1024 (1 << 0)
+#define PMERRLOC_ELCFG_NUM_ERRORS(n) ((n) << 16)
+
+#define ATMEL_PMERRLOC_ELPRIM 0x4
+#define ATMEL_PMERRLOC_ELEN 0x8
+#define ATMEL_PMERRLOC_ELDIS 0xc
+#define PMERRLOC_DISABLE BIT(0)
+
+#define ATMEL_PMERRLOC_ELSR 0x10
+#define PMERRLOC_ELSR_BUSY BIT(0)
+
+#define ATMEL_PMERRLOC_ELIER 0x14
+#define ATMEL_PMERRLOC_ELIDR 0x18
+#define ATMEL_PMERRLOC_ELIMR 0x1c
+#define ATMEL_PMERRLOC_ELISR 0x20
+#define PMERRLOC_ERR_NUM_MASK GENMASK(12, 8)
+#define PMERRLOC_CALC_DONE BIT(0)
+
+#define ATMEL_PMERRLOC_SIGMA(x) (((x) * 0x4) + 0x28)
+
+#define ATMEL_PMERRLOC_EL(offs, x) (((x) * 0x4) + (offs))
+
+struct atmel_pmecc_gf_tables {
+ u16 *alpha_to;
+ u16 *index_of;
+};
+
+struct atmel_pmecc_caps {
+ const int *strengths;
+ int nstrengths;
+ int el_offset;
+ bool correct_erased_chunks;
+};
+
+struct atmel_pmecc {
+ struct device *dev;
+ const struct atmel_pmecc_caps *caps;
+
+ struct {
+ void __iomem *base;
+ void __iomem *errloc;
+ } regs;
+
+ struct mutex lock;
+};
+
+struct atmel_pmecc_user_conf_cache {
+ u32 cfg;
+ u32 sarea;
+ u32 saddr;
+ u32 eaddr;
+};
+
+struct atmel_pmecc_user {
+ struct atmel_pmecc_user_conf_cache cache;
+ struct atmel_pmecc *pmecc;
+ const struct atmel_pmecc_gf_tables *gf_tables;
+ int eccbytes;
+ s16 *partial_syn;
+ s16 *si;
+ s16 *lmu;
+ s16 *smu;
+ s32 *mu;
+ s32 *dmu;
+ s32 *delta;
+ u32 isr;
+};
+
+static DEFINE_MUTEX(pmecc_gf_tables_lock);
+static const struct atmel_pmecc_gf_tables *pmecc_gf_tables_512;
+static const struct atmel_pmecc_gf_tables *pmecc_gf_tables_1024;
+
+static inline int deg(unsigned int poly)
+{
+ /* polynomial degree is the most-significant bit index */
+ return fls(poly) - 1;
+}
+
+static int atmel_pmecc_build_gf_tables(int mm, unsigned int poly,
+ struct atmel_pmecc_gf_tables *gf_tables)
+{
+ unsigned int i, x = 1;
+ const unsigned int k = BIT(deg(poly));
+ unsigned int nn = BIT(mm) - 1;
+
+ /* primitive polynomial must be of degree m */
+ if (k != (1u << mm))
+ return -EINVAL;
+
+ for (i = 0; i < nn; i++) {
+ gf_tables->alpha_to[i] = x;
+ gf_tables->index_of[x] = i;
+ if (i && (x == 1))
+ /* polynomial is not primitive (a^i=1 with 0<i<2^m-1) */
+ return -EINVAL;
+ x <<= 1;
+ if (x & k)
+ x ^= poly;
+ }
+ gf_tables->alpha_to[nn] = 1;
+ gf_tables->index_of[0] = 0;
+
+ return 0;
+}
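+
+/*
+ * Usage sketch: with these log/antilog tables, a product of two non-zero
+ * GF(2^mm) elements is alpha_to[(index_of[a] + index_of[b]) % nn], which
+ * is how the error-location code below multiplies and divides field
+ * elements.
+ */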
+
+static const struct atmel_pmecc_gf_tables *
+atmel_pmecc_create_gf_tables(const struct atmel_pmecc_user_req *req)
+{
+ struct atmel_pmecc_gf_tables *gf_tables;
+ unsigned int poly, degree, table_size;
+ int ret;
+
+ if (req->ecc.sectorsize == 512) {
+ degree = PMECC_GF_DIMENSION_13;
+ poly = PMECC_GF_13_PRIMITIVE_POLY;
+ table_size = PMECC_LOOKUP_TABLE_SIZE_512;
+ } else {
+ degree = PMECC_GF_DIMENSION_14;
+ poly = PMECC_GF_14_PRIMITIVE_POLY;
+ table_size = PMECC_LOOKUP_TABLE_SIZE_1024;
+ }
+
+ gf_tables = kzalloc(sizeof(*gf_tables) +
+ (2 * table_size * sizeof(u16)),
+ GFP_KERNEL);
+ if (!gf_tables)
+ return ERR_PTR(-ENOMEM);
+
+ gf_tables->alpha_to = (void *)(gf_tables + 1);
+ gf_tables->index_of = gf_tables->alpha_to + table_size;
+
+ ret = atmel_pmecc_build_gf_tables(degree, poly, gf_tables);
+ if (ret) {
+ kfree(gf_tables);
+ return ERR_PTR(ret);
+ }
+
+ return gf_tables;
+}
+
+static const struct atmel_pmecc_gf_tables *
+atmel_pmecc_get_gf_tables(const struct atmel_pmecc_user_req *req)
+{
+ const struct atmel_pmecc_gf_tables **gf_tables, *ret;
+
+ mutex_lock(&pmecc_gf_tables_lock);
+ if (req->ecc.sectorsize == 512)
+ gf_tables = &pmecc_gf_tables_512;
+ else
+ gf_tables = &pmecc_gf_tables_1024;
+
+ ret = *gf_tables;
+
+ if (!ret) {
+ ret = atmel_pmecc_create_gf_tables(req);
+ if (!IS_ERR(ret))
+ *gf_tables = ret;
+ }
+ mutex_unlock(&pmecc_gf_tables_lock);
+
+ return ret;
+}
+
+static int atmel_pmecc_prepare_user_req(struct atmel_pmecc *pmecc,
+ struct atmel_pmecc_user_req *req)
+{
+ int i, max_eccbytes, eccbytes = 0, eccstrength = 0;
+
+ if (req->pagesize <= 0 || req->oobsize <= 0 || req->ecc.bytes <= 0)
+ return -EINVAL;
+
+ if (req->ecc.ooboffset >= 0 &&
+ req->ecc.ooboffset + req->ecc.bytes > req->oobsize)
+ return -EINVAL;
+
+ if (req->ecc.sectorsize == ATMEL_PMECC_SECTOR_SIZE_AUTO) {
+ if (req->ecc.strength != ATMEL_PMECC_MAXIMIZE_ECC_STRENGTH)
+ return -EINVAL;
+
+ if (req->pagesize > 512)
+ req->ecc.sectorsize = 1024;
+ else
+ req->ecc.sectorsize = 512;
+ }
+
+ if (req->ecc.sectorsize != 512 && req->ecc.sectorsize != 1024)
+ return -EINVAL;
+
+ if (req->pagesize % req->ecc.sectorsize)
+ return -EINVAL;
+
+ req->ecc.nsectors = req->pagesize / req->ecc.sectorsize;
+
+ max_eccbytes = req->ecc.bytes;
+
+ for (i = 0; i < pmecc->caps->nstrengths; i++) {
+ int nbytes, strength = pmecc->caps->strengths[i];
+
+ if (req->ecc.strength != ATMEL_PMECC_MAXIMIZE_ECC_STRENGTH &&
+ strength < req->ecc.strength)
+ continue;
+
+ nbytes = DIV_ROUND_UP(strength * fls(8 * req->ecc.sectorsize),
+ 8);
+ nbytes *= req->ecc.nsectors;
+
+ if (nbytes > max_eccbytes)
+ break;
+
+ eccstrength = strength;
+ eccbytes = nbytes;
+
+ if (req->ecc.strength != ATMEL_PMECC_MAXIMIZE_ECC_STRENGTH)
+ break;
+ }
+
+ if (!eccstrength)
+ return -EINVAL;
+
+ req->ecc.bytes = eccbytes;
+ req->ecc.strength = eccstrength;
+
+ if (req->ecc.ooboffset < 0)
+ req->ecc.ooboffset = req->oobsize - eccbytes;
+
+ return 0;
+}
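+
+/*
+ * Worked example for the strength-selection loop above (hypothetical
+ * geometry): a 4096-byte page split into 512-byte sectors gives
+ * nsectors = 8. For strength 8, fls(8 * 512) = 13 bits are needed per
+ * error location, so each sector needs DIV_ROUND_UP(8 * 13, 8) = 13 ECC
+ * bytes, i.e. 104 bytes in total, which must fit in the OOB area
+ * reserved for ECC.
+ */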
+
+struct atmel_pmecc_user *
+atmel_pmecc_create_user(struct atmel_pmecc *pmecc,
+ struct atmel_pmecc_user_req *req)
+{
+ struct atmel_pmecc_user *user;
+ const struct atmel_pmecc_gf_tables *gf_tables;
+ int strength, size, ret;
+
+ ret = atmel_pmecc_prepare_user_req(pmecc, req);
+ if (ret)
+ return ERR_PTR(ret);
+
+ size = sizeof(*user);
+ size = ALIGN(size, sizeof(u16));
+ /* Reserve space for partial_syn, si and smu */
+ size += ((2 * req->ecc.strength) + 1) * sizeof(u16) *
+ (2 + req->ecc.strength + 2);
+ /* Reserve space for lmu. */
+ size += (req->ecc.strength + 1) * sizeof(u16);
+ /* Reserve space for mu, dmu and delta. */
+ size = ALIGN(size, sizeof(s32));
+ size += (req->ecc.strength + 1) * sizeof(s32) * 3;
+
+ user = kzalloc(size, GFP_KERNEL);
+ if (!user)
+ return ERR_PTR(-ENOMEM);
+
+ user->pmecc = pmecc;
+
+ user->partial_syn = (s16 *)PTR_ALIGN(user + 1, sizeof(u16));
+ user->si = user->partial_syn + ((2 * req->ecc.strength) + 1);
+ user->lmu = user->si + ((2 * req->ecc.strength) + 1);
+ user->smu = user->lmu + (req->ecc.strength + 1);
+ user->mu = (s32 *)PTR_ALIGN(user->smu +
+ (((2 * req->ecc.strength) + 1) *
+ (req->ecc.strength + 2)),
+ sizeof(s32));
+ user->dmu = user->mu + req->ecc.strength + 1;
+ user->delta = user->dmu + req->ecc.strength + 1;
+
+ gf_tables = atmel_pmecc_get_gf_tables(req);
+ if (IS_ERR(gf_tables)) {
+ kfree(user);
+ return ERR_CAST(gf_tables);
+ }
+
+ user->gf_tables = gf_tables;
+
+ user->eccbytes = req->ecc.bytes / req->ecc.nsectors;
+
+ for (strength = 0; strength < pmecc->caps->nstrengths; strength++) {
+ if (pmecc->caps->strengths[strength] == req->ecc.strength)
+ break;
+ }
+
+ user->cache.cfg = PMECC_CFG_BCH_STRENGTH(strength) |
+ PMECC_CFG_NSECTORS(req->ecc.nsectors);
+
+ if (req->ecc.sectorsize == 1024)
+ user->cache.cfg |= PMECC_CFG_SECTOR1024;
+
+ user->cache.sarea = req->oobsize - 1;
+ user->cache.saddr = req->ecc.ooboffset;
+ user->cache.eaddr = req->ecc.ooboffset + req->ecc.bytes - 1;
+
+ return user;
+}
+EXPORT_SYMBOL_GPL(atmel_pmecc_create_user);
+
+void atmel_pmecc_destroy_user(struct atmel_pmecc_user *user)
+{
+ kfree(user);
+}
+EXPORT_SYMBOL_GPL(atmel_pmecc_destroy_user);
+
+static int get_strength(struct atmel_pmecc_user *user)
+{
+ const int *strengths = user->pmecc->caps->strengths;
+
+ return strengths[user->cache.cfg & PMECC_CFG_BCH_STRENGTH_MASK];
+}
+
+static int get_sectorsize(struct atmel_pmecc_user *user)
+{
+ return user->cache.cfg & PMECC_CFG_SECTOR1024 ? 1024 : 512;
+}
+
+static void atmel_pmecc_gen_syndrome(struct atmel_pmecc_user *user, int sector)
+{
+ int strength = get_strength(user);
+ u32 value;
+ int i;
+
+ /* Fill odd syndromes */
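+	/*
+	 * Each 32-bit REM register packs two partial remainders: entry
+	 * i = 2k sits in the low halfword and i = 2k + 1 in the high
+	 * halfword, hence the i / 2 indexing and the 16-bit shift below.
+	 */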
+ for (i = 0; i < strength; i++) {
+ value = readl_relaxed(user->pmecc->regs.base +
+ ATMEL_PMECC_REM(sector, i / 2));
+ if (i & 1)
+ value >>= 16;
+
+ user->partial_syn[(2 * i) + 1] = value;
+ }
+}
+
+static void atmel_pmecc_substitute(struct atmel_pmecc_user *user)
+{
+ int degree = get_sectorsize(user) == 512 ? 13 : 14;
+ int cw_len = BIT(degree) - 1;
+ int strength = get_strength(user);
+ s16 *alpha_to = user->gf_tables->alpha_to;
+ s16 *index_of = user->gf_tables->index_of;
+ s16 *partial_syn = user->partial_syn;
+ s16 *si;
+ int i, j;
+
+ /*
+	 * si[] is a table that holds the current syndrome values;
+	 * each element of that table belongs to the Galois field.
+ */
+ si = user->si;
+
+ memset(&si[1], 0, sizeof(s16) * ((2 * strength) - 1));
+
+	/* Compute the 2t syndromes based on S(x) */
+ /* Odd syndromes */
+ for (i = 1; i < 2 * strength; i += 2) {
+ for (j = 0; j < degree; j++) {
+ if (partial_syn[i] & BIT(j))
+ si[i] = alpha_to[i * j] ^ si[i];
+ }
+ }
+ /* Even syndrome = (Odd syndrome) ** 2 */
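+	/*
+	 * Squaring via the log tables: if si[j] = alpha^t, then
+	 * si[2j] = alpha^((2 * t) mod cw_len). E.g. (made-up value)
+	 * t = 100 with cw_len = 8191 yields si[2j] = alpha_to[200].
+	 */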
+ for (i = 2, j = 1; j <= strength; i = ++j << 1) {
+ if (si[j] == 0) {
+ si[i] = 0;
+ } else {
+ s16 tmp;
+
+ tmp = index_of[si[j]];
+ tmp = (tmp * 2) % cw_len;
+ si[i] = alpha_to[tmp];
+ }
+ }
+}
+
+static void atmel_pmecc_get_sigma(struct atmel_pmecc_user *user)
+{
+ s16 *lmu = user->lmu;
+ s16 *si = user->si;
+ s32 *mu = user->mu;
+ s32 *dmu = user->dmu;
+ s32 *delta = user->delta;
+ int degree = get_sectorsize(user) == 512 ? 13 : 14;
+ int cw_len = BIT(degree) - 1;
+ int strength = get_strength(user);
+ int num = 2 * strength + 1;
+ s16 *index_of = user->gf_tables->index_of;
+ s16 *alpha_to = user->gf_tables->alpha_to;
+ int i, j, k;
+ u32 dmu_0_count, tmp;
+ s16 *smu = user->smu;
+
+ /* index of largest delta */
+ int ro;
+ int largest;
+ int diff;
+
+ dmu_0_count = 0;
+
+ /* First Row */
+
+ /* Mu */
+ mu[0] = -1;
+
+ memset(smu, 0, sizeof(s16) * num);
+ smu[0] = 1;
+
+ /* discrepancy set to 1 */
+ dmu[0] = 1;
+	/* polynomial order set to 0 */
+ lmu[0] = 0;
+ delta[0] = (mu[0] * 2 - lmu[0]) >> 1;
+
+ /* Second Row */
+
+ /* Mu */
+ mu[1] = 0;
+ /* Sigma(x) set to 1 */
+ memset(&smu[num], 0, sizeof(s16) * num);
+ smu[num] = 1;
+
+ /* discrepancy set to S1 */
+ dmu[1] = si[1];
+
+	/* polynomial order set to 0 */
+ lmu[1] = 0;
+
+ delta[1] = (mu[1] * 2 - lmu[1]) >> 1;
+
+ /* Init the Sigma(x) last row */
+ memset(&smu[(strength + 1) * num], 0, sizeof(s16) * num);
+
+ for (i = 1; i <= strength; i++) {
+ mu[i + 1] = i << 1;
+ /* Begin Computing Sigma (Mu+1) and L(mu) */
+ /* check if discrepancy is set to 0 */
+ if (dmu[i] == 0) {
+ dmu_0_count++;
+
+ tmp = ((strength - (lmu[i] >> 1) - 1) / 2);
+ if ((strength - (lmu[i] >> 1) - 1) & 0x1)
+ tmp += 2;
+ else
+ tmp += 1;
+
+ if (dmu_0_count == tmp) {
+ for (j = 0; j <= (lmu[i] >> 1) + 1; j++)
+ smu[(strength + 1) * num + j] =
+ smu[i * num + j];
+
+ lmu[strength + 1] = lmu[i];
+ return;
+ }
+
+			/* copy polynomial */
+ for (j = 0; j <= lmu[i] >> 1; j++)
+ smu[(i + 1) * num + j] = smu[i * num + j];
+
+			/* copy previous polynomial order to the next */
+ lmu[i + 1] = lmu[i];
+ } else {
+ ro = 0;
+ largest = -1;
+ /* find largest delta with dmu != 0 */
+ for (j = 0; j < i; j++) {
+ if ((dmu[j]) && (delta[j] > largest)) {
+ largest = delta[j];
+ ro = j;
+ }
+ }
+
+ /* compute difference */
+ diff = (mu[i] - mu[ro]);
+
+ /* Compute degree of the new smu polynomial */
+ if ((lmu[i] >> 1) > ((lmu[ro] >> 1) + diff))
+ lmu[i + 1] = lmu[i];
+ else
+ lmu[i + 1] = ((lmu[ro] >> 1) + diff) * 2;
+
+ /* Init smu[i+1] with 0 */
+ for (k = 0; k < num; k++)
+ smu[(i + 1) * num + k] = 0;
+
+ /* Compute smu[i+1] */
+ for (k = 0; k <= lmu[ro] >> 1; k++) {
+ s16 a, b, c;
+
+ if (!(smu[ro * num + k] && dmu[i]))
+ continue;
+
+ a = index_of[dmu[i]];
+ b = index_of[dmu[ro]];
+ c = index_of[smu[ro * num + k]];
+ tmp = a + (cw_len - b) + c;
+ a = alpha_to[tmp % cw_len];
+ smu[(i + 1) * num + (k + diff)] = a;
+ }
+
+ for (k = 0; k <= lmu[i] >> 1; k++)
+ smu[(i + 1) * num + k] ^= smu[i * num + k];
+ }
+
+ /* End Computing Sigma (Mu+1) and L(mu) */
+ /* In either case compute delta */
+ delta[i + 1] = (mu[i + 1] * 2 - lmu[i + 1]) >> 1;
+
+ /* Do not compute discrepancy for the last iteration */
+ if (i >= strength)
+ continue;
+
+ for (k = 0; k <= (lmu[i + 1] >> 1); k++) {
+ tmp = 2 * (i - 1);
+ if (k == 0) {
+ dmu[i + 1] = si[tmp + 3];
+ } else if (smu[(i + 1) * num + k] && si[tmp + 3 - k]) {
+ s16 a, b, c;
+
+ a = index_of[smu[(i + 1) * num + k]];
+ b = si[2 * (i - 1) + 3 - k];
+ c = index_of[b];
+ tmp = a + c;
+ tmp %= cw_len;
+ dmu[i + 1] = alpha_to[tmp] ^ dmu[i + 1];
+ }
+ }
+ }
+}
+
+static int atmel_pmecc_err_location(struct atmel_pmecc_user *user)
+{
+ int sector_size = get_sectorsize(user);
+ int degree = sector_size == 512 ? 13 : 14;
+ struct atmel_pmecc *pmecc = user->pmecc;
+ int strength = get_strength(user);
+ int ret, roots_nbr, i, err_nbr = 0;
+ int num = (2 * strength) + 1;
+ s16 *smu = user->smu;
+ u32 val;
+
+ writel(PMERRLOC_DISABLE, pmecc->regs.errloc + ATMEL_PMERRLOC_ELDIS);
+
+ for (i = 0; i <= user->lmu[strength + 1] >> 1; i++) {
+ writel_relaxed(smu[(strength + 1) * num + i],
+ pmecc->regs.errloc + ATMEL_PMERRLOC_SIGMA(i));
+ err_nbr++;
+ }
+
+	val = PMERRLOC_ELCFG_NUM_ERRORS(err_nbr - 1);
+	if (sector_size == 1024)
+		val |= PMERRLOC_ELCFG_SECTOR_1024;
+
+ writel(val, pmecc->regs.errloc + ATMEL_PMERRLOC_ELCFG);
+ writel((sector_size * 8) + (degree * strength),
+ pmecc->regs.errloc + ATMEL_PMERRLOC_ELEN);
+
+ ret = readl_relaxed_poll_timeout(pmecc->regs.errloc +
+ ATMEL_PMERRLOC_ELISR,
+ val, val & PMERRLOC_CALC_DONE, 0,
+ PMECC_MAX_TIMEOUT_MS * 1000);
+ if (ret) {
+ dev_err(pmecc->dev,
+			"PMECC: timed out while calculating the error location\n");
+ return ret;
+ }
+
+ roots_nbr = (val & PMERRLOC_ERR_NUM_MASK) >> 8;
+ /* Number of roots == degree of smu hence <= cap */
+ if (roots_nbr == user->lmu[strength + 1] >> 1)
+ return err_nbr - 1;
+
+ /*
+	 * The number of roots does not match the degree of smu:
+	 * we are unable to correct the errors.
+ */
+ return -EBADMSG;
+}
+
+int atmel_pmecc_correct_sector(struct atmel_pmecc_user *user, int sector,
+ void *data, void *ecc)
+{
+ struct atmel_pmecc *pmecc = user->pmecc;
+ int sectorsize = get_sectorsize(user);
+ int eccbytes = user->eccbytes;
+ int i, nerrors;
+
+ if (!(user->isr & BIT(sector)))
+ return 0;
+
+ atmel_pmecc_gen_syndrome(user, sector);
+ atmel_pmecc_substitute(user);
+ atmel_pmecc_get_sigma(user);
+
+ nerrors = atmel_pmecc_err_location(user);
+ if (nerrors < 0)
+ return nerrors;
+
+ for (i = 0; i < nerrors; i++) {
+ const char *area;
+ int byte, bit;
+ u32 errpos;
+ u8 *ptr;
+
+ errpos = readl_relaxed(pmecc->regs.errloc +
+ ATMEL_PMERRLOC_EL(pmecc->caps->el_offset, i));
+ errpos--;
+
+ byte = errpos / 8;
+ bit = errpos % 8;
+
+ if (byte < sectorsize) {
+ ptr = data + byte;
+ area = "data";
+ } else if (byte < sectorsize + eccbytes) {
+ ptr = ecc + byte - sectorsize;
+ area = "ECC";
+ } else {
+ dev_dbg(pmecc->dev,
+ "Invalid errpos value (%d, max is %d)\n",
+ errpos, (sectorsize + eccbytes) * 8);
+ return -EINVAL;
+ }
+
+ dev_dbg(pmecc->dev,
+ "Bit flip in %s area, byte %d: 0x%02x -> 0x%02x\n",
+ area, byte, *ptr, (unsigned int)(*ptr ^ BIT(bit)));
+
+ *ptr ^= BIT(bit);
+ }
+
+ return nerrors;
+}
+EXPORT_SYMBOL_GPL(atmel_pmecc_correct_sector);
+
+bool atmel_pmecc_correct_erased_chunks(struct atmel_pmecc_user *user)
+{
+ return user->pmecc->caps->correct_erased_chunks;
+}
+EXPORT_SYMBOL_GPL(atmel_pmecc_correct_erased_chunks);
+
+void atmel_pmecc_get_generated_eccbytes(struct atmel_pmecc_user *user,
+ int sector, void *ecc)
+{
+ struct atmel_pmecc *pmecc = user->pmecc;
+ u8 *ptr = ecc;
+ int i;
+
+ for (i = 0; i < user->eccbytes; i++)
+ ptr[i] = readb_relaxed(pmecc->regs.base +
+ ATMEL_PMECC_ECC(sector, i));
+}
+EXPORT_SYMBOL_GPL(atmel_pmecc_get_generated_eccbytes);
+
+void atmel_pmecc_reset(struct atmel_pmecc *pmecc)
+{
+ writel(PMECC_CTRL_RST, pmecc->regs.base + ATMEL_PMECC_CTRL);
+ writel(PMECC_CTRL_DISABLE, pmecc->regs.base + ATMEL_PMECC_CTRL);
+}
+EXPORT_SYMBOL_GPL(atmel_pmecc_reset);
+
+int atmel_pmecc_enable(struct atmel_pmecc_user *user, int op)
+{
+ struct atmel_pmecc *pmecc = user->pmecc;
+ u32 cfg;
+
+ if (op != NAND_ECC_READ && op != NAND_ECC_WRITE) {
+		dev_err(pmecc->dev, "Bad ECC operation!\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&user->pmecc->lock);
+
+ cfg = user->cache.cfg;
+ if (op == NAND_ECC_WRITE)
+ cfg |= PMECC_CFG_WRITE_OP;
+ else
+ cfg |= PMECC_CFG_AUTO_ENABLE;
+
+ writel(cfg, pmecc->regs.base + ATMEL_PMECC_CFG);
+ writel(user->cache.sarea, pmecc->regs.base + ATMEL_PMECC_SAREA);
+ writel(user->cache.saddr, pmecc->regs.base + ATMEL_PMECC_SADDR);
+ writel(user->cache.eaddr, pmecc->regs.base + ATMEL_PMECC_EADDR);
+
+ writel(PMECC_CTRL_ENABLE, pmecc->regs.base + ATMEL_PMECC_CTRL);
+ writel(PMECC_CTRL_DATA, pmecc->regs.base + ATMEL_PMECC_CTRL);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(atmel_pmecc_enable);
+
+void atmel_pmecc_disable(struct atmel_pmecc_user *user)
+{
+ atmel_pmecc_reset(user->pmecc);
+ mutex_unlock(&user->pmecc->lock);
+}
+EXPORT_SYMBOL_GPL(atmel_pmecc_disable);
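+
+/*
+ * Typical calling sequence (an illustrative sketch of how a NAND
+ * controller driver is expected to drive this API, not a mandated order
+ * beyond the enable/disable pairing):
+ *
+ *	atmel_pmecc_enable(user, NAND_ECC_READ);
+ *	... transfer the page to/from the NAND ...
+ *	atmel_pmecc_wait_rdy(user);
+ *	atmel_pmecc_correct_sector(user, sector, data, ecc); (per sector)
+ *	atmel_pmecc_disable(user);
+ */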
+
+int atmel_pmecc_wait_rdy(struct atmel_pmecc_user *user)
+{
+ struct atmel_pmecc *pmecc = user->pmecc;
+ u32 status;
+ int ret;
+
+ ret = readl_relaxed_poll_timeout(pmecc->regs.base +
+ ATMEL_PMECC_SR,
+ status, !(status & PMECC_SR_BUSY), 0,
+ PMECC_MAX_TIMEOUT_MS * 1000);
+ if (ret) {
+ dev_err(pmecc->dev,
+ "Timeout while waiting for PMECC ready.\n");
+ return ret;
+ }
+
+ user->isr = readl_relaxed(pmecc->regs.base + ATMEL_PMECC_ISR);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(atmel_pmecc_wait_rdy);
+
+static struct atmel_pmecc *atmel_pmecc_create(struct platform_device *pdev,
+ const struct atmel_pmecc_caps *caps,
+ int pmecc_res_idx, int errloc_res_idx)
+{
+ struct device *dev = &pdev->dev;
+ struct atmel_pmecc *pmecc;
+ struct resource *res;
+
+ pmecc = devm_kzalloc(dev, sizeof(*pmecc), GFP_KERNEL);
+ if (!pmecc)
+ return ERR_PTR(-ENOMEM);
+
+ pmecc->caps = caps;
+ pmecc->dev = dev;
+ mutex_init(&pmecc->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, pmecc_res_idx);
+ pmecc->regs.base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pmecc->regs.base))
+ return ERR_CAST(pmecc->regs.base);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, errloc_res_idx);
+ pmecc->regs.errloc = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pmecc->regs.errloc))
+ return ERR_CAST(pmecc->regs.errloc);
+
+ /* Disable all interrupts before registering the PMECC handler. */
+ writel(0xffffffff, pmecc->regs.base + ATMEL_PMECC_IDR);
+ atmel_pmecc_reset(pmecc);
+
+ return pmecc;
+}
+
+static void devm_atmel_pmecc_put(struct device *dev, void *res)
+{
+ struct atmel_pmecc **pmecc = res;
+
+ put_device((*pmecc)->dev);
+}
+
+static struct atmel_pmecc *atmel_pmecc_get_by_node(struct device *userdev,
+ struct device_node *np)
+{
+ struct platform_device *pdev;
+ struct atmel_pmecc *pmecc, **ptr;
+ int ret;
+
+ pdev = of_find_device_by_node(np);
+ if (!pdev)
+ return ERR_PTR(-EPROBE_DEFER);
+ pmecc = platform_get_drvdata(pdev);
+ if (!pmecc) {
+ ret = -EPROBE_DEFER;
+ goto err_put_device;
+ }
+
+ ptr = devres_alloc(devm_atmel_pmecc_put, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr) {
+ ret = -ENOMEM;
+ goto err_put_device;
+ }
+
+ *ptr = pmecc;
+
+ devres_add(userdev, ptr);
+
+ return pmecc;
+
+err_put_device:
+ put_device(&pdev->dev);
+ return ERR_PTR(ret);
+}
+
+static const int atmel_pmecc_strengths[] = { 2, 4, 8, 12, 24, 32 };
+
+static struct atmel_pmecc_caps at91sam9g45_caps = {
+ .strengths = atmel_pmecc_strengths,
+ .nstrengths = 5,
+ .el_offset = 0x8c,
+};
+
+static struct atmel_pmecc_caps sama5d4_caps = {
+ .strengths = atmel_pmecc_strengths,
+ .nstrengths = 5,
+ .el_offset = 0x8c,
+ .correct_erased_chunks = true,
+};
+
+static struct atmel_pmecc_caps sama5d2_caps = {
+ .strengths = atmel_pmecc_strengths,
+ .nstrengths = 6,
+ .el_offset = 0xac,
+ .correct_erased_chunks = true,
+};
+
+static const struct of_device_id atmel_pmecc_legacy_match[] = {
+ { .compatible = "atmel,sama5d4-nand", &sama5d4_caps },
+ { .compatible = "atmel,sama5d2-nand", &sama5d2_caps },
+ { /* sentinel */ }
+};
+
+struct atmel_pmecc *devm_atmel_pmecc_get(struct device *userdev)
+{
+ struct atmel_pmecc *pmecc;
+ struct device_node *np;
+
+ if (!userdev)
+ return ERR_PTR(-EINVAL);
+
+ if (!userdev->of_node)
+ return NULL;
+
+ np = of_parse_phandle(userdev->of_node, "ecc-engine", 0);
+ if (np) {
+ pmecc = atmel_pmecc_get_by_node(userdev, np);
+ of_node_put(np);
+ } else {
+ /*
+ * Support old DT bindings: in this case the PMECC iomem
+		 * resources are directly defined in the user pdev at positions
+		 * 1 and 2. Extract all relevant information from there.
+ */
+ struct platform_device *pdev = to_platform_device(userdev);
+ const struct atmel_pmecc_caps *caps;
+ const struct of_device_id *match;
+
+ /* No PMECC engine available. */
+ if (!of_property_read_bool(userdev->of_node,
+ "atmel,has-pmecc"))
+ return NULL;
+
+ caps = &at91sam9g45_caps;
+
+ /* Find the caps associated to the NAND dev node. */
+ match = of_match_node(atmel_pmecc_legacy_match,
+ userdev->of_node);
+ if (match && match->data)
+ caps = match->data;
+
+ pmecc = atmel_pmecc_create(pdev, caps, 1, 2);
+ }
+
+ return pmecc;
+}
+EXPORT_SYMBOL(devm_atmel_pmecc_get);
+
+static const struct of_device_id atmel_pmecc_match[] = {
+ { .compatible = "atmel,at91sam9g45-pmecc", &at91sam9g45_caps },
+ { .compatible = "atmel,sama5d4-pmecc", &sama5d4_caps },
+ { .compatible = "atmel,sama5d2-pmecc", &sama5d2_caps },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, atmel_pmecc_match);
+
+static int atmel_pmecc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct atmel_pmecc_caps *caps;
+ struct atmel_pmecc *pmecc;
+
+ caps = of_device_get_match_data(&pdev->dev);
+ if (!caps) {
+ dev_err(dev, "Invalid caps\n");
+ return -EINVAL;
+ }
+
+ pmecc = atmel_pmecc_create(pdev, caps, 0, 1);
+ if (IS_ERR(pmecc))
+ return PTR_ERR(pmecc);
+
+ platform_set_drvdata(pdev, pmecc);
+
+ return 0;
+}
+
+static struct platform_driver atmel_pmecc_driver = {
+ .driver = {
+ .name = "atmel-pmecc",
+ .of_match_table = of_match_ptr(atmel_pmecc_match),
+ },
+ .probe = atmel_pmecc_probe,
+};
+module_platform_driver(atmel_pmecc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
+MODULE_DESCRIPTION("PMECC engine driver");
+MODULE_ALIAS("platform:atmel_pmecc");
diff --git a/drivers/mtd/nand/raw/atmel/pmecc.h b/drivers/mtd/nand/raw/atmel/pmecc.h
new file mode 100644
index 000000000..7851c0512
--- /dev/null
+++ b/drivers/mtd/nand/raw/atmel/pmecc.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * © Copyright 2016 ATMEL
+ * © Copyright 2016 Free Electrons
+ *
+ * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
+ *
+ * Derived from the atmel_nand.c driver which contained the following
+ * copyrights:
+ *
+ * Copyright © 2003 Rick Bronson
+ *
+ * Derived from drivers/mtd/nand/autcpu12.c (removed in v3.8)
+ * Copyright © 2001 Thomas Gleixner (gleixner@autronix.de)
+ *
+ * Derived from drivers/mtd/spia.c (removed in v3.8)
+ * Copyright © 2000 Steven J. Hill (sjhill@cotw.com)
+ *
+ *
+ * Add Hardware ECC support for AT91SAM9260 / AT91SAM9263
+ * Richard Genoud (richard.genoud@gmail.com), Adeneo Copyright © 2007
+ *
+ * Derived from Das U-Boot source code
+ * (u-boot-1.1.5/board/atmel/at91sam9263ek/nand.c)
+ * © Copyright 2006 ATMEL Rousset, Lacressonniere Nicolas
+ *
+ * Add Programmable Multibit ECC support for various AT91 SoC
+ * © Copyright 2012 ATMEL, Hong Xu
+ *
+ * Add Nand Flash Controller support for SAMA5 SoC
+ * © Copyright 2013 ATMEL, Josh Wu (josh.wu@atmel.com)
+ */
+
+#ifndef ATMEL_PMECC_H
+#define ATMEL_PMECC_H
+
+#define ATMEL_PMECC_MAXIMIZE_ECC_STRENGTH 0
+#define ATMEL_PMECC_SECTOR_SIZE_AUTO 0
+#define ATMEL_PMECC_OOBOFFSET_AUTO (-1)
+
+struct atmel_pmecc_user_req {
+ int pagesize;
+ int oobsize;
+ struct {
+ int strength;
+ int bytes;
+ int sectorsize;
+ int nsectors;
+ int ooboffset;
+ } ecc;
+};
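+
+/*
+ * Example request (editorial sketch, geometry assumed for illustration): for
+ * a 2048+64 byte page where the driver lets the engine pick the strongest
+ * working configuration, the request could look like
+ *
+ *	struct atmel_pmecc_user_req req = {
+ *		.pagesize = 2048,
+ *		.oobsize = 64,
+ *		.ecc = {
+ *			.strength = ATMEL_PMECC_MAXIMIZE_ECC_STRENGTH,
+ *			.sectorsize = ATMEL_PMECC_SECTOR_SIZE_AUTO,
+ *			.ooboffset = ATMEL_PMECC_OOBOFFSET_AUTO,
+ *		},
+ *	};
+ *
+ * atmel_pmecc_create_user() is then expected to resolve the AUTO/maximize
+ * values against the engine's capabilities.
+ */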
+
+struct atmel_pmecc *devm_atmel_pmecc_get(struct device *dev);
+
+struct atmel_pmecc_user *
+atmel_pmecc_create_user(struct atmel_pmecc *pmecc,
+ struct atmel_pmecc_user_req *req);
+void atmel_pmecc_destroy_user(struct atmel_pmecc_user *user);
+
+void atmel_pmecc_reset(struct atmel_pmecc *pmecc);
+int atmel_pmecc_enable(struct atmel_pmecc_user *user, int op);
+void atmel_pmecc_disable(struct atmel_pmecc_user *user);
+int atmel_pmecc_wait_rdy(struct atmel_pmecc_user *user);
+int atmel_pmecc_correct_sector(struct atmel_pmecc_user *user, int sector,
+ void *data, void *ecc);
+bool atmel_pmecc_correct_erased_chunks(struct atmel_pmecc_user *user);
+void atmel_pmecc_get_generated_eccbytes(struct atmel_pmecc_user *user,
+ int sector, void *ecc);
+
+#endif /* ATMEL_PMECC_H */
diff --git a/drivers/mtd/nand/raw/au1550nd.c b/drivers/mtd/nand/raw/au1550nd.c
new file mode 100644
index 000000000..48901a1b8
--- /dev/null
+++ b/drivers/mtd/nand/raw/au1550nd.c
@@ -0,0 +1,367 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2004 Embedded Edge, LLC
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/platform_device.h>
+#include <asm/io.h>
+#include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/au1550nd.h>
+
+
+struct au1550nd_ctx {
+ struct nand_controller controller;
+ struct nand_chip chip;
+
+ int cs;
+ void __iomem *base;
+};
+
+static struct au1550nd_ctx *chip_to_au_ctx(struct nand_chip *this)
+{
+ return container_of(this, struct au1550nd_ctx, chip);
+}
+
+/**
+ * au_write_buf - write buffer to chip
+ * @this: NAND chip object
+ * @buf: data buffer
+ * @len: number of bytes to write
+ *
+ * write function for 8bit buswidth
+ */
+static void au_write_buf(struct nand_chip *this, const void *buf,
+ unsigned int len)
+{
+ struct au1550nd_ctx *ctx = chip_to_au_ctx(this);
+ const u8 *p = buf;
+ int i;
+
+ for (i = 0; i < len; i++) {
+ writeb(p[i], ctx->base + MEM_STNAND_DATA);
+ wmb(); /* drain writebuffer */
+ }
+}
+
+/**
+ * au_read_buf - read chip data into buffer
+ * @this: NAND chip object
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ *
+ * read function for 8bit buswidth
+ */
+static void au_read_buf(struct nand_chip *this, void *buf,
+ unsigned int len)
+{
+ struct au1550nd_ctx *ctx = chip_to_au_ctx(this);
+ u8 *p = buf;
+ int i;
+
+ for (i = 0; i < len; i++) {
+ p[i] = readb(ctx->base + MEM_STNAND_DATA);
+ wmb(); /* drain writebuffer */
+ }
+}
+
+/**
+ * au_write_buf16 - write buffer to chip
+ * @this: NAND chip object
+ * @buf: data buffer
+ * @len: number of bytes to write
+ *
+ * write function for 16bit buswidth
+ */
+static void au_write_buf16(struct nand_chip *this, const void *buf,
+ unsigned int len)
+{
+ struct au1550nd_ctx *ctx = chip_to_au_ctx(this);
+ const u16 *p = buf;
+ unsigned int i;
+
+ len >>= 1;
+ for (i = 0; i < len; i++) {
+ writew(p[i], ctx->base + MEM_STNAND_DATA);
+ wmb(); /* drain writebuffer */
+ }
+}
+
+/**
+ * au_read_buf16 - read chip data into buffer
+ * @this: NAND chip object
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ *
+ * read function for 16bit buswidth
+ */
+static void au_read_buf16(struct nand_chip *this, void *buf, unsigned int len)
+{
+ struct au1550nd_ctx *ctx = chip_to_au_ctx(this);
+ unsigned int i;
+ u16 *p = buf;
+
+ len >>= 1;
+ for (i = 0; i < len; i++) {
+ p[i] = readw(ctx->base + MEM_STNAND_DATA);
+ wmb(); /* drain writebuffer */
+ }
+}
+
+static int find_nand_cs(unsigned long nand_base)
+{
+ void __iomem *base =
+ (void __iomem *)KSEG1ADDR(AU1000_STATIC_MEM_PHYS_ADDR);
+ unsigned long addr, staddr, start, mask, end;
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ addr = 0x1000 + (i * 0x10); /* CSx */
+ staddr = __raw_readl(base + addr + 0x08); /* STADDRx */
+ /* figure out the decoded range of this CS */
+ start = (staddr << 4) & 0xfffc0000;
+ mask = (staddr << 18) & 0xfffc0000;
+ end = (start | (start - 1)) & ~(start ^ mask);
+ if ((nand_base >= start) && (nand_base < end))
+ return i;
+ }
+
+ return -ENODEV;
+}
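+
+/*
+ * Worked example (editorial note, STADDRx value assumed for illustration):
+ * with 32-bit arithmetic, staddr = 0x02003fff decodes to
+ * start = (staddr << 4) & 0xfffc0000 = 0x20000000 and
+ * mask = (staddr << 18) & 0xfffc0000 = 0xfffc0000, so
+ * end = (start | (start - 1)) & ~(start ^ mask) = 0x2003ffff,
+ * i.e. a 256 KiB window at 0x20000000. A nand_base inside that range
+ * selects this chip-select.
+ */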
+
+static int au1550nd_waitrdy(struct nand_chip *this, unsigned int timeout_ms)
+{
+ unsigned long timeout_jiffies = jiffies;
+
+ timeout_jiffies += msecs_to_jiffies(timeout_ms) + 1;
+ do {
+ if (alchemy_rdsmem(AU1000_MEM_STSTAT) & 0x1)
+ return 0;
+
+ usleep_range(10, 100);
+ } while (time_before(jiffies, timeout_jiffies));
+
+ return -ETIMEDOUT;
+}
+
+static int au1550nd_exec_instr(struct nand_chip *this,
+ const struct nand_op_instr *instr)
+{
+ struct au1550nd_ctx *ctx = chip_to_au_ctx(this);
+ unsigned int i;
+ int ret = 0;
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ writeb(instr->ctx.cmd.opcode,
+ ctx->base + MEM_STNAND_CMD);
+ /* Drain the writebuffer */
+ wmb();
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ for (i = 0; i < instr->ctx.addr.naddrs; i++) {
+ writeb(instr->ctx.addr.addrs[i],
+ ctx->base + MEM_STNAND_ADDR);
+ /* Drain the writebuffer */
+ wmb();
+ }
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ if ((this->options & NAND_BUSWIDTH_16) &&
+ !instr->ctx.data.force_8bit)
+ au_read_buf16(this, instr->ctx.data.buf.in,
+ instr->ctx.data.len);
+ else
+ au_read_buf(this, instr->ctx.data.buf.in,
+ instr->ctx.data.len);
+ break;
+
+ case NAND_OP_DATA_OUT_INSTR:
+ if ((this->options & NAND_BUSWIDTH_16) &&
+ !instr->ctx.data.force_8bit)
+ au_write_buf16(this, instr->ctx.data.buf.out,
+ instr->ctx.data.len);
+ else
+ au_write_buf(this, instr->ctx.data.buf.out,
+ instr->ctx.data.len);
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ ret = au1550nd_waitrdy(this, instr->ctx.waitrdy.timeout_ms);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (instr->delay_ns)
+ ndelay(instr->delay_ns);
+
+ return ret;
+}
+
+static int au1550nd_exec_op(struct nand_chip *this,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ struct au1550nd_ctx *ctx = chip_to_au_ctx(this);
+ unsigned int i;
+	int ret = 0;
+
+ if (check_only)
+ return 0;
+
+ /* assert (force assert) chip enable */
+ alchemy_wrsmem((1 << (4 + ctx->cs)), AU1000_MEM_STNDCTL);
+ /* Drain the writebuffer */
+ wmb();
+
+ for (i = 0; i < op->ninstrs; i++) {
+ ret = au1550nd_exec_instr(this, &op->instrs[i]);
+ if (ret)
+ break;
+ }
+
+ /* deassert chip enable */
+ alchemy_wrsmem(0, AU1000_MEM_STNDCTL);
+ /* Drain the writebuffer */
+ wmb();
+
+ return ret;
+}
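+
+/*
+ * Editorial note: with ->exec_op(), the NAND core hands the driver fully
+ * described operations. A generic large-page read, for instance, arrives
+ * roughly as CMD(0x00) -> ADDR(column/row bytes) -> CMD(0x30) -> WAITRDY ->
+ * DATA_IN, and each element is dispatched to au1550nd_exec_instr() in order
+ * while the chip enable stays asserted across the whole sequence.
+ */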
+
+static int au1550nd_attach_chip(struct nand_chip *chip)
+{
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
+ chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN)
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+
+ return 0;
+}
+
+static const struct nand_controller_ops au1550nd_ops = {
+ .exec_op = au1550nd_exec_op,
+ .attach_chip = au1550nd_attach_chip,
+};
+
+static int au1550nd_probe(struct platform_device *pdev)
+{
+ struct au1550nd_platdata *pd;
+ struct au1550nd_ctx *ctx;
+ struct nand_chip *this;
+ struct mtd_info *mtd;
+ struct resource *r;
+ int ret, cs;
+
+ pd = dev_get_platdata(&pdev->dev);
+ if (!pd) {
+ dev_err(&pdev->dev, "missing platform data\n");
+ return -ENODEV;
+ }
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ dev_err(&pdev->dev, "no NAND memory resource\n");
+ ret = -ENODEV;
+ goto out1;
+ }
+	if (!request_mem_region(r->start, resource_size(r), "au1550-nand")) {
+ dev_err(&pdev->dev, "cannot claim NAND memory area\n");
+ ret = -ENOMEM;
+ goto out1;
+ }
+
+ ctx->base = ioremap(r->start, 0x1000);
+ if (!ctx->base) {
+ dev_err(&pdev->dev, "cannot remap NAND memory area\n");
+ ret = -ENODEV;
+ goto out2;
+ }
+
+ this = &ctx->chip;
+ mtd = nand_to_mtd(this);
+ mtd->dev.parent = &pdev->dev;
+
+ /* figure out which CS# r->start belongs to */
+ cs = find_nand_cs(r->start);
+ if (cs < 0) {
+ dev_err(&pdev->dev, "cannot detect NAND chipselect\n");
+ ret = -ENODEV;
+ goto out3;
+ }
+ ctx->cs = cs;
+
+ nand_controller_init(&ctx->controller);
+ ctx->controller.ops = &au1550nd_ops;
+ this->controller = &ctx->controller;
+
+ if (pd->devwidth)
+ this->options |= NAND_BUSWIDTH_16;
+
+ /*
+ * This driver assumes that the default ECC engine should be TYPE_SOFT.
+ * Set ->engine_type before registering the NAND devices in order to
+ * provide a driver specific default value.
+ */
+ this->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+
+ ret = nand_scan(this, 1);
+ if (ret) {
+ dev_err(&pdev->dev, "NAND scan failed with %d\n", ret);
+ goto out3;
+ }
+
+ mtd_device_register(mtd, pd->parts, pd->num_parts);
+
+ platform_set_drvdata(pdev, ctx);
+
+ return 0;
+
+out3:
+ iounmap(ctx->base);
+out2:
+ release_mem_region(r->start, resource_size(r));
+out1:
+ kfree(ctx);
+ return ret;
+}
+
+static int au1550nd_remove(struct platform_device *pdev)
+{
+ struct au1550nd_ctx *ctx = platform_get_drvdata(pdev);
+ struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ struct nand_chip *chip = &ctx->chip;
+ int ret;
+
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ iounmap(ctx->base);
+	release_mem_region(r->start, resource_size(r));
+ kfree(ctx);
+ return 0;
+}
+
+static struct platform_driver au1550nd_driver = {
+ .driver = {
+ .name = "au1550-nand",
+ },
+ .probe = au1550nd_probe,
+ .remove = au1550nd_remove,
+};
+
+module_platform_driver(au1550nd_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Embedded Edge, LLC");
+MODULE_DESCRIPTION("Board-specific glue layer for NAND flash on Pb1550 board");
diff --git a/drivers/mtd/nand/raw/bcm47xxnflash/Makefile b/drivers/mtd/nand/raw/bcm47xxnflash/Makefile
new file mode 100644
index 000000000..b531a630c
--- /dev/null
+++ b/drivers/mtd/nand/raw/bcm47xxnflash/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
+bcm47xxnflash-y += main.o
+bcm47xxnflash-y += ops_bcm4706.o
+
+obj-$(CONFIG_MTD_NAND_BCM47XXNFLASH) += bcm47xxnflash.o
diff --git a/drivers/mtd/nand/raw/bcm47xxnflash/bcm47xxnflash.h b/drivers/mtd/nand/raw/bcm47xxnflash/bcm47xxnflash.h
new file mode 100644
index 000000000..201b9baa5
--- /dev/null
+++ b/drivers/mtd/nand/raw/bcm47xxnflash/bcm47xxnflash.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __BCM47XXNFLASH_H
+#define __BCM47XXNFLASH_H
+
+#ifndef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#endif
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+
+struct bcm47xxnflash {
+ struct bcma_drv_cc *cc;
+
+ struct nand_chip nand_chip;
+
+ unsigned curr_command;
+ int curr_page_addr;
+ int curr_column;
+
+ u8 id_data[8];
+};
+
+int bcm47xxnflash_ops_bcm4706_init(struct bcm47xxnflash *b47n);
+
+#endif /* __BCM47XXNFLASH_H */
diff --git a/drivers/mtd/nand/raw/bcm47xxnflash/main.c b/drivers/mtd/nand/raw/bcm47xxnflash/main.c
new file mode 100644
index 000000000..dcc70d9dc
--- /dev/null
+++ b/drivers/mtd/nand/raw/bcm47xxnflash/main.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * BCM47XX NAND flash driver
+ *
+ * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com>
+ */
+
+#include "bcm47xxnflash.h"
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/bcma/bcma.h>
+
+MODULE_DESCRIPTION("NAND flash driver for BCMA bus");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Rafał Miłecki");
+
+static const char *probes[] = { "bcm47xxpart", NULL };
+
+static int bcm47xxnflash_probe(struct platform_device *pdev)
+{
+ struct bcma_nflash *nflash = dev_get_platdata(&pdev->dev);
+ struct bcm47xxnflash *b47n;
+ struct mtd_info *mtd;
+ int err = 0;
+
+ b47n = devm_kzalloc(&pdev->dev, sizeof(*b47n), GFP_KERNEL);
+ if (!b47n)
+ return -ENOMEM;
+
+ nand_set_controller_data(&b47n->nand_chip, b47n);
+ mtd = nand_to_mtd(&b47n->nand_chip);
+ mtd->dev.parent = &pdev->dev;
+ b47n->cc = container_of(nflash, struct bcma_drv_cc, nflash);
+
+ if (b47n->cc->core->bus->chipinfo.id == BCMA_CHIP_ID_BCM4706) {
+ err = bcm47xxnflash_ops_bcm4706_init(b47n);
+ } else {
+ pr_err("Device not supported\n");
+ err = -ENOTSUPP;
+ }
+ if (err) {
+ pr_err("Initialization failed: %d\n", err);
+ return err;
+ }
+
+ platform_set_drvdata(pdev, b47n);
+
+ err = mtd_device_parse_register(mtd, probes, NULL, NULL, 0);
+ if (err) {
+ pr_err("Failed to register MTD device: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int bcm47xxnflash_remove(struct platform_device *pdev)
+{
+ struct bcm47xxnflash *nflash = platform_get_drvdata(pdev);
+ struct nand_chip *chip = &nflash->nand_chip;
+ int ret;
+
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+
+ return 0;
+}
+
+static struct platform_driver bcm47xxnflash_driver = {
+ .probe = bcm47xxnflash_probe,
+ .remove = bcm47xxnflash_remove,
+ .driver = {
+ .name = "bcma_nflash",
+ },
+};
+
+module_platform_driver(bcm47xxnflash_driver);
diff --git a/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c b/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c
new file mode 100644
index 000000000..8bb17c5a6
--- /dev/null
+++ b/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c
@@ -0,0 +1,451 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * BCM47XX NAND flash driver
+ *
+ * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com>
+ */
+
+#include "bcm47xxnflash.h"
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/bcma/bcma.h>
+
+/* Broadcom uses 1'000'000 but it seems to be too many. Tests on a WNDR4500
+ * have shown ~1000 retries as the maximum. */
+#define NFLASH_READY_RETRIES 10000
+
+#define NFLASH_SECTOR_SIZE 512
+
+#define NCTL_CMD0 0x00010000
+#define NCTL_COL 0x00020000 /* Update column with value from BCMA_CC_NFLASH_COL_ADDR */
+#define NCTL_ROW 0x00040000 /* Update row (page) with value from BCMA_CC_NFLASH_ROW_ADDR */
+#define NCTL_CMD1W 0x00080000
+#define NCTL_READ 0x00100000
+#define NCTL_WRITE 0x00200000
+#define NCTL_SPECADDR 0x01000000
+#define NCTL_READY 0x04000000
+#define NCTL_ERR 0x08000000
+#define NCTL_CSA 0x40000000
+#define NCTL_START 0x80000000
+
+/**************************************************
+ * Various helpers
+ **************************************************/
+
+static inline u8 bcm47xxnflash_ops_bcm4706_ns_to_cycle(u16 ns, u16 clock)
+{
+ return ((ns * 1000 * clock) / 1000000) + 1;
+}
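+
+/*
+ * Worked example (editorial note, values assumed for illustration): a 15 ns
+ * constraint on a 200 MHz clock yields ((15 * 1000 * 200) / 1000000) + 1 =
+ * 3 + 1 = 4 cycles; the "+ 1" biases the result upwards so that integer
+ * truncation can never under-program a wait time.
+ */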
+
+static int bcm47xxnflash_ops_bcm4706_ctl_cmd(struct bcma_drv_cc *cc, u32 code)
+{
+ int i = 0;
+
+ bcma_cc_write32(cc, BCMA_CC_NFLASH_CTL, NCTL_START | code);
+ for (i = 0; i < NFLASH_READY_RETRIES; i++) {
+ if (!(bcma_cc_read32(cc, BCMA_CC_NFLASH_CTL) & NCTL_START)) {
+ i = 0;
+ break;
+ }
+ }
+ if (i) {
+ pr_err("NFLASH control command not ready!\n");
+ return -EBUSY;
+ }
+ return 0;
+}
+
+static int bcm47xxnflash_ops_bcm4706_poll(struct bcma_drv_cc *cc)
+{
+ int i;
+
+ for (i = 0; i < NFLASH_READY_RETRIES; i++) {
+ if (bcma_cc_read32(cc, BCMA_CC_NFLASH_CTL) & NCTL_READY) {
+ if (bcma_cc_read32(cc, BCMA_CC_NFLASH_CTL) &
+ BCMA_CC_NFLASH_CTL_ERR) {
+ pr_err("Error on polling\n");
+ return -EBUSY;
+ } else {
+ return 0;
+ }
+ }
+ }
+
+ pr_err("Polling timeout!\n");
+ return -EBUSY;
+}
+
+/**************************************************
+ * R/W
+ **************************************************/
+
+static void bcm47xxnflash_ops_bcm4706_read(struct mtd_info *mtd, uint8_t *buf,
+ int len)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+ struct bcm47xxnflash *b47n = nand_get_controller_data(nand_chip);
+
+ u32 ctlcode;
+ u32 *dest = (u32 *)buf;
+ int i;
+ int toread;
+
+ BUG_ON(b47n->curr_page_addr & ~nand_chip->pagemask);
+ /* Don't validate column using nand_chip->page_shift, it may be bigger
+ * when accessing OOB */
+
+ while (len) {
+ /* We can read maximum of 0x200 bytes at once */
+ toread = min(len, 0x200);
+
+ /* Set page and column */
+ bcma_cc_write32(b47n->cc, BCMA_CC_NFLASH_COL_ADDR,
+ b47n->curr_column);
+ bcma_cc_write32(b47n->cc, BCMA_CC_NFLASH_ROW_ADDR,
+ b47n->curr_page_addr);
+
+ /* Prepare to read */
+ ctlcode = NCTL_CSA | NCTL_CMD1W | NCTL_ROW | NCTL_COL |
+ NCTL_CMD0;
+ ctlcode |= NAND_CMD_READSTART << 8;
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc, ctlcode))
+ return;
+ if (bcm47xxnflash_ops_bcm4706_poll(b47n->cc))
+ return;
+
+ /* Eventually read some data :) */
+ for (i = 0; i < toread; i += 4, dest++) {
+ ctlcode = NCTL_CSA | 0x30000000 | NCTL_READ;
+ if (i == toread - 4) /* Last read goes without that */
+ ctlcode &= ~NCTL_CSA;
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc,
+ ctlcode))
+ return;
+ *dest = bcma_cc_read32(b47n->cc, BCMA_CC_NFLASH_DATA);
+ }
+
+ b47n->curr_column += toread;
+ len -= toread;
+ }
+}
+
+static void bcm47xxnflash_ops_bcm4706_write(struct mtd_info *mtd,
+ const uint8_t *buf, int len)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+ struct bcm47xxnflash *b47n = nand_get_controller_data(nand_chip);
+ struct bcma_drv_cc *cc = b47n->cc;
+
+ u32 ctlcode;
+ const u32 *data = (u32 *)buf;
+ int i;
+
+ BUG_ON(b47n->curr_page_addr & ~nand_chip->pagemask);
+	/* Don't validate the column using nand_chip->page_shift; it may be
+	 * bigger when accessing the OOB area */
+
+ for (i = 0; i < len; i += 4, data++) {
+ bcma_cc_write32(cc, BCMA_CC_NFLASH_DATA, *data);
+
+ ctlcode = NCTL_CSA | 0x30000000 | NCTL_WRITE;
+		if (i == len - 4) /* Last write goes without that */
+ ctlcode &= ~NCTL_CSA;
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, ctlcode)) {
+ pr_err("%s ctl_cmd didn't work!\n", __func__);
+ return;
+ }
+ }
+
+ b47n->curr_column += len;
+}
+
+/**************************************************
+ * NAND chip ops
+ **************************************************/
+
+static void bcm47xxnflash_ops_bcm4706_cmd_ctrl(struct nand_chip *nand_chip,
+ int cmd, unsigned int ctrl)
+{
+ struct bcm47xxnflash *b47n = nand_get_controller_data(nand_chip);
+ u32 code = 0;
+
+ if (cmd == NAND_CMD_NONE)
+ return;
+
+ if (cmd & NAND_CTRL_CLE)
+ code = cmd | NCTL_CMD0;
+
+ /* nCS is not needed for reset command */
+ if (cmd != NAND_CMD_RESET)
+ code |= NCTL_CSA;
+
+ bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc, code);
+}
+
+/* Default nand_select_chip calls cmd_ctrl, which is not used in BCM4706 */
+static void bcm47xxnflash_ops_bcm4706_select_chip(struct nand_chip *chip,
+ int cs)
+{
+ return;
+}
+
+static int bcm47xxnflash_ops_bcm4706_dev_ready(struct nand_chip *nand_chip)
+{
+ struct bcm47xxnflash *b47n = nand_get_controller_data(nand_chip);
+
+ return !!(bcma_cc_read32(b47n->cc, BCMA_CC_NFLASH_CTL) & NCTL_READY);
+}
+
+/*
+ * The default nand_command and nand_command_lp don't match the BCM4706
+ * hardware layout. For example, reading the chip ID is performed in a
+ * non-standard way. Setting the column and page is also handled differently;
+ * we use special registers of the ChipCommon core. Hacking cmd_ctrl to
+ * understand and convert standard commands would be much more complicated.
+ */
+static void bcm47xxnflash_ops_bcm4706_cmdfunc(struct nand_chip *nand_chip,
+ unsigned command, int column,
+ int page_addr)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand_chip);
+ struct bcm47xxnflash *b47n = nand_get_controller_data(nand_chip);
+ struct bcma_drv_cc *cc = b47n->cc;
+ u32 ctlcode;
+ int i;
+
+ if (column != -1)
+ b47n->curr_column = column;
+ if (page_addr != -1)
+ b47n->curr_page_addr = page_addr;
+
+ switch (command) {
+ case NAND_CMD_RESET:
+ nand_chip->legacy.cmd_ctrl(nand_chip, command, NAND_CTRL_CLE);
+
+ ndelay(100);
+ nand_wait_ready(nand_chip);
+ break;
+ case NAND_CMD_READID:
+ ctlcode = NCTL_CSA | 0x01000000 | NCTL_CMD1W | NCTL_CMD0;
+ ctlcode |= NAND_CMD_READID;
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc, ctlcode)) {
+ pr_err("READID error\n");
+ break;
+ }
+
+ /*
+		 * Reading is special: the last read has to go without the
+		 * NCTL_CSA bit. We don't know how many reads the NAND
+		 * subsystem is going to perform, so cache everything.
+ */
+ for (i = 0; i < ARRAY_SIZE(b47n->id_data); i++) {
+ ctlcode = NCTL_CSA | NCTL_READ;
+ if (i == ARRAY_SIZE(b47n->id_data) - 1)
+ ctlcode &= ~NCTL_CSA;
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc,
+ ctlcode)) {
+ pr_err("READID error\n");
+ break;
+ }
+ b47n->id_data[i] =
+ bcma_cc_read32(b47n->cc, BCMA_CC_NFLASH_DATA)
+ & 0xFF;
+ }
+
+ break;
+ case NAND_CMD_STATUS:
+ ctlcode = NCTL_CSA | NCTL_CMD0 | NAND_CMD_STATUS;
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, ctlcode))
+ pr_err("STATUS command error\n");
+ break;
+ case NAND_CMD_READ0:
+ break;
+ case NAND_CMD_READOOB:
+ if (page_addr != -1)
+ b47n->curr_column += mtd->writesize;
+ break;
+ case NAND_CMD_ERASE1:
+ bcma_cc_write32(cc, BCMA_CC_NFLASH_ROW_ADDR,
+ b47n->curr_page_addr);
+ ctlcode = NCTL_ROW | NCTL_CMD1W | NCTL_CMD0 |
+ NAND_CMD_ERASE1 | (NAND_CMD_ERASE2 << 8);
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, ctlcode))
+ pr_err("ERASE1 failed\n");
+ break;
+ case NAND_CMD_ERASE2:
+ break;
+ case NAND_CMD_SEQIN:
+ /* Set page and column */
+ bcma_cc_write32(cc, BCMA_CC_NFLASH_COL_ADDR,
+ b47n->curr_column);
+ bcma_cc_write32(cc, BCMA_CC_NFLASH_ROW_ADDR,
+ b47n->curr_page_addr);
+
+ /* Prepare to write */
+ ctlcode = 0x40000000 | NCTL_ROW | NCTL_COL | NCTL_CMD0;
+ ctlcode |= NAND_CMD_SEQIN;
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, ctlcode))
+ pr_err("SEQIN failed\n");
+ break;
+ case NAND_CMD_PAGEPROG:
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, NCTL_CMD0 |
+ NAND_CMD_PAGEPROG))
+ pr_err("PAGEPROG failed\n");
+ if (bcm47xxnflash_ops_bcm4706_poll(cc))
+ pr_err("PAGEPROG not ready\n");
+ break;
+ default:
+ pr_err("Command 0x%X unsupported\n", command);
+ break;
+ }
+ b47n->curr_command = command;
+}
+
+static u8 bcm47xxnflash_ops_bcm4706_read_byte(struct nand_chip *nand_chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand_chip);
+ struct bcm47xxnflash *b47n = nand_get_controller_data(nand_chip);
+ struct bcma_drv_cc *cc = b47n->cc;
+ u32 tmp = 0;
+
+ switch (b47n->curr_command) {
+ case NAND_CMD_READID:
+ if (b47n->curr_column >= ARRAY_SIZE(b47n->id_data)) {
+ pr_err("Requested invalid id_data: %d\n",
+ b47n->curr_column);
+ return 0;
+ }
+ return b47n->id_data[b47n->curr_column++];
+ case NAND_CMD_STATUS:
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, NCTL_READ))
+ return 0;
+ return bcma_cc_read32(cc, BCMA_CC_NFLASH_DATA) & 0xff;
+ case NAND_CMD_READOOB:
+ bcm47xxnflash_ops_bcm4706_read(mtd, (u8 *)&tmp, 4);
+ return tmp & 0xFF;
+ }
+
+ pr_err("Invalid command for byte read: 0x%X\n", b47n->curr_command);
+ return 0;
+}
+
+static void bcm47xxnflash_ops_bcm4706_read_buf(struct nand_chip *nand_chip,
+ uint8_t *buf, int len)
+{
+ struct bcm47xxnflash *b47n = nand_get_controller_data(nand_chip);
+
+ switch (b47n->curr_command) {
+ case NAND_CMD_READ0:
+ case NAND_CMD_READOOB:
+ bcm47xxnflash_ops_bcm4706_read(nand_to_mtd(nand_chip), buf,
+ len);
+ return;
+ }
+
+ pr_err("Invalid command for buf read: 0x%X\n", b47n->curr_command);
+}
+
+static void bcm47xxnflash_ops_bcm4706_write_buf(struct nand_chip *nand_chip,
+ const uint8_t *buf, int len)
+{
+ struct bcm47xxnflash *b47n = nand_get_controller_data(nand_chip);
+
+ switch (b47n->curr_command) {
+ case NAND_CMD_SEQIN:
+ bcm47xxnflash_ops_bcm4706_write(nand_to_mtd(nand_chip), buf,
+ len);
+ return;
+ }
+
+ pr_err("Invalid command for buf write: 0x%X\n", b47n->curr_command);
+}
+
+/**************************************************
+ * Init
+ **************************************************/
+
+int bcm47xxnflash_ops_bcm4706_init(struct bcm47xxnflash *b47n)
+{
+	struct nand_chip *nand_chip = &b47n->nand_chip;
+ int err;
+ u32 freq;
+ u16 clock;
+ u8 w0, w1, w2, w3, w4;
+
+ unsigned long chipsize; /* MiB */
+ u8 tbits, col_bits, col_size, row_bits, row_bsize;
+ u32 val;
+
+ nand_chip->legacy.select_chip = bcm47xxnflash_ops_bcm4706_select_chip;
+ nand_chip->legacy.cmd_ctrl = bcm47xxnflash_ops_bcm4706_cmd_ctrl;
+ nand_chip->legacy.dev_ready = bcm47xxnflash_ops_bcm4706_dev_ready;
+ b47n->nand_chip.legacy.cmdfunc = bcm47xxnflash_ops_bcm4706_cmdfunc;
+ b47n->nand_chip.legacy.read_byte = bcm47xxnflash_ops_bcm4706_read_byte;
+ b47n->nand_chip.legacy.read_buf = bcm47xxnflash_ops_bcm4706_read_buf;
+ b47n->nand_chip.legacy.write_buf = bcm47xxnflash_ops_bcm4706_write_buf;
+ b47n->nand_chip.legacy.set_features = nand_get_set_features_notsupp;
+ b47n->nand_chip.legacy.get_features = nand_get_set_features_notsupp;
+
+ nand_chip->legacy.chip_delay = 50;
+ b47n->nand_chip.bbt_options = NAND_BBT_USE_FLASH;
+ /* TODO: implement ECC */
+ b47n->nand_chip.ecc.engine_type = NAND_ECC_ENGINE_TYPE_NONE;
+
+ /* Enable NAND flash access */
+ bcma_cc_set32(b47n->cc, BCMA_CC_4706_FLASHSCFG,
+ BCMA_CC_4706_FLASHSCFG_NF1);
+
+ /* Configure wait counters */
+ if (b47n->cc->status & BCMA_CC_CHIPST_4706_PKG_OPTION) {
+ /* 400 MHz */
+ freq = 400000000 / 4;
+ } else {
+ freq = bcma_chipco_pll_read(b47n->cc, 4);
+ freq = (freq & 0xFFF) >> 3;
+ /* Fixed reference clock 25 MHz and m = 2 */
+ freq = (freq * 25000000 / 2) / 4;
+ }
+ clock = freq / 1000000;
+ w0 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(15, clock);
+ w1 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(20, clock);
+ w2 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(10, clock);
+ w3 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(10, clock);
+ w4 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(100, clock);
+ bcma_cc_write32(b47n->cc, BCMA_CC_NFLASH_WAITCNT0,
+ (w4 << 24 | w3 << 18 | w2 << 12 | w1 << 6 | w0));
+
+ /* Scan NAND */
+ err = nand_scan(&b47n->nand_chip, 1);
+ if (err) {
+ pr_err("Could not scan NAND flash: %d\n", err);
+ goto exit;
+ }
+
+ /* Configure FLASH */
+ chipsize = nanddev_target_size(&b47n->nand_chip.base) >> 20;
+ tbits = ffs(chipsize); /* find first bit set */
+ if (!tbits || tbits != fls(chipsize)) {
+ pr_err("Invalid flash size: 0x%lX\n", chipsize);
+ err = -ENOTSUPP;
+ goto exit;
+ }
+ tbits += 19; /* Broadcom increases *index* by 20, we increase *pos* */
+
+ col_bits = b47n->nand_chip.page_shift + 1;
+ col_size = (col_bits + 7) / 8;
+
+ row_bits = tbits - col_bits + 1;
+ row_bsize = (row_bits + 7) / 8;
+
+ val = ((row_bsize - 1) << 6) | ((col_size - 1) << 4) | 2;
+ bcma_cc_write32(b47n->cc, BCMA_CC_NFLASH_CONF, val);
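+	/*
+	 * Worked example (editorial note, geometry assumed for illustration):
+	 * a 128 MiB chip with 2048 B pages gives tbits = 8 + 19 = 27,
+	 * col_bits = 12 so col_size = 2, row_bits = 27 - 12 + 1 = 16 so
+	 * row_bsize = 2, hence val = (1 << 6) | (1 << 4) | 2 = 0x52.
+	 */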
+
+exit:
+ if (err)
+ bcma_cc_mask32(b47n->cc, BCMA_CC_4706_FLASHSCFG,
+ ~BCMA_CC_4706_FLASHSCFG_NF1);
+ return err;
+}
diff --git a/drivers/mtd/nand/raw/brcmnand/Makefile b/drivers/mtd/nand/raw/brcmnand/Makefile
new file mode 100644
index 000000000..195b845e4
--- /dev/null
+++ b/drivers/mtd/nand/raw/brcmnand/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+# link order matters; don't link the more generic brcmstb_nand.o before the
+# more specific iproc_nand.o, for instance
+obj-$(CONFIG_MTD_NAND_BRCMNAND) += iproc_nand.o
+obj-$(CONFIG_MTD_NAND_BRCMNAND) += bcm63138_nand.o
+obj-$(CONFIG_MTD_NAND_BRCMNAND) += bcm6368_nand.o
+obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmstb_nand.o
+obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand.o
diff --git a/drivers/mtd/nand/raw/brcmnand/bcm63138_nand.c b/drivers/mtd/nand/raw/brcmnand/bcm63138_nand.c
new file mode 100644
index 000000000..71ddcc611
--- /dev/null
+++ b/drivers/mtd/nand/raw/brcmnand/bcm63138_nand.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright © 2015 Broadcom Corporation
+ */
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "brcmnand.h"
+
+struct bcm63138_nand_soc {
+ struct brcmnand_soc soc;
+ void __iomem *base;
+};
+
+#define BCM63138_NAND_INT_STATUS 0x00
+#define BCM63138_NAND_INT_EN 0x04
+
+enum {
+ BCM63138_CTLRDY = BIT(4),
+};
+
+static bool bcm63138_nand_intc_ack(struct brcmnand_soc *soc)
+{
+ struct bcm63138_nand_soc *priv =
+ container_of(soc, struct bcm63138_nand_soc, soc);
+ void __iomem *mmio = priv->base + BCM63138_NAND_INT_STATUS;
+ u32 val = brcmnand_readl(mmio);
+
+ if (val & BCM63138_CTLRDY) {
+ brcmnand_writel(val & ~BCM63138_CTLRDY, mmio);
+ return true;
+ }
+
+ return false;
+}
+
+static void bcm63138_nand_intc_set(struct brcmnand_soc *soc, bool en)
+{
+ struct bcm63138_nand_soc *priv =
+ container_of(soc, struct bcm63138_nand_soc, soc);
+ void __iomem *mmio = priv->base + BCM63138_NAND_INT_EN;
+ u32 val = brcmnand_readl(mmio);
+
+ if (en)
+ val |= BCM63138_CTLRDY;
+ else
+ val &= ~BCM63138_CTLRDY;
+
+ brcmnand_writel(val, mmio);
+}
+
+static int bcm63138_nand_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct bcm63138_nand_soc *priv;
+ struct brcmnand_soc *soc;
+ struct resource *res;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ soc = &priv->soc;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand-int-base");
+ priv->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ soc->ctlrdy_ack = bcm63138_nand_intc_ack;
+ soc->ctlrdy_set_enabled = bcm63138_nand_intc_set;
+
+ return brcmnand_probe(pdev, soc);
+}
+
+static const struct of_device_id bcm63138_nand_of_match[] = {
+ { .compatible = "brcm,nand-bcm63138" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, bcm63138_nand_of_match);
+
+static struct platform_driver bcm63138_nand_driver = {
+ .probe = bcm63138_nand_probe,
+ .remove = brcmnand_remove,
+ .driver = {
+ .name = "bcm63138_nand",
+ .pm = &brcmnand_pm_ops,
+ .of_match_table = bcm63138_nand_of_match,
+ }
+};
+module_platform_driver(bcm63138_nand_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Brian Norris");
+MODULE_DESCRIPTION("NAND driver for BCM63138");
diff --git a/drivers/mtd/nand/raw/brcmnand/bcm6368_nand.c b/drivers/mtd/nand/raw/brcmnand/bcm6368_nand.c
new file mode 100644
index 000000000..7c17ec4ce
--- /dev/null
+++ b/drivers/mtd/nand/raw/brcmnand/bcm6368_nand.c
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2015 Simon Arlott
+ *
+ * Derived from bcm63138_nand.c:
+ * Copyright © 2015 Broadcom Corporation
+ *
+ * Derived from bcm963xx_4.12L.06B_consumer/shared/opensource/include/bcm963xx/63268_map_part.h:
+ * Copyright 2000-2010 Broadcom Corporation
+ *
+ * Derived from bcm963xx_4.12L.06B_consumer/shared/opensource/flash/nandflash.c:
+ * Copyright 2000-2010 Broadcom Corporation
+ */
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "brcmnand.h"
+
+struct bcm6368_nand_soc {
+ struct brcmnand_soc soc;
+ void __iomem *base;
+};
+
+#define BCM6368_NAND_INT 0x00
+#define BCM6368_NAND_STATUS_SHIFT 0
+#define BCM6368_NAND_STATUS_MASK (0xfff << BCM6368_NAND_STATUS_SHIFT)
+#define BCM6368_NAND_ENABLE_SHIFT 16
+#define BCM6368_NAND_ENABLE_MASK (0xffff << BCM6368_NAND_ENABLE_SHIFT)
+#define BCM6368_NAND_BASE_ADDR0 0x04
+#define BCM6368_NAND_BASE_ADDR1 0x0c
+
+enum {
+ BCM6368_NP_READ = BIT(0),
+ BCM6368_BLOCK_ERASE = BIT(1),
+ BCM6368_COPY_BACK = BIT(2),
+ BCM6368_PAGE_PGM = BIT(3),
+ BCM6368_CTRL_READY = BIT(4),
+ BCM6368_DEV_RBPIN = BIT(5),
+ BCM6368_ECC_ERR_UNC = BIT(6),
+ BCM6368_ECC_ERR_CORR = BIT(7),
+};
+
+static bool bcm6368_nand_intc_ack(struct brcmnand_soc *soc)
+{
+ struct bcm6368_nand_soc *priv =
+ container_of(soc, struct bcm6368_nand_soc, soc);
+ void __iomem *mmio = priv->base + BCM6368_NAND_INT;
+ u32 val = brcmnand_readl(mmio);
+
+ if (val & (BCM6368_CTRL_READY << BCM6368_NAND_STATUS_SHIFT)) {
+ /* Ack interrupt */
+ val &= ~BCM6368_NAND_STATUS_MASK;
+ val |= BCM6368_CTRL_READY << BCM6368_NAND_STATUS_SHIFT;
+ brcmnand_writel(val, mmio);
+ return true;
+ }
+
+ return false;
+}
+
+static void bcm6368_nand_intc_set(struct brcmnand_soc *soc, bool en)
+{
+ struct bcm6368_nand_soc *priv =
+ container_of(soc, struct bcm6368_nand_soc, soc);
+ void __iomem *mmio = priv->base + BCM6368_NAND_INT;
+ u32 val = brcmnand_readl(mmio);
+
+ /* Don't ack any interrupts */
+ val &= ~BCM6368_NAND_STATUS_MASK;
+
+ if (en)
+ val |= BCM6368_CTRL_READY << BCM6368_NAND_ENABLE_SHIFT;
+ else
+ val &= ~(BCM6368_CTRL_READY << BCM6368_NAND_ENABLE_SHIFT);
+
+ brcmnand_writel(val, mmio);
+}
+
+static int bcm6368_nand_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct bcm6368_nand_soc *priv;
+ struct brcmnand_soc *soc;
+ struct resource *res;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ soc = &priv->soc;
+
+ res = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "nand-int-base");
+ priv->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ soc->ctlrdy_ack = bcm6368_nand_intc_ack;
+ soc->ctlrdy_set_enabled = bcm6368_nand_intc_set;
+
+ /* Disable and ack all interrupts */
+ brcmnand_writel(0, priv->base + BCM6368_NAND_INT);
+ brcmnand_writel(BCM6368_NAND_STATUS_MASK,
+ priv->base + BCM6368_NAND_INT);
+
+ return brcmnand_probe(pdev, soc);
+}
+
+static const struct of_device_id bcm6368_nand_of_match[] = {
+ { .compatible = "brcm,nand-bcm6368" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, bcm6368_nand_of_match);
+
+static struct platform_driver bcm6368_nand_driver = {
+ .probe = bcm6368_nand_probe,
+ .remove = brcmnand_remove,
+ .driver = {
+ .name = "bcm6368_nand",
+ .pm = &brcmnand_pm_ops,
+ .of_match_table = bcm6368_nand_of_match,
+ }
+};
+module_platform_driver(bcm6368_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Simon Arlott");
+MODULE_DESCRIPTION("NAND driver for BCM6368");
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
new file mode 100644
index 000000000..11d706ff3
--- /dev/null
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
@@ -0,0 +1,3242 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright © 2010-2015 Broadcom Corporation
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/completion.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/dma-mapping.h>
+#include <linux/ioport.h>
+#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/mm.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/static_key.h>
+#include <linux/list.h>
+#include <linux/log2.h>
+
+#include "brcmnand.h"
+
+/*
+ * This flag controls whether WP stays on between erase/write commands to
+ * mitigate flash corruption due to power glitches. Values:
+ * 0: NAND_WP is not used or not available
+ * 1: NAND_WP is set by default, cleared for erase/write operations
+ * 2: NAND_WP is always cleared
+ */
+static int wp_on = 1;
+module_param(wp_on, int, 0444);
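+
+/*
+ * Editorial note: the default of 1 can be overridden at boot, e.g. with
+ * "brcmnand.wp_on=2" on the kernel command line; the 0444 permissions make
+ * the value readable via sysfs but not writable at run time.
+ */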
+
+/***********************************************************************
+ * Definitions
+ ***********************************************************************/
+
+#define DRV_NAME "brcmnand"
+
+#define CMD_NULL 0x00
+#define CMD_PAGE_READ 0x01
+#define CMD_SPARE_AREA_READ 0x02
+#define CMD_STATUS_READ 0x03
+#define CMD_PROGRAM_PAGE 0x04
+#define CMD_PROGRAM_SPARE_AREA 0x05
+#define CMD_COPY_BACK 0x06
+#define CMD_DEVICE_ID_READ 0x07
+#define CMD_BLOCK_ERASE 0x08
+#define CMD_FLASH_RESET 0x09
+#define CMD_BLOCKS_LOCK 0x0a
+#define CMD_BLOCKS_LOCK_DOWN 0x0b
+#define CMD_BLOCKS_UNLOCK 0x0c
+#define CMD_READ_BLOCKS_LOCK_STATUS 0x0d
+#define CMD_PARAMETER_READ 0x0e
+#define CMD_PARAMETER_CHANGE_COL 0x0f
+#define CMD_LOW_LEVEL_OP 0x10
+
+struct brcm_nand_dma_desc {
+ u32 next_desc;
+ u32 next_desc_ext;
+ u32 cmd_irq;
+ u32 dram_addr;
+ u32 dram_addr_ext;
+ u32 tfr_len;
+ u32 total_len;
+ u32 flash_addr;
+ u32 flash_addr_ext;
+ u32 cs;
+ u32 pad2[5];
+ u32 status_valid;
+} __packed;
+
+/* Bitfields for brcm_nand_dma_desc::status_valid */
+#define FLASH_DMA_ECC_ERROR (1 << 8)
+#define FLASH_DMA_CORR_ERROR (1 << 9)
+
+/* Bitfields for DMA_MODE */
+#define FLASH_DMA_MODE_STOP_ON_ERROR BIT(1) /* stop on uncorrectable ECC error */
+#define FLASH_DMA_MODE_MODE BIT(0) /* linked-list mode */
+#define FLASH_DMA_MODE_MASK (FLASH_DMA_MODE_STOP_ON_ERROR | \
+ FLASH_DMA_MODE_MODE)
+
+/* 512B flash cache in the NAND controller HW */
+#define FC_SHIFT 9U
+#define FC_BYTES 512U
+#define FC_WORDS (FC_BYTES >> 2)
+
+#define BRCMNAND_MIN_PAGESIZE 512
+#define BRCMNAND_MIN_BLOCKSIZE (8 * 1024)
+#define BRCMNAND_MIN_DEVSIZE (4ULL * 1024 * 1024)
+
+#define NAND_CTRL_RDY (INTFC_CTLR_READY | INTFC_FLASH_READY)
+#define NAND_POLL_STATUS_TIMEOUT_MS 100
+
+#define EDU_CMD_WRITE 0x00
+#define EDU_CMD_READ 0x01
+#define EDU_STATUS_ACTIVE BIT(0)
+#define EDU_ERR_STATUS_ERRACK BIT(0)
+#define EDU_DONE_MASK GENMASK(1, 0)
+
+#define EDU_CONFIG_MODE_NAND BIT(0)
+#define EDU_CONFIG_SWAP_BYTE BIT(1)
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define EDU_CONFIG_SWAP_CFG EDU_CONFIG_SWAP_BYTE
+#else
+#define EDU_CONFIG_SWAP_CFG 0
+#endif
+
+/* edu registers */
+enum edu_reg {
+ EDU_CONFIG = 0,
+ EDU_DRAM_ADDR,
+ EDU_EXT_ADDR,
+ EDU_LENGTH,
+ EDU_CMD,
+ EDU_STOP,
+ EDU_STATUS,
+ EDU_DONE,
+ EDU_ERR_STATUS,
+};
+
+static const u16 edu_regs[] = {
+ [EDU_CONFIG] = 0x00,
+ [EDU_DRAM_ADDR] = 0x04,
+ [EDU_EXT_ADDR] = 0x08,
+ [EDU_LENGTH] = 0x0c,
+ [EDU_CMD] = 0x10,
+ [EDU_STOP] = 0x14,
+ [EDU_STATUS] = 0x18,
+ [EDU_DONE] = 0x1c,
+ [EDU_ERR_STATUS] = 0x20,
+};
+
+/* flash_dma registers */
+enum flash_dma_reg {
+ FLASH_DMA_REVISION = 0,
+ FLASH_DMA_FIRST_DESC,
+ FLASH_DMA_FIRST_DESC_EXT,
+ FLASH_DMA_CTRL,
+ FLASH_DMA_MODE,
+ FLASH_DMA_STATUS,
+ FLASH_DMA_INTERRUPT_DESC,
+ FLASH_DMA_INTERRUPT_DESC_EXT,
+ FLASH_DMA_ERROR_STATUS,
+ FLASH_DMA_CURRENT_DESC,
+ FLASH_DMA_CURRENT_DESC_EXT,
+};
+
+/* flash_dma registers v0 */
+static const u16 flash_dma_regs_v0[] = {
+ [FLASH_DMA_REVISION] = 0x00,
+ [FLASH_DMA_FIRST_DESC] = 0x04,
+ [FLASH_DMA_CTRL] = 0x08,
+ [FLASH_DMA_MODE] = 0x0c,
+ [FLASH_DMA_STATUS] = 0x10,
+ [FLASH_DMA_INTERRUPT_DESC] = 0x14,
+ [FLASH_DMA_ERROR_STATUS] = 0x18,
+ [FLASH_DMA_CURRENT_DESC] = 0x1c,
+};
+
+/* flash_dma registers v1 */
+static const u16 flash_dma_regs_v1[] = {
+ [FLASH_DMA_REVISION] = 0x00,
+ [FLASH_DMA_FIRST_DESC] = 0x04,
+ [FLASH_DMA_FIRST_DESC_EXT] = 0x08,
+ [FLASH_DMA_CTRL] = 0x0c,
+ [FLASH_DMA_MODE] = 0x10,
+ [FLASH_DMA_STATUS] = 0x14,
+ [FLASH_DMA_INTERRUPT_DESC] = 0x18,
+ [FLASH_DMA_INTERRUPT_DESC_EXT] = 0x1c,
+ [FLASH_DMA_ERROR_STATUS] = 0x20,
+ [FLASH_DMA_CURRENT_DESC] = 0x24,
+ [FLASH_DMA_CURRENT_DESC_EXT] = 0x28,
+};
+
+/* flash_dma registers v4 */
+static const u16 flash_dma_regs_v4[] = {
+ [FLASH_DMA_REVISION] = 0x00,
+ [FLASH_DMA_FIRST_DESC] = 0x08,
+ [FLASH_DMA_FIRST_DESC_EXT] = 0x0c,
+ [FLASH_DMA_CTRL] = 0x10,
+ [FLASH_DMA_MODE] = 0x14,
+ [FLASH_DMA_STATUS] = 0x18,
+ [FLASH_DMA_INTERRUPT_DESC] = 0x20,
+ [FLASH_DMA_INTERRUPT_DESC_EXT] = 0x24,
+ [FLASH_DMA_ERROR_STATUS] = 0x28,
+ [FLASH_DMA_CURRENT_DESC] = 0x30,
+ [FLASH_DMA_CURRENT_DESC_EXT] = 0x34,
+};
+
+/* Controller feature flags */
+enum {
+ BRCMNAND_HAS_1K_SECTORS = BIT(0),
+ BRCMNAND_HAS_PREFETCH = BIT(1),
+ BRCMNAND_HAS_CACHE_MODE = BIT(2),
+ BRCMNAND_HAS_WP = BIT(3),
+};
+
+struct brcmnand_host;
+
+static DEFINE_STATIC_KEY_FALSE(brcmnand_soc_has_ops_key);
+
+struct brcmnand_controller {
+ struct device *dev;
+ struct nand_controller controller;
+ void __iomem *nand_base;
+ void __iomem *nand_fc; /* flash cache */
+ void __iomem *flash_dma_base;
+ unsigned int irq;
+ unsigned int dma_irq;
+ int nand_version;
+
+ /* Some SoCs provide custom interrupt status register(s) */
+ struct brcmnand_soc *soc;
+
+ /* Some SoCs have a gateable clock for the controller */
+ struct clk *clk;
+
+ int cmd_pending;
+ bool dma_pending;
+ bool edu_pending;
+ struct completion done;
+ struct completion dma_done;
+ struct completion edu_done;
+
+ /* List of NAND hosts (one for each chip-select) */
+ struct list_head host_list;
+
+ /* EDU info, per-transaction */
+ const u16 *edu_offsets;
+ void __iomem *edu_base;
+ int edu_irq;
+ int edu_count;
+ u64 edu_dram_addr;
+ u32 edu_ext_addr;
+ u32 edu_cmd;
+ u32 edu_config;
+
+ /* flash_dma reg */
+ const u16 *flash_dma_offsets;
+ struct brcm_nand_dma_desc *dma_desc;
+ dma_addr_t dma_pa;
+
+ int (*dma_trans)(struct brcmnand_host *host, u64 addr, u32 *buf,
+ u32 len, u8 dma_cmd);
+
+ /* in-memory cache of the FLASH_CACHE, used only for some commands */
+ u8 flash_cache[FC_BYTES];
+
+ /* Controller revision details */
+ const u16 *reg_offsets;
+ unsigned int reg_spacing; /* between CS1, CS2, ... regs */
+ const u8 *cs_offsets; /* within each chip-select */
+ const u8 *cs0_offsets; /* within CS0, if different */
+ unsigned int max_block_size;
+ const unsigned int *block_sizes;
+ unsigned int max_page_size;
+ const unsigned int *page_sizes;
+ unsigned int page_size_shift;
+ unsigned int max_oob;
+ u32 ecc_level_shift;
+ u32 features;
+
+ /* for low-power standby/resume only */
+ u32 nand_cs_nand_select;
+ u32 nand_cs_nand_xor;
+ u32 corr_stat_threshold;
+ u32 flash_dma_mode;
+ u32 flash_edu_mode;
+ bool pio_poll_mode;
+};
+
+struct brcmnand_cfg {
+ u64 device_size;
+ unsigned int block_size;
+ unsigned int page_size;
+ unsigned int spare_area_size;
+ unsigned int device_width;
+ unsigned int col_adr_bytes;
+ unsigned int blk_adr_bytes;
+ unsigned int ful_adr_bytes;
+ unsigned int sector_size_1k;
+ unsigned int ecc_level;
+ /* use for low-power standby/resume only */
+ u32 acc_control;
+ u32 config;
+ u32 config_ext;
+ u32 timing_1;
+ u32 timing_2;
+};
+
+struct brcmnand_host {
+ struct list_head node;
+
+ struct nand_chip chip;
+ struct platform_device *pdev;
+ int cs;
+
+ unsigned int last_cmd;
+ unsigned int last_byte;
+ u64 last_addr;
+ struct brcmnand_cfg hwcfg;
+ struct brcmnand_controller *ctrl;
+};
+
+enum brcmnand_reg {
+ BRCMNAND_CMD_START = 0,
+ BRCMNAND_CMD_EXT_ADDRESS,
+ BRCMNAND_CMD_ADDRESS,
+ BRCMNAND_INTFC_STATUS,
+ BRCMNAND_CS_SELECT,
+ BRCMNAND_CS_XOR,
+ BRCMNAND_LL_OP,
+ BRCMNAND_CS0_BASE,
+ BRCMNAND_CS1_BASE, /* CS1 regs, if non-contiguous */
+ BRCMNAND_CORR_THRESHOLD,
+ BRCMNAND_CORR_THRESHOLD_EXT,
+ BRCMNAND_UNCORR_COUNT,
+ BRCMNAND_CORR_COUNT,
+ BRCMNAND_CORR_EXT_ADDR,
+ BRCMNAND_CORR_ADDR,
+ BRCMNAND_UNCORR_EXT_ADDR,
+ BRCMNAND_UNCORR_ADDR,
+ BRCMNAND_SEMAPHORE,
+ BRCMNAND_ID,
+ BRCMNAND_ID_EXT,
+ BRCMNAND_LL_RDATA,
+ BRCMNAND_OOB_READ_BASE,
+ BRCMNAND_OOB_READ_10_BASE, /* offset 0x10, if non-contiguous */
+ BRCMNAND_OOB_WRITE_BASE,
+ BRCMNAND_OOB_WRITE_10_BASE, /* offset 0x10, if non-contiguous */
+ BRCMNAND_FC_BASE,
+};
+
+/* BRCMNAND v2.1-v2.2 */
+static const u16 brcmnand_regs_v21[] = {
+ [BRCMNAND_CMD_START] = 0x04,
+ [BRCMNAND_CMD_EXT_ADDRESS] = 0x08,
+ [BRCMNAND_CMD_ADDRESS] = 0x0c,
+ [BRCMNAND_INTFC_STATUS] = 0x5c,
+ [BRCMNAND_CS_SELECT] = 0x14,
+ [BRCMNAND_CS_XOR] = 0x18,
+ [BRCMNAND_LL_OP] = 0,
+ [BRCMNAND_CS0_BASE] = 0x40,
+ [BRCMNAND_CS1_BASE] = 0,
+ [BRCMNAND_CORR_THRESHOLD] = 0,
+ [BRCMNAND_CORR_THRESHOLD_EXT] = 0,
+ [BRCMNAND_UNCORR_COUNT] = 0,
+ [BRCMNAND_CORR_COUNT] = 0,
+ [BRCMNAND_CORR_EXT_ADDR] = 0x60,
+ [BRCMNAND_CORR_ADDR] = 0x64,
+ [BRCMNAND_UNCORR_EXT_ADDR] = 0x68,
+ [BRCMNAND_UNCORR_ADDR] = 0x6c,
+ [BRCMNAND_SEMAPHORE] = 0x50,
+ [BRCMNAND_ID] = 0x54,
+ [BRCMNAND_ID_EXT] = 0,
+ [BRCMNAND_LL_RDATA] = 0,
+ [BRCMNAND_OOB_READ_BASE] = 0x20,
+ [BRCMNAND_OOB_READ_10_BASE] = 0,
+ [BRCMNAND_OOB_WRITE_BASE] = 0x30,
+ [BRCMNAND_OOB_WRITE_10_BASE] = 0,
+ [BRCMNAND_FC_BASE] = 0x200,
+};
+
+/* BRCMNAND v3.3-v4.0 */
+static const u16 brcmnand_regs_v33[] = {
+ [BRCMNAND_CMD_START] = 0x04,
+ [BRCMNAND_CMD_EXT_ADDRESS] = 0x08,
+ [BRCMNAND_CMD_ADDRESS] = 0x0c,
+ [BRCMNAND_INTFC_STATUS] = 0x6c,
+ [BRCMNAND_CS_SELECT] = 0x14,
+ [BRCMNAND_CS_XOR] = 0x18,
+ [BRCMNAND_LL_OP] = 0x178,
+ [BRCMNAND_CS0_BASE] = 0x40,
+ [BRCMNAND_CS1_BASE] = 0xd0,
+ [BRCMNAND_CORR_THRESHOLD] = 0x84,
+ [BRCMNAND_CORR_THRESHOLD_EXT] = 0,
+ [BRCMNAND_UNCORR_COUNT] = 0,
+ [BRCMNAND_CORR_COUNT] = 0,
+ [BRCMNAND_CORR_EXT_ADDR] = 0x70,
+ [BRCMNAND_CORR_ADDR] = 0x74,
+ [BRCMNAND_UNCORR_EXT_ADDR] = 0x78,
+ [BRCMNAND_UNCORR_ADDR] = 0x7c,
+ [BRCMNAND_SEMAPHORE] = 0x58,
+ [BRCMNAND_ID] = 0x60,
+ [BRCMNAND_ID_EXT] = 0x64,
+ [BRCMNAND_LL_RDATA] = 0x17c,
+ [BRCMNAND_OOB_READ_BASE] = 0x20,
+ [BRCMNAND_OOB_READ_10_BASE] = 0x130,
+ [BRCMNAND_OOB_WRITE_BASE] = 0x30,
+ [BRCMNAND_OOB_WRITE_10_BASE] = 0,
+ [BRCMNAND_FC_BASE] = 0x200,
+};
+
+/* BRCMNAND v5.0 */
+static const u16 brcmnand_regs_v50[] = {
+ [BRCMNAND_CMD_START] = 0x04,
+ [BRCMNAND_CMD_EXT_ADDRESS] = 0x08,
+ [BRCMNAND_CMD_ADDRESS] = 0x0c,
+ [BRCMNAND_INTFC_STATUS] = 0x6c,
+ [BRCMNAND_CS_SELECT] = 0x14,
+ [BRCMNAND_CS_XOR] = 0x18,
+ [BRCMNAND_LL_OP] = 0x178,
+ [BRCMNAND_CS0_BASE] = 0x40,
+ [BRCMNAND_CS1_BASE] = 0xd0,
+ [BRCMNAND_CORR_THRESHOLD] = 0x84,
+ [BRCMNAND_CORR_THRESHOLD_EXT] = 0,
+ [BRCMNAND_UNCORR_COUNT] = 0,
+ [BRCMNAND_CORR_COUNT] = 0,
+ [BRCMNAND_CORR_EXT_ADDR] = 0x70,
+ [BRCMNAND_CORR_ADDR] = 0x74,
+ [BRCMNAND_UNCORR_EXT_ADDR] = 0x78,
+ [BRCMNAND_UNCORR_ADDR] = 0x7c,
+ [BRCMNAND_SEMAPHORE] = 0x58,
+ [BRCMNAND_ID] = 0x60,
+ [BRCMNAND_ID_EXT] = 0x64,
+ [BRCMNAND_LL_RDATA] = 0x17c,
+ [BRCMNAND_OOB_READ_BASE] = 0x20,
+ [BRCMNAND_OOB_READ_10_BASE] = 0x130,
+ [BRCMNAND_OOB_WRITE_BASE] = 0x30,
+ [BRCMNAND_OOB_WRITE_10_BASE] = 0x140,
+ [BRCMNAND_FC_BASE] = 0x200,
+};
+
+/* BRCMNAND v6.0 - v7.1 */
+static const u16 brcmnand_regs_v60[] = {
+ [BRCMNAND_CMD_START] = 0x04,
+ [BRCMNAND_CMD_EXT_ADDRESS] = 0x08,
+ [BRCMNAND_CMD_ADDRESS] = 0x0c,
+ [BRCMNAND_INTFC_STATUS] = 0x14,
+ [BRCMNAND_CS_SELECT] = 0x18,
+ [BRCMNAND_CS_XOR] = 0x1c,
+ [BRCMNAND_LL_OP] = 0x20,
+ [BRCMNAND_CS0_BASE] = 0x50,
+ [BRCMNAND_CS1_BASE] = 0,
+ [BRCMNAND_CORR_THRESHOLD] = 0xc0,
+ [BRCMNAND_CORR_THRESHOLD_EXT] = 0xc4,
+ [BRCMNAND_UNCORR_COUNT] = 0xfc,
+ [BRCMNAND_CORR_COUNT] = 0x100,
+ [BRCMNAND_CORR_EXT_ADDR] = 0x10c,
+ [BRCMNAND_CORR_ADDR] = 0x110,
+ [BRCMNAND_UNCORR_EXT_ADDR] = 0x114,
+ [BRCMNAND_UNCORR_ADDR] = 0x118,
+ [BRCMNAND_SEMAPHORE] = 0x150,
+ [BRCMNAND_ID] = 0x194,
+ [BRCMNAND_ID_EXT] = 0x198,
+ [BRCMNAND_LL_RDATA] = 0x19c,
+ [BRCMNAND_OOB_READ_BASE] = 0x200,
+ [BRCMNAND_OOB_READ_10_BASE] = 0,
+ [BRCMNAND_OOB_WRITE_BASE] = 0x280,
+ [BRCMNAND_OOB_WRITE_10_BASE] = 0,
+ [BRCMNAND_FC_BASE] = 0x400,
+};
+
+/* BRCMNAND v7.1 */
+static const u16 brcmnand_regs_v71[] = {
+ [BRCMNAND_CMD_START] = 0x04,
+ [BRCMNAND_CMD_EXT_ADDRESS] = 0x08,
+ [BRCMNAND_CMD_ADDRESS] = 0x0c,
+ [BRCMNAND_INTFC_STATUS] = 0x14,
+ [BRCMNAND_CS_SELECT] = 0x18,
+ [BRCMNAND_CS_XOR] = 0x1c,
+ [BRCMNAND_LL_OP] = 0x20,
+ [BRCMNAND_CS0_BASE] = 0x50,
+ [BRCMNAND_CS1_BASE] = 0,
+ [BRCMNAND_CORR_THRESHOLD] = 0xdc,
+ [BRCMNAND_CORR_THRESHOLD_EXT] = 0xe0,
+ [BRCMNAND_UNCORR_COUNT] = 0xfc,
+ [BRCMNAND_CORR_COUNT] = 0x100,
+ [BRCMNAND_CORR_EXT_ADDR] = 0x10c,
+ [BRCMNAND_CORR_ADDR] = 0x110,
+ [BRCMNAND_UNCORR_EXT_ADDR] = 0x114,
+ [BRCMNAND_UNCORR_ADDR] = 0x118,
+ [BRCMNAND_SEMAPHORE] = 0x150,
+ [BRCMNAND_ID] = 0x194,
+ [BRCMNAND_ID_EXT] = 0x198,
+ [BRCMNAND_LL_RDATA] = 0x19c,
+ [BRCMNAND_OOB_READ_BASE] = 0x200,
+ [BRCMNAND_OOB_READ_10_BASE] = 0,
+ [BRCMNAND_OOB_WRITE_BASE] = 0x280,
+ [BRCMNAND_OOB_WRITE_10_BASE] = 0,
+ [BRCMNAND_FC_BASE] = 0x400,
+};
+
+/* BRCMNAND v7.2 */
+static const u16 brcmnand_regs_v72[] = {
+ [BRCMNAND_CMD_START] = 0x04,
+ [BRCMNAND_CMD_EXT_ADDRESS] = 0x08,
+ [BRCMNAND_CMD_ADDRESS] = 0x0c,
+ [BRCMNAND_INTFC_STATUS] = 0x14,
+ [BRCMNAND_CS_SELECT] = 0x18,
+ [BRCMNAND_CS_XOR] = 0x1c,
+ [BRCMNAND_LL_OP] = 0x20,
+ [BRCMNAND_CS0_BASE] = 0x50,
+ [BRCMNAND_CS1_BASE] = 0,
+ [BRCMNAND_CORR_THRESHOLD] = 0xdc,
+ [BRCMNAND_CORR_THRESHOLD_EXT] = 0xe0,
+ [BRCMNAND_UNCORR_COUNT] = 0xfc,
+ [BRCMNAND_CORR_COUNT] = 0x100,
+ [BRCMNAND_CORR_EXT_ADDR] = 0x10c,
+ [BRCMNAND_CORR_ADDR] = 0x110,
+ [BRCMNAND_UNCORR_EXT_ADDR] = 0x114,
+ [BRCMNAND_UNCORR_ADDR] = 0x118,
+ [BRCMNAND_SEMAPHORE] = 0x150,
+ [BRCMNAND_ID] = 0x194,
+ [BRCMNAND_ID_EXT] = 0x198,
+ [BRCMNAND_LL_RDATA] = 0x19c,
+ [BRCMNAND_OOB_READ_BASE] = 0x200,
+ [BRCMNAND_OOB_READ_10_BASE] = 0,
+ [BRCMNAND_OOB_WRITE_BASE] = 0x400,
+ [BRCMNAND_OOB_WRITE_10_BASE] = 0,
+ [BRCMNAND_FC_BASE] = 0x600,
+};
+
+enum brcmnand_cs_reg {
+ BRCMNAND_CS_CFG_EXT = 0,
+ BRCMNAND_CS_CFG,
+ BRCMNAND_CS_ACC_CONTROL,
+ BRCMNAND_CS_TIMING1,
+ BRCMNAND_CS_TIMING2,
+};
+
+/* Per chip-select offsets for v7.1 */
+static const u8 brcmnand_cs_offsets_v71[] = {
+ [BRCMNAND_CS_ACC_CONTROL] = 0x00,
+ [BRCMNAND_CS_CFG_EXT] = 0x04,
+ [BRCMNAND_CS_CFG] = 0x08,
+ [BRCMNAND_CS_TIMING1] = 0x0c,
+ [BRCMNAND_CS_TIMING2] = 0x10,
+};
+
+/* Per chip-select offsets for pre v7.1, except CS0 on <= v5.0 */
+static const u8 brcmnand_cs_offsets[] = {
+ [BRCMNAND_CS_ACC_CONTROL] = 0x00,
+ [BRCMNAND_CS_CFG_EXT] = 0x04,
+ [BRCMNAND_CS_CFG] = 0x04,
+ [BRCMNAND_CS_TIMING1] = 0x08,
+ [BRCMNAND_CS_TIMING2] = 0x0c,
+};
+
+/* Per chip-select offset for <= v5.0 on CS0 only */
+static const u8 brcmnand_cs_offsets_cs0[] = {
+ [BRCMNAND_CS_ACC_CONTROL] = 0x00,
+ [BRCMNAND_CS_CFG_EXT] = 0x08,
+ [BRCMNAND_CS_CFG] = 0x08,
+ [BRCMNAND_CS_TIMING1] = 0x10,
+ [BRCMNAND_CS_TIMING2] = 0x14,
+};
+
+/*
+ * Bitfields for the CFG and CFG_EXT registers. Pre-v7.1 controllers only had
+ * one config register, but once the bitfields overflowed, newer controllers
+ * (v7.1 and newer) added a CFG_EXT register and shuffled a few fields around.
+ */
+enum {
+ CFG_BLK_ADR_BYTES_SHIFT = 8,
+ CFG_COL_ADR_BYTES_SHIFT = 12,
+ CFG_FUL_ADR_BYTES_SHIFT = 16,
+ CFG_BUS_WIDTH_SHIFT = 23,
+ CFG_BUS_WIDTH = BIT(CFG_BUS_WIDTH_SHIFT),
+ CFG_DEVICE_SIZE_SHIFT = 24,
+
+ /* Only for v2.1 */
+ CFG_PAGE_SIZE_SHIFT_v2_1 = 30,
+
+ /* Only for pre-v7.1 (with no CFG_EXT register) */
+ CFG_PAGE_SIZE_SHIFT = 20,
+ CFG_BLK_SIZE_SHIFT = 28,
+
+ /* Only for v7.1+ (with CFG_EXT register) */
+ CFG_EXT_PAGE_SIZE_SHIFT = 0,
+ CFG_EXT_BLK_SIZE_SHIFT = 4,
+};
+
+/* BRCMNAND_INTFC_STATUS */
+enum {
+ INTFC_FLASH_STATUS = GENMASK(7, 0),
+
+ INTFC_ERASED = BIT(27),
+ INTFC_OOB_VALID = BIT(28),
+ INTFC_CACHE_VALID = BIT(29),
+ INTFC_FLASH_READY = BIT(30),
+ INTFC_CTLR_READY = BIT(31),
+};
+
+/***********************************************************************
+ * NAND ACC CONTROL bitfield
+ *
+ * Some bits have remained constant across hardware revisions, while
+ * others have shifted around.
+ ***********************************************************************/
+
+/* Constant for all versions (where supported) */
+enum {
+ /* See BRCMNAND_HAS_CACHE_MODE */
+ ACC_CONTROL_CACHE_MODE = BIT(22),
+
+ /* See BRCMNAND_HAS_PREFETCH */
+ ACC_CONTROL_PREFETCH = BIT(23),
+
+ ACC_CONTROL_PAGE_HIT = BIT(24),
+ ACC_CONTROL_WR_PREEMPT = BIT(25),
+ ACC_CONTROL_PARTIAL_PAGE = BIT(26),
+ ACC_CONTROL_RD_ERASED = BIT(27),
+ ACC_CONTROL_FAST_PGM_RDIN = BIT(28),
+ ACC_CONTROL_WR_ECC = BIT(30),
+ ACC_CONTROL_RD_ECC = BIT(31),
+};
+
+#define ACC_CONTROL_ECC_SHIFT 16
+/* Only for v7.2 */
+#define ACC_CONTROL_ECC_EXT_SHIFT 13
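+
+/*
+ * Editorial note: the ECC level is programmed as a plain numeric field at
+ * the shift above, e.g. a (hypothetical) 8-bit-per-sector setting would be
+ * written as 8 << 16 on most revisions, or 8 << 13 on v7.2; see the
+ * ecc_level_shift selection in brcmnand_revision_init().
+ */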
+
+static inline bool brcmnand_non_mmio_ops(struct brcmnand_controller *ctrl)
+{
+ return static_branch_unlikely(&brcmnand_soc_has_ops_key);
+}
+
+static inline u32 nand_readreg(struct brcmnand_controller *ctrl, u32 offs)
+{
+ if (brcmnand_non_mmio_ops(ctrl))
+ return brcmnand_soc_read(ctrl->soc, offs);
+ return brcmnand_readl(ctrl->nand_base + offs);
+}
+
+static inline void nand_writereg(struct brcmnand_controller *ctrl, u32 offs,
+ u32 val)
+{
+ if (brcmnand_non_mmio_ops(ctrl))
+ brcmnand_soc_write(ctrl->soc, val, offs);
+ else
+ brcmnand_writel(val, ctrl->nand_base + offs);
+}
+
+static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
+{
+ static const unsigned int block_sizes_v6[] = { 8, 16, 128, 256, 512, 1024, 2048, 0 };
+ static const unsigned int block_sizes_v4[] = { 16, 128, 8, 512, 256, 1024, 2048, 0 };
+ static const unsigned int block_sizes_v2_2[] = { 16, 128, 8, 512, 256, 0 };
+ static const unsigned int block_sizes_v2_1[] = { 16, 128, 8, 512, 0 };
+ static const unsigned int page_sizes_v3_4[] = { 512, 2048, 4096, 8192, 0 };
+ static const unsigned int page_sizes_v2_2[] = { 512, 2048, 4096, 0 };
+ static const unsigned int page_sizes_v2_1[] = { 512, 2048, 0 };
+
+ ctrl->nand_version = nand_readreg(ctrl, 0) & 0xffff;
+
+ /* Only support v2.1+ */
+ if (ctrl->nand_version < 0x0201) {
+ dev_err(ctrl->dev, "version %#x not supported\n",
+ ctrl->nand_version);
+ return -ENODEV;
+ }
+
+ /* Register offsets */
+ if (ctrl->nand_version >= 0x0702)
+ ctrl->reg_offsets = brcmnand_regs_v72;
+ else if (ctrl->nand_version == 0x0701)
+ ctrl->reg_offsets = brcmnand_regs_v71;
+ else if (ctrl->nand_version >= 0x0600)
+ ctrl->reg_offsets = brcmnand_regs_v60;
+ else if (ctrl->nand_version >= 0x0500)
+ ctrl->reg_offsets = brcmnand_regs_v50;
+ else if (ctrl->nand_version >= 0x0303)
+ ctrl->reg_offsets = brcmnand_regs_v33;
+ else if (ctrl->nand_version >= 0x0201)
+ ctrl->reg_offsets = brcmnand_regs_v21;
+
+ /* Chip-select stride */
+ if (ctrl->nand_version >= 0x0701)
+ ctrl->reg_spacing = 0x14;
+ else
+ ctrl->reg_spacing = 0x10;
+
+ /* Per chip-select registers */
+ if (ctrl->nand_version >= 0x0701) {
+ ctrl->cs_offsets = brcmnand_cs_offsets_v71;
+ } else {
+ ctrl->cs_offsets = brcmnand_cs_offsets;
+
+ /* v3.3-5.0 have a different CS0 offset layout */
+ if (ctrl->nand_version >= 0x0303 &&
+ ctrl->nand_version <= 0x0500)
+ ctrl->cs0_offsets = brcmnand_cs_offsets_cs0;
+ }
+
+ /* Page / block sizes */
+ if (ctrl->nand_version >= 0x0701) {
+ /* >= v7.1 use nice power-of-2 values! */
+ ctrl->max_page_size = 16 * 1024;
+ ctrl->max_block_size = 2 * 1024 * 1024;
+ } else {
+ if (ctrl->nand_version >= 0x0304)
+ ctrl->page_sizes = page_sizes_v3_4;
+ else if (ctrl->nand_version >= 0x0202)
+ ctrl->page_sizes = page_sizes_v2_2;
+ else
+ ctrl->page_sizes = page_sizes_v2_1;
+
+ if (ctrl->nand_version >= 0x0202)
+ ctrl->page_size_shift = CFG_PAGE_SIZE_SHIFT;
+ else
+ ctrl->page_size_shift = CFG_PAGE_SIZE_SHIFT_v2_1;
+
+ if (ctrl->nand_version >= 0x0600)
+ ctrl->block_sizes = block_sizes_v6;
+ else if (ctrl->nand_version >= 0x0400)
+ ctrl->block_sizes = block_sizes_v4;
+ else if (ctrl->nand_version >= 0x0202)
+ ctrl->block_sizes = block_sizes_v2_2;
+ else
+ ctrl->block_sizes = block_sizes_v2_1;
+
+ if (ctrl->nand_version < 0x0400) {
+ if (ctrl->nand_version < 0x0202)
+ ctrl->max_page_size = 2048;
+ else
+ ctrl->max_page_size = 4096;
+ ctrl->max_block_size = 512 * 1024;
+ }
+ }
+
+ /* Maximum spare area sector size (per 512B) */
+ if (ctrl->nand_version == 0x0702)
+ ctrl->max_oob = 128;
+ else if (ctrl->nand_version >= 0x0600)
+ ctrl->max_oob = 64;
+ else if (ctrl->nand_version >= 0x0500)
+ ctrl->max_oob = 32;
+ else
+ ctrl->max_oob = 16;
+
+ /* v6.0 and newer (except v6.1) have prefetch support */
+ if (ctrl->nand_version >= 0x0600 && ctrl->nand_version != 0x0601)
+ ctrl->features |= BRCMNAND_HAS_PREFETCH;
+
+ /*
+ * v6.x has cache mode, but it's implemented differently. Ignore it for
+ * now.
+ */
+ if (ctrl->nand_version >= 0x0700)
+ ctrl->features |= BRCMNAND_HAS_CACHE_MODE;
+
+ if (ctrl->nand_version >= 0x0500)
+ ctrl->features |= BRCMNAND_HAS_1K_SECTORS;
+
+ if (ctrl->nand_version >= 0x0700)
+ ctrl->features |= BRCMNAND_HAS_WP;
+ else if (of_property_read_bool(ctrl->dev->of_node, "brcm,nand-has-wp"))
+ ctrl->features |= BRCMNAND_HAS_WP;
+
+ /* v7.2 has different ecc level shift in the acc register */
+ if (ctrl->nand_version == 0x0702)
+ ctrl->ecc_level_shift = ACC_CONTROL_ECC_EXT_SHIFT;
+ else
+ ctrl->ecc_level_shift = ACC_CONTROL_ECC_SHIFT;
+
+ return 0;
+}
+
+static void brcmnand_flash_dma_revision_init(struct brcmnand_controller *ctrl)
+{
+ /* flash_dma register offsets */
+ if (ctrl->nand_version >= 0x0703)
+ ctrl->flash_dma_offsets = flash_dma_regs_v4;
+ else if (ctrl->nand_version == 0x0602)
+ ctrl->flash_dma_offsets = flash_dma_regs_v0;
+ else
+ ctrl->flash_dma_offsets = flash_dma_regs_v1;
+}
+
+static inline u32 brcmnand_read_reg(struct brcmnand_controller *ctrl,
+ enum brcmnand_reg reg)
+{
+ u16 offs = ctrl->reg_offsets[reg];
+
+ if (offs)
+ return nand_readreg(ctrl, offs);
+ else
+ return 0;
+}
+
+static inline void brcmnand_write_reg(struct brcmnand_controller *ctrl,
+ enum brcmnand_reg reg, u32 val)
+{
+ u16 offs = ctrl->reg_offsets[reg];
+
+ if (offs)
+ nand_writereg(ctrl, offs, val);
+}
+
+static inline void brcmnand_rmw_reg(struct brcmnand_controller *ctrl,
+ enum brcmnand_reg reg, u32 mask,
+ unsigned int shift, u32 val)
+{
+ u32 tmp = brcmnand_read_reg(ctrl, reg);
+
+ tmp &= ~mask;
+ tmp |= val << shift;
+ brcmnand_write_reg(ctrl, reg, tmp);
+}
+
+static inline u32 brcmnand_read_fc(struct brcmnand_controller *ctrl, int word)
+{
+ if (brcmnand_non_mmio_ops(ctrl))
+ return brcmnand_soc_read(ctrl->soc, BRCMNAND_NON_MMIO_FC_ADDR);
+ return __raw_readl(ctrl->nand_fc + word * 4);
+}
+
+static inline void brcmnand_write_fc(struct brcmnand_controller *ctrl,
+ int word, u32 val)
+{
+ if (brcmnand_non_mmio_ops(ctrl))
+ brcmnand_soc_write(ctrl->soc, val, BRCMNAND_NON_MMIO_FC_ADDR);
+ else
+ __raw_writel(val, ctrl->nand_fc + word * 4);
+}
+
+static inline void edu_writel(struct brcmnand_controller *ctrl,
+ enum edu_reg reg, u32 val)
+{
+ u16 offs = ctrl->edu_offsets[reg];
+
+ brcmnand_writel(val, ctrl->edu_base + offs);
+}
+
+static inline u32 edu_readl(struct brcmnand_controller *ctrl,
+ enum edu_reg reg)
+{
+ u16 offs = ctrl->edu_offsets[reg];
+
+ return brcmnand_readl(ctrl->edu_base + offs);
+}
+
+static void brcmnand_clear_ecc_addr(struct brcmnand_controller *ctrl)
+{
+ /* Clear error addresses */
+ brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_ADDR, 0);
+ brcmnand_write_reg(ctrl, BRCMNAND_CORR_ADDR, 0);
+ brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_EXT_ADDR, 0);
+ brcmnand_write_reg(ctrl, BRCMNAND_CORR_EXT_ADDR, 0);
+}
+
+static u64 brcmnand_get_uncorrecc_addr(struct brcmnand_controller *ctrl)
+{
+ u64 err_addr;
+
+ err_addr = brcmnand_read_reg(ctrl, BRCMNAND_UNCORR_ADDR);
+ err_addr |= ((u64)(brcmnand_read_reg(ctrl,
+ BRCMNAND_UNCORR_EXT_ADDR)
+ & 0xffff) << 32);
+
+ return err_addr;
+}
+
+static u64 brcmnand_get_correcc_addr(struct brcmnand_controller *ctrl)
+{
+ u64 err_addr;
+
+ err_addr = brcmnand_read_reg(ctrl, BRCMNAND_CORR_ADDR);
+ err_addr |= ((u64)(brcmnand_read_reg(ctrl,
+ BRCMNAND_CORR_EXT_ADDR)
+ & 0xffff) << 32);
+
+ return err_addr;
+}
+
+static void brcmnand_set_cmd_addr(struct mtd_info *mtd, u64 addr)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ struct brcmnand_controller *ctrl = host->ctrl;
+
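+ /*
+ * Each register write below is followed by a dummy readback so the
+ * posted write has reached the controller before the command is
+ * issued.
+ */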
+ brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS,
+ (host->cs << 16) | ((addr >> 32) & 0xffff));
+ (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS);
+ brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS,
+ lower_32_bits(addr));
+ (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
+}
+
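+/*
+ * Per chip-select registers sit at a fixed stride from the CS0 base (or,
+ * when the controller has a separate CS1 base, from that base for cs >= 1).
+ * E.g. with reg_spacing 0x14 and no CS1 base, the CS2 registers start at
+ * CS0_BASE + 2 * 0x14.
+ */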
+static inline u16 brcmnand_cs_offset(struct brcmnand_controller *ctrl, int cs,
+ enum brcmnand_cs_reg reg)
+{
+ u16 offs_cs0 = ctrl->reg_offsets[BRCMNAND_CS0_BASE];
+ u16 offs_cs1 = ctrl->reg_offsets[BRCMNAND_CS1_BASE];
+ u8 cs_offs;
+
+ if (cs == 0 && ctrl->cs0_offsets)
+ cs_offs = ctrl->cs0_offsets[reg];
+ else
+ cs_offs = ctrl->cs_offsets[reg];
+
+ if (cs && offs_cs1)
+ return offs_cs1 + (cs - 1) * ctrl->reg_spacing + cs_offs;
+
+ return offs_cs0 + cs * ctrl->reg_spacing + cs_offs;
+}
+
+static inline u32 brcmnand_count_corrected(struct brcmnand_controller *ctrl)
+{
+ if (ctrl->nand_version < 0x0600)
+ return 1;
+ return brcmnand_read_reg(ctrl, BRCMNAND_CORR_COUNT);
+}
+
+static void brcmnand_wr_corr_thresh(struct brcmnand_host *host, u8 val)
+{
+ struct brcmnand_controller *ctrl = host->ctrl;
+ unsigned int shift = 0, bits;
+ enum brcmnand_reg reg = BRCMNAND_CORR_THRESHOLD;
+ int cs = host->cs;
+
+ if (!ctrl->reg_offsets[reg])
+ return;
+
+ if (ctrl->nand_version == 0x0702)
+ bits = 7;
+ else if (ctrl->nand_version >= 0x0600)
+ bits = 6;
+ else if (ctrl->nand_version >= 0x0500)
+ bits = 5;
+ else
+ bits = 4;
+
+ if (ctrl->nand_version >= 0x0702) {
+ if (cs >= 4)
+ reg = BRCMNAND_CORR_THRESHOLD_EXT;
+ shift = (cs % 4) * bits;
+ } else if (ctrl->nand_version >= 0x0600) {
+ if (cs >= 5)
+ reg = BRCMNAND_CORR_THRESHOLD_EXT;
+ shift = (cs % 5) * bits;
+ }
+ brcmnand_rmw_reg(ctrl, reg, (bits - 1) << shift, shift, val);
+}
+
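+/* Pre-v6.2 controllers take the command in CMD_START bits 31:24 */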
+static inline int brcmnand_cmd_shift(struct brcmnand_controller *ctrl)
+{
+ if (ctrl->nand_version < 0x0602)
+ return 24;
+ return 0;
+}
+
+static inline u32 brcmnand_spare_area_mask(struct brcmnand_controller *ctrl)
+{
+ if (ctrl->nand_version == 0x0702)
+ return GENMASK(7, 0);
+ else if (ctrl->nand_version >= 0x0600)
+ return GENMASK(6, 0);
+ else if (ctrl->nand_version >= 0x0303)
+ return GENMASK(5, 0);
+ else
+ return GENMASK(4, 0);
+}
+
+static inline u32 brcmnand_ecc_level_mask(struct brcmnand_controller *ctrl)
+{
+ u32 mask = (ctrl->nand_version >= 0x0600) ? 0x1f : 0x0f;
+
+ mask <<= ACC_CONTROL_ECC_SHIFT;
+
+ /* v7.2 includes additional ECC levels */
+ if (ctrl->nand_version == 0x0702)
+ mask |= 0x7 << ACC_CONTROL_ECC_EXT_SHIFT;
+
+ return mask;
+}
+
+static void brcmnand_set_ecc_enabled(struct brcmnand_host *host, int en)
+{
+ struct brcmnand_controller *ctrl = host->ctrl;
+ u16 offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_ACC_CONTROL);
+ u32 acc_control = nand_readreg(ctrl, offs);
+ u32 ecc_flags = ACC_CONTROL_WR_ECC | ACC_CONTROL_RD_ECC;
+
+ if (en) {
+ acc_control |= ecc_flags; /* enable RD/WR ECC */
+ acc_control &= ~brcmnand_ecc_level_mask(ctrl);
+ acc_control |= host->hwcfg.ecc_level << ctrl->ecc_level_shift;
+ } else {
+ acc_control &= ~ecc_flags; /* disable RD/WR ECC */
+ acc_control &= ~brcmnand_ecc_level_mask(ctrl);
+ }
+
+ nand_writereg(ctrl, offs, acc_control);
+}
+
+static inline int brcmnand_sector_1k_shift(struct brcmnand_controller *ctrl)
+{
+ if (ctrl->nand_version >= 0x0702)
+ return 9;
+ else if (ctrl->nand_version >= 0x0600)
+ return 7;
+ else if (ctrl->nand_version >= 0x0500)
+ return 6;
+ else
+ return -1;
+}
+
+static int brcmnand_get_sector_size_1k(struct brcmnand_host *host)
+{
+ struct brcmnand_controller *ctrl = host->ctrl;
+ int shift = brcmnand_sector_1k_shift(ctrl);
+ u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
+ BRCMNAND_CS_ACC_CONTROL);
+
+ if (shift < 0)
+ return 0;
+
+ return (nand_readreg(ctrl, acc_control_offs) >> shift) & 0x1;
+}
+
+static void brcmnand_set_sector_size_1k(struct brcmnand_host *host, int val)
+{
+ struct brcmnand_controller *ctrl = host->ctrl;
+ int shift = brcmnand_sector_1k_shift(ctrl);
+ u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
+ BRCMNAND_CS_ACC_CONTROL);
+ u32 tmp;
+
+ if (shift < 0)
+ return;
+
+ tmp = nand_readreg(ctrl, acc_control_offs);
+ tmp &= ~(1 << shift);
+ tmp |= (!!val) << shift;
+ nand_writereg(ctrl, acc_control_offs, tmp);
+}
+
+/***********************************************************************
+ * CS_NAND_SELECT
+ ***********************************************************************/
+
+enum {
+ CS_SELECT_NAND_WP = BIT(29),
+ CS_SELECT_AUTO_DEVICE_ID_CFG = BIT(30),
+};
+
+static int bcmnand_ctrl_poll_status(struct brcmnand_controller *ctrl,
+ u32 mask, u32 expected_val,
+ unsigned long timeout_ms)
+{
+ unsigned long limit;
+ u32 val;
+
+ if (!timeout_ms)
+ timeout_ms = NAND_POLL_STATUS_TIMEOUT_MS;
+
+ limit = jiffies + msecs_to_jiffies(timeout_ms);
+ do {
+ val = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS);
+ if ((val & mask) == expected_val)
+ return 0;
+
+ cpu_relax();
+ } while (time_after(limit, jiffies));
+
+ /*
+ * Do a final check after the timeout in case the CPU was busy and
+ * the driver did not get enough time to poll, to avoid false alarms.
+ */
+ val = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS);
+ if ((val & mask) == expected_val)
+ return 0;
+
+ dev_warn(ctrl->dev, "timeout on status poll (expected %x got %x)\n",
+ expected_val, val & mask);
+
+ return -ETIMEDOUT;
+}
+
+static inline void brcmnand_set_wp(struct brcmnand_controller *ctrl, bool en)
+{
+ u32 val = en ? CS_SELECT_NAND_WP : 0;
+
+ brcmnand_rmw_reg(ctrl, BRCMNAND_CS_SELECT, CS_SELECT_NAND_WP, 0, val);
+}
+
+/***********************************************************************
+ * Flash DMA
+ ***********************************************************************/
+
+static inline bool has_flash_dma(struct brcmnand_controller *ctrl)
+{
+ return ctrl->flash_dma_base;
+}
+
+static inline bool has_edu(struct brcmnand_controller *ctrl)
+{
+ return ctrl->edu_base;
+}
+
+static inline bool use_dma(struct brcmnand_controller *ctrl)
+{
+ return has_flash_dma(ctrl) || has_edu(ctrl);
+}
+
+static inline void disable_ctrl_irqs(struct brcmnand_controller *ctrl)
+{
+ if (ctrl->pio_poll_mode)
+ return;
+
+ if (has_flash_dma(ctrl)) {
+ ctrl->flash_dma_base = NULL;
+ disable_irq(ctrl->dma_irq);
+ }
+
+ disable_irq(ctrl->irq);
+ ctrl->pio_poll_mode = true;
+}
+
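+/*
+ * DMA is only attempted on 4-byte-aligned lowmem buffers: vmalloc()'d
+ * memory may not be physically contiguous, so it cannot be passed to
+ * dma_map_single().
+ */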
+static inline bool flash_dma_buf_ok(const void *buf)
+{
+ return buf && !is_vmalloc_addr(buf) &&
+ likely(IS_ALIGNED((uintptr_t)buf, 4));
+}
+
+static inline void flash_dma_writel(struct brcmnand_controller *ctrl,
+ enum flash_dma_reg dma_reg, u32 val)
+{
+ u16 offs = ctrl->flash_dma_offsets[dma_reg];
+
+ brcmnand_writel(val, ctrl->flash_dma_base + offs);
+}
+
+static inline u32 flash_dma_readl(struct brcmnand_controller *ctrl,
+ enum flash_dma_reg dma_reg)
+{
+ u16 offs = ctrl->flash_dma_offsets[dma_reg];
+
+ return brcmnand_readl(ctrl->flash_dma_base + offs);
+}
+
+/* Low-level operation types: command, address, write, or read */
+enum brcmnand_llop_type {
+ LL_OP_CMD,
+ LL_OP_ADDR,
+ LL_OP_WR,
+ LL_OP_RD,
+};
+
+/***********************************************************************
+ * Internal support functions
+ ***********************************************************************/
+
+static inline bool is_hamming_ecc(struct brcmnand_controller *ctrl,
+ struct brcmnand_cfg *cfg)
+{
+ if (ctrl->nand_version <= 0x0701)
+ return cfg->sector_size_1k == 0 && cfg->spare_area_size == 16 &&
+ cfg->ecc_level == 15;
+ else
+ return cfg->sector_size_1k == 0 && ((cfg->spare_area_size == 16 &&
+ cfg->ecc_level == 15) ||
+ (cfg->spare_area_size == 28 && cfg->ecc_level == 16));
+}
+
+/*
+ * Set mtd->ooblayout to the appropriate mtd_ooblayout_ops given
+ * the layout/configuration; see brcmstb_choose_ecc_layout() below.
+ * Returns a negative error code on failure.
+ */
+static int brcmnand_hamming_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ struct brcmnand_cfg *cfg = &host->hwcfg;
+ int sas = cfg->spare_area_size << cfg->sector_size_1k;
+ int sectors = cfg->page_size / (512 << cfg->sector_size_1k);
+
+ if (section >= sectors)
+ return -ERANGE;
+
+ oobregion->offset = (section * sas) + 6;
+ oobregion->length = 3;
+
+ return 0;
+}
+
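+/*
+ * E.g. a 2KiB page with 16B spare per 512B sector: section 0's free bytes
+ * are offsets 2-5 (bytes 0-1 hold the BBI, 6-8 the ECC), and each later
+ * section N covers offsets (N - 1) * 16 + 9 through N * 16 + 5.
+ */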
+static int brcmnand_hamming_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ struct brcmnand_cfg *cfg = &host->hwcfg;
+ int sas = cfg->spare_area_size << cfg->sector_size_1k;
+ int sectors = cfg->page_size / (512 << cfg->sector_size_1k);
+ u32 next;
+
+ if (section > sectors)
+ return -ERANGE;
+
+ next = (section * sas);
+ if (section < sectors)
+ next += 6;
+
+ if (section) {
+ oobregion->offset = ((section - 1) * sas) + 9;
+ } else {
+ if (cfg->page_size > 512) {
+ /* Large page NAND uses first 2 bytes for BBI */
+ oobregion->offset = 2;
+ } else {
+ /* Small page NAND uses last byte before ECC for BBI */
+ oobregion->offset = 0;
+ next--;
+ }
+ }
+
+ oobregion->length = next - oobregion->offset;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops brcmnand_hamming_ooblayout_ops = {
+ .ecc = brcmnand_hamming_ooblayout_ecc,
+ .free = brcmnand_hamming_ooblayout_free,
+};
+
+static int brcmnand_bch_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ struct brcmnand_cfg *cfg = &host->hwcfg;
+ int sas = cfg->spare_area_size << cfg->sector_size_1k;
+ int sectors = cfg->page_size / (512 << cfg->sector_size_1k);
+
+ if (section >= sectors)
+ return -ERANGE;
+
+ oobregion->offset = ((section + 1) * sas) - chip->ecc.bytes;
+ oobregion->length = chip->ecc.bytes;
+
+ return 0;
+}
+
+static int brcmnand_bch_ooblayout_free_lp(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ struct brcmnand_cfg *cfg = &host->hwcfg;
+ int sas = cfg->spare_area_size << cfg->sector_size_1k;
+ int sectors = cfg->page_size / (512 << cfg->sector_size_1k);
+
+ if (section >= sectors)
+ return -ERANGE;
+
+ if (sas <= chip->ecc.bytes)
+ return 0;
+
+ oobregion->offset = section * sas;
+ oobregion->length = sas - chip->ecc.bytes;
+
+ if (!section) {
+ oobregion->offset++;
+ oobregion->length--;
+ }
+
+ return 0;
+}
+
+static int brcmnand_bch_ooblayout_free_sp(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ struct brcmnand_cfg *cfg = &host->hwcfg;
+ int sas = cfg->spare_area_size << cfg->sector_size_1k;
+
+ if (section > 1 || sas - chip->ecc.bytes < 6 ||
+ (section && sas - chip->ecc.bytes == 6))
+ return -ERANGE;
+
+ if (!section) {
+ oobregion->offset = 0;
+ oobregion->length = 5;
+ } else {
+ oobregion->offset = 6;
+ oobregion->length = sas - chip->ecc.bytes - 6;
+ }
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops brcmnand_bch_lp_ooblayout_ops = {
+ .ecc = brcmnand_bch_ooblayout_ecc,
+ .free = brcmnand_bch_ooblayout_free_lp,
+};
+
+static const struct mtd_ooblayout_ops brcmnand_bch_sp_ooblayout_ops = {
+ .ecc = brcmnand_bch_ooblayout_ecc,
+ .free = brcmnand_bch_ooblayout_free_sp,
+};
+
+static int brcmstb_choose_ecc_layout(struct brcmnand_host *host)
+{
+ struct brcmnand_cfg *p = &host->hwcfg;
+ struct mtd_info *mtd = nand_to_mtd(&host->chip);
+ struct nand_ecc_ctrl *ecc = &host->chip.ecc;
+ unsigned int ecc_level = p->ecc_level;
+ int sas = p->spare_area_size << p->sector_size_1k;
+ int sectors = p->page_size / (512 << p->sector_size_1k);
+
+ if (p->sector_size_1k)
+ ecc_level <<= 1;
+
+ if (is_hamming_ecc(host->ctrl, p)) {
+ ecc->bytes = 3 * sectors;
+ mtd_set_ooblayout(mtd, &brcmnand_hamming_ooblayout_ops);
+ return 0;
+ }
+
+ /*
+ * CONTROLLER_VERSION:
+ * < v5.0: ECC_REQ = ceil(BCH_T * 13/8)
+ * >= v5.0: ECC_REQ = ceil(BCH_T * 14/8)
+ * But we will just be conservative.
+ */
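+ /*
+ * E.g. BCH-8 over 512B sectors (ecc_level 8) needs
+ * DIV_ROUND_UP(8 * 14, 8) = 14 ECC bytes per sector, which must
+ * still fit in the spare area (checked below).
+ */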
+ ecc->bytes = DIV_ROUND_UP(ecc_level * 14, 8);
+ if (p->page_size == 512)
+ mtd_set_ooblayout(mtd, &brcmnand_bch_sp_ooblayout_ops);
+ else
+ mtd_set_ooblayout(mtd, &brcmnand_bch_lp_ooblayout_ops);
+
+ if (ecc->bytes >= sas) {
+ dev_err(&host->pdev->dev,
+ "error: ECC too large for OOB (ECC bytes %d, spare sector %d)\n",
+ ecc->bytes, sas);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void brcmnand_wp(struct mtd_info *mtd, int wp)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ struct brcmnand_controller *ctrl = host->ctrl;
+
+ if ((ctrl->features & BRCMNAND_HAS_WP) && wp_on == 1) {
+ static int old_wp = -1;
+ int ret;
+
+ if (old_wp != wp) {
+ dev_dbg(ctrl->dev, "WP %s\n", wp ? "on" : "off");
+ old_wp = wp;
+ }
+
+ /*
+ * make sure ctrl/flash ready before and after
+ * changing state of #WP pin
+ */
+ ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY |
+ NAND_STATUS_READY,
+ NAND_CTRL_RDY |
+ NAND_STATUS_READY, 0);
+ if (ret)
+ return;
+
+ brcmnand_set_wp(ctrl, wp);
+ nand_status_op(chip, NULL);
+ /* NAND_STATUS_WP 0x00 = protected, 0x80 = not protected */
+ ret = bcmnand_ctrl_poll_status(ctrl,
+ NAND_CTRL_RDY |
+ NAND_STATUS_READY |
+ NAND_STATUS_WP,
+ NAND_CTRL_RDY |
+ NAND_STATUS_READY |
+ (wp ? 0 : NAND_STATUS_WP), 0);
+
+ if (ret)
+ dev_err_ratelimited(&host->pdev->dev,
+ "nand #WP expected %s\n",
+ wp ? "on" : "off");
+ }
+}
+
+/* Helper functions for reading and writing OOB registers */
+static inline u8 oob_reg_read(struct brcmnand_controller *ctrl, u32 offs)
+{
+ u16 offset0, offset10, reg_offs;
+
+ offset0 = ctrl->reg_offsets[BRCMNAND_OOB_READ_BASE];
+ offset10 = ctrl->reg_offsets[BRCMNAND_OOB_READ_10_BASE];
+
+ if (offs >= ctrl->max_oob)
+ return 0x77;
+
+ if (offs >= 16 && offset10)
+ reg_offs = offset10 + ((offs - 0x10) & ~0x03);
+ else
+ reg_offs = offset0 + (offs & ~0x03);
+
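+ /*
+ * OOB bytes are packed big-endian within each 32-bit register:
+ * e.g. offs = 5 reads bits [23:16] of the word at offset0 + 4.
+ */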
+ return nand_readreg(ctrl, reg_offs) >> (24 - ((offs & 0x03) << 3));
+}
+
+static inline void oob_reg_write(struct brcmnand_controller *ctrl, u32 offs,
+ u32 data)
+{
+ u16 offset0, offset10, reg_offs;
+
+ offset0 = ctrl->reg_offsets[BRCMNAND_OOB_WRITE_BASE];
+ offset10 = ctrl->reg_offsets[BRCMNAND_OOB_WRITE_10_BASE];
+
+ if (offs >= ctrl->max_oob)
+ return;
+
+ if (offs >= 16 && offset10)
+ reg_offs = offset10 + ((offs - 0x10) & ~0x03);
+ else
+ reg_offs = offset0 + (offs & ~0x03);
+
+ nand_writereg(ctrl, reg_offs, data);
+}
+
+/*
+ * read_oob_from_regs - read data from OOB registers
+ * @ctrl: NAND controller
+ * @i: sub-page sector index
+ * @oob: buffer to read to
+ * @sas: spare area sector size (i.e., OOB size per FLASH_CACHE)
+ * @sector_1k: 1 for 1KiB sectors, 0 for 512B, other values are illegal
+ */
+static int read_oob_from_regs(struct brcmnand_controller *ctrl, int i, u8 *oob,
+ int sas, int sector_1k)
+{
+ int tbytes = sas << sector_1k;
+ int j;
+
+ /* Adjust OOB values for 1K sector size */
+ if (sector_1k && (i & 0x01))
+ tbytes = max(0, tbytes - (int)ctrl->max_oob);
+ tbytes = min_t(int, tbytes, ctrl->max_oob);
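+ /*
+ * E.g. sas = 27 with 1KiB sectors on a max_oob = 32 controller:
+ * tbytes = 54, so the even half-sector reads 32 bytes and the odd
+ * one reads the remaining 22.
+ */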
+
+ for (j = 0; j < tbytes; j++)
+ oob[j] = oob_reg_read(ctrl, j);
+ return tbytes;
+}
+
+/*
+ * write_oob_to_regs - write data to OOB registers
+ * @ctrl: NAND controller
+ * @i: sub-page sector index
+ * @oob: buffer to write from
+ * @sas: spare area sector size (i.e., OOB size per FLASH_CACHE)
+ * @sector_1k: 1 for 1KiB sectors, 0 for 512B, other values are illegal
+ */
+static int write_oob_to_regs(struct brcmnand_controller *ctrl, int i,
+ const u8 *oob, int sas, int sector_1k)
+{
+ int tbytes = sas << sector_1k;
+ int j, k = 0;
+ u32 last = 0xffffffff;
+ u8 *plast = (u8 *)&last;
+
+ /* Adjust OOB values for 1K sector size */
+ if (sector_1k && (i & 0x01))
+ tbytes = max(0, tbytes - (int)ctrl->max_oob);
+ tbytes = min_t(int, tbytes, ctrl->max_oob);
+
+ /*
+ * tbytes may not be a multiple of the word size. Make sure we don't
+ * read past the end of the buffer, and handle the trailing bytes below.
+ */
+ for (j = 0; (j + 3) < tbytes; j += 4)
+ oob_reg_write(ctrl, j,
+ (oob[j + 0] << 24) |
+ (oob[j + 1] << 16) |
+ (oob[j + 2] << 8) |
+ (oob[j + 3] << 0));
+
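+ /*
+ * E.g. tbytes = 6: the loop above writes bytes 0-3 as one word;
+ * bytes 4-5 land in plast[], and the final write below stores them
+ * big-endian with 0xff padding in the unused byte lanes.
+ */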
+ /* handle the remaining bytes */
+ while (j < tbytes)
+ plast[k++] = oob[j++];
+
+ if (tbytes & 0x3)
+ oob_reg_write(ctrl, (tbytes & ~0x3), (__force u32)cpu_to_be32(last));
+
+ return tbytes;
+}
+
+static void brcmnand_edu_init(struct brcmnand_controller *ctrl)
+{
+ /* initialize the EDU engine */
+ edu_writel(ctrl, EDU_ERR_STATUS, 0);
+ edu_readl(ctrl, EDU_ERR_STATUS);
+ edu_writel(ctrl, EDU_DONE, 0);
+ edu_writel(ctrl, EDU_DONE, 0);
+ edu_writel(ctrl, EDU_DONE, 0);
+ edu_writel(ctrl, EDU_DONE, 0);
+ edu_readl(ctrl, EDU_DONE);
+}
+
+/*
+ * EDU irq: transfers proceed one FC_BYTES chunk at a time, so on each
+ * completion either program the next chunk or signal overall completion.
+ */
+static irqreturn_t brcmnand_edu_irq(int irq, void *data)
+{
+ struct brcmnand_controller *ctrl = data;
+
+ if (ctrl->edu_count) {
+ ctrl->edu_count--;
+ while (!(edu_readl(ctrl, EDU_DONE) & EDU_DONE_MASK))
+ udelay(1);
+ edu_writel(ctrl, EDU_DONE, 0);
+ edu_readl(ctrl, EDU_DONE);
+ }
+
+ if (ctrl->edu_count) {
+ ctrl->edu_dram_addr += FC_BYTES;
+ ctrl->edu_ext_addr += FC_BYTES;
+
+ edu_writel(ctrl, EDU_DRAM_ADDR, (u32)ctrl->edu_dram_addr);
+ edu_readl(ctrl, EDU_DRAM_ADDR);
+ edu_writel(ctrl, EDU_EXT_ADDR, ctrl->edu_ext_addr);
+ edu_readl(ctrl, EDU_EXT_ADDR);
+
+ mb(); /* flush previous writes */
+ edu_writel(ctrl, EDU_CMD, ctrl->edu_cmd);
+ edu_readl(ctrl, EDU_CMD);
+
+ return IRQ_HANDLED;
+ }
+
+ complete(&ctrl->edu_done);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t brcmnand_ctlrdy_irq(int irq, void *data)
+{
+ struct brcmnand_controller *ctrl = data;
+
+ /* Discard all NAND_CTLRDY interrupts during DMA */
+ if (ctrl->dma_pending)
+ return IRQ_HANDLED;
+
+ /* check if we need to piggyback on the ctlrdy irq */
+ if (ctrl->edu_pending) {
+ if (irq == ctrl->irq && ((int)ctrl->edu_irq >= 0))
+ /* Discard interrupts while using dedicated edu irq */
+ return IRQ_HANDLED;
+
+ /* no registered edu irq, call handler */
+ return brcmnand_edu_irq(irq, data);
+ }
+
+ complete(&ctrl->done);
+ return IRQ_HANDLED;
+}
+
+/* Handle SoC-specific interrupt hardware */
+static irqreturn_t brcmnand_irq(int irq, void *data)
+{
+ struct brcmnand_controller *ctrl = data;
+
+ if (ctrl->soc->ctlrdy_ack(ctrl->soc))
+ return brcmnand_ctlrdy_irq(irq, data);
+
+ return IRQ_NONE;
+}
+
+static irqreturn_t brcmnand_dma_irq(int irq, void *data)
+{
+ struct brcmnand_controller *ctrl = data;
+
+ complete(&ctrl->dma_done);
+
+ return IRQ_HANDLED;
+}
+
+static void brcmnand_send_cmd(struct brcmnand_host *host, int cmd)
+{
+ struct brcmnand_controller *ctrl = host->ctrl;
+ int ret;
+ u64 cmd_addr;
+
+ cmd_addr = brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
+
+ dev_dbg(ctrl->dev, "send native cmd %d addr 0x%llx\n", cmd, cmd_addr);
+
+ /*
+ * If we came here through _panic_write and there is a pending
+ * command, try to wait for it. If it times out, rather than
+ * hitting BUG_ON, just return so we don't crash while crashing.
+ */
+ if (oops_in_progress) {
+ if (ctrl->cmd_pending &&
+ bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, 0))
+ return;
+ } else
+ BUG_ON(ctrl->cmd_pending != 0);
+ ctrl->cmd_pending = cmd;
+
+ ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, 0);
+ WARN_ON(ret);
+
+ mb(); /* flush previous writes */
+ brcmnand_write_reg(ctrl, BRCMNAND_CMD_START,
+ cmd << brcmnand_cmd_shift(ctrl));
+}
+
+/***********************************************************************
+ * NAND MTD API: read/program/erase
+ ***********************************************************************/
+
+static void brcmnand_cmd_ctrl(struct nand_chip *chip, int dat,
+ unsigned int ctrl)
+{
+ /* intentionally left blank */
+}
+
+static bool brcmstb_nand_wait_for_completion(struct nand_chip *chip)
+{
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ struct brcmnand_controller *ctrl = host->ctrl;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ bool err = false;
+ int sts;
+
+ if (mtd->oops_panic_write) {
+ /* switch to interrupt polling and PIO mode */
+ disable_ctrl_irqs(ctrl);
+ sts = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY,
+ NAND_CTRL_RDY, 0);
+ err = (sts < 0) ? true : false;
+ } else {
+ unsigned long timeo = msecs_to_jiffies(
+ NAND_POLL_STATUS_TIMEOUT_MS);
+ /* wait for completion interrupt */
+ sts = wait_for_completion_timeout(&ctrl->done, timeo);
+ err = (sts <= 0) ? true : false;
+ }
+
+ return err;
+}
+
+static int brcmnand_waitfunc(struct nand_chip *chip)
+{
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ struct brcmnand_controller *ctrl = host->ctrl;
+ bool err = false;
+
+ dev_dbg(ctrl->dev, "wait on native cmd %d\n", ctrl->cmd_pending);
+ if (ctrl->cmd_pending)
+ err = brcmstb_nand_wait_for_completion(chip);
+
+ if (err) {
+ u32 cmd = brcmnand_read_reg(ctrl, BRCMNAND_CMD_START)
+ >> brcmnand_cmd_shift(ctrl);
+
+ dev_err_ratelimited(ctrl->dev,
+ "timeout waiting for command %#02x\n", cmd);
+ dev_err_ratelimited(ctrl->dev, "intfc status %08x\n",
+ brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS));
+ }
+ ctrl->cmd_pending = 0;
+ return brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) &
+ INTFC_FLASH_STATUS;
+}
+
+enum {
+ LLOP_RE = BIT(16),
+ LLOP_WE = BIT(17),
+ LLOP_ALE = BIT(18),
+ LLOP_CLE = BIT(19),
+ LLOP_RETURN_IDLE = BIT(31),
+
+ LLOP_DATA_MASK = GENMASK(15, 0),
+};
+
+static int brcmnand_low_level_op(struct brcmnand_host *host,
+ enum brcmnand_llop_type type, u32 data,
+ bool last_op)
+{
+ struct nand_chip *chip = &host->chip;
+ struct brcmnand_controller *ctrl = host->ctrl;
+ u32 tmp;
+
+ tmp = data & LLOP_DATA_MASK;
+ switch (type) {
+ case LL_OP_CMD:
+ tmp |= LLOP_WE | LLOP_CLE;
+ break;
+ case LL_OP_ADDR:
+ /* WE | ALE */
+ tmp |= LLOP_WE | LLOP_ALE;
+ break;
+ case LL_OP_WR:
+ /* WE */
+ tmp |= LLOP_WE;
+ break;
+ case LL_OP_RD:
+ /* RE */
+ tmp |= LLOP_RE;
+ break;
+ }
+ if (last_op)
+ /* RETURN_IDLE */
+ tmp |= LLOP_RETURN_IDLE;
+
+ dev_dbg(ctrl->dev, "ll_op cmd %#x\n", tmp);
+
+ brcmnand_write_reg(ctrl, BRCMNAND_LL_OP, tmp);
+ (void)brcmnand_read_reg(ctrl, BRCMNAND_LL_OP);
+
+ brcmnand_send_cmd(host, CMD_LOW_LEVEL_OP);
+ return brcmnand_waitfunc(chip);
+}
+
+static void brcmnand_cmdfunc(struct nand_chip *chip, unsigned command,
+ int column, int page_addr)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ struct brcmnand_controller *ctrl = host->ctrl;
+ u64 addr = (u64)page_addr << chip->page_shift;
+ int native_cmd = 0;
+
+ if (command == NAND_CMD_READID || command == NAND_CMD_PARAM ||
+ command == NAND_CMD_RNDOUT)
+ addr = (u64)column;
+ /* Avoid propagating a negative, don't-care address */
+ else if (page_addr < 0)
+ addr = 0;
+
+ dev_dbg(ctrl->dev, "cmd 0x%x addr 0x%llx\n", command,
+ (unsigned long long)addr);
+
+ host->last_cmd = command;
+ host->last_byte = 0;
+ host->last_addr = addr;
+
+ switch (command) {
+ case NAND_CMD_RESET:
+ native_cmd = CMD_FLASH_RESET;
+ break;
+ case NAND_CMD_STATUS:
+ native_cmd = CMD_STATUS_READ;
+ break;
+ case NAND_CMD_READID:
+ native_cmd = CMD_DEVICE_ID_READ;
+ break;
+ case NAND_CMD_READOOB:
+ native_cmd = CMD_SPARE_AREA_READ;
+ break;
+ case NAND_CMD_ERASE1:
+ native_cmd = CMD_BLOCK_ERASE;
+ brcmnand_wp(mtd, 0);
+ break;
+ case NAND_CMD_PARAM:
+ native_cmd = CMD_PARAMETER_READ;
+ break;
+ case NAND_CMD_SET_FEATURES:
+ case NAND_CMD_GET_FEATURES:
+ brcmnand_low_level_op(host, LL_OP_CMD, command, false);
+ brcmnand_low_level_op(host, LL_OP_ADDR, column, false);
+ break;
+ case NAND_CMD_RNDOUT:
+ native_cmd = CMD_PARAMETER_CHANGE_COL;
+ addr &= ~((u64)(FC_BYTES - 1));
+ /*
+ * HW quirk: PARAMETER_CHANGE_COL requires SECTOR_SIZE_1K=0
+ * NB: hwcfg.sector_size_1k may not be initialized yet
+ */
+ if (brcmnand_get_sector_size_1k(host)) {
+ host->hwcfg.sector_size_1k =
+ brcmnand_get_sector_size_1k(host);
+ brcmnand_set_sector_size_1k(host, 0);
+ }
+ break;
+ }
+
+ if (!native_cmd)
+ return;
+
+ brcmnand_set_cmd_addr(mtd, addr);
+ brcmnand_send_cmd(host, native_cmd);
+ brcmnand_waitfunc(chip);
+
+ if (native_cmd == CMD_PARAMETER_READ ||
+ native_cmd == CMD_PARAMETER_CHANGE_COL) {
+ /* Copy flash cache word-wise */
+ u32 *flash_cache = (u32 *)ctrl->flash_cache;
+ int i;
+
+ brcmnand_soc_data_bus_prepare(ctrl->soc, true);
+
+ /*
+ * Must cache the FLASH_CACHE now, since changes in
+ * SECTOR_SIZE_1K may invalidate it
+ */
+ for (i = 0; i < FC_WORDS; i++)
+ /*
+ * Flash cache is big endian for parameter pages, at
+ * least on STB SoCs
+ */
+ flash_cache[i] = be32_to_cpu(brcmnand_read_fc(ctrl, i));
+
+ brcmnand_soc_data_bus_unprepare(ctrl->soc, true);
+
+ /* Cleanup from HW quirk: restore SECTOR_SIZE_1K */
+ if (host->hwcfg.sector_size_1k)
+ brcmnand_set_sector_size_1k(host,
+ host->hwcfg.sector_size_1k);
+ }
+
+ /* Re-enabling write protection is necessary only after erase */
+ if (command == NAND_CMD_ERASE1)
+ brcmnand_wp(mtd, 1);
+}
+
+static uint8_t brcmnand_read_byte(struct nand_chip *chip)
+{
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ struct brcmnand_controller *ctrl = host->ctrl;
+ uint8_t ret = 0;
+ int addr, offs;
+
+ switch (host->last_cmd) {
+ case NAND_CMD_READID:
+ if (host->last_byte < 4)
+ ret = brcmnand_read_reg(ctrl, BRCMNAND_ID) >>
+ (24 - (host->last_byte << 3));
+ else if (host->last_byte < 8)
+ ret = brcmnand_read_reg(ctrl, BRCMNAND_ID_EXT) >>
+ (56 - (host->last_byte << 3));
+ break;
+
+ case NAND_CMD_READOOB:
+ ret = oob_reg_read(ctrl, host->last_byte);
+ break;
+
+ case NAND_CMD_STATUS:
+ ret = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) &
+ INTFC_FLASH_STATUS;
+ if (wp_on) /* hide WP status */
+ ret |= NAND_STATUS_WP;
+ break;
+
+ case NAND_CMD_PARAM:
+ case NAND_CMD_RNDOUT:
+ addr = host->last_addr + host->last_byte;
+ offs = addr & (FC_BYTES - 1);
+
+ /* At FC_BYTES boundary, switch to next column */
+ if (host->last_byte > 0 && offs == 0)
+ nand_change_read_column_op(chip, addr, NULL, 0, false);
+
+ ret = ctrl->flash_cache[offs];
+ break;
+ case NAND_CMD_GET_FEATURES:
+ if (host->last_byte >= ONFI_SUBFEATURE_PARAM_LEN) {
+ ret = 0;
+ } else {
+ bool last = host->last_byte ==
+ ONFI_SUBFEATURE_PARAM_LEN - 1;
+ brcmnand_low_level_op(host, LL_OP_RD, 0, last);
+ ret = brcmnand_read_reg(ctrl, BRCMNAND_LL_RDATA) & 0xff;
+ }
+ }
+
+ dev_dbg(ctrl->dev, "read byte = 0x%02x\n", ret);
+ host->last_byte++;
+
+ return ret;
+}
+
+static void brcmnand_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++, buf++)
+ *buf = brcmnand_read_byte(chip);
+}
+
+static void brcmnand_write_buf(struct nand_chip *chip, const uint8_t *buf,
+ int len)
+{
+ int i;
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+
+ switch (host->last_cmd) {
+ case NAND_CMD_SET_FEATURES:
+ for (i = 0; i < len; i++)
+ brcmnand_low_level_op(host, LL_OP_WR, buf[i],
+ (i + 1) == len);
+ break;
+ default:
+ BUG();
+ break;
+ }
+}
+
+/* Kick the EDU engine */
+static int brcmnand_edu_trans(struct brcmnand_host *host, u64 addr, u32 *buf,
+ u32 len, u8 cmd)
+{
+ struct brcmnand_controller *ctrl = host->ctrl;
+ unsigned long timeo = msecs_to_jiffies(200);
+ int ret = 0;
+ int dir = (cmd == CMD_PAGE_READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ u8 edu_cmd = (cmd == CMD_PAGE_READ ? EDU_CMD_READ : EDU_CMD_WRITE);
+ unsigned int trans = len >> FC_SHIFT;
+ dma_addr_t pa;
+
+ pa = dma_map_single(ctrl->dev, buf, len, dir);
+ if (dma_mapping_error(ctrl->dev, pa)) {
+ dev_err(ctrl->dev, "unable to map buffer for EDU DMA\n");
+ return -ENOMEM;
+ }
+
+ ctrl->edu_pending = true;
+ ctrl->edu_dram_addr = pa;
+ ctrl->edu_ext_addr = addr;
+ ctrl->edu_cmd = edu_cmd;
+ ctrl->edu_count = trans;
+
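+ /*
+ * Program the first FC_BYTES chunk here; brcmnand_edu_irq() then
+ * advances the DRAM/flash addresses and re-issues EDU_CMD for each
+ * remaining chunk.
+ */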
+ edu_writel(ctrl, EDU_DRAM_ADDR, (u32)ctrl->edu_dram_addr);
+ edu_readl(ctrl, EDU_DRAM_ADDR);
+ edu_writel(ctrl, EDU_EXT_ADDR, ctrl->edu_ext_addr);
+ edu_readl(ctrl, EDU_EXT_ADDR);
+ edu_writel(ctrl, EDU_LENGTH, FC_BYTES);
+ edu_readl(ctrl, EDU_LENGTH);
+
+ /* Start edu engine */
+ mb(); /* flush previous writes */
+ edu_writel(ctrl, EDU_CMD, ctrl->edu_cmd);
+ edu_readl(ctrl, EDU_CMD);
+
+ if (wait_for_completion_timeout(&ctrl->edu_done, timeo) <= 0) {
+ dev_err(ctrl->dev,
+ "timeout waiting for EDU; status %#x, error status %#x\n",
+ edu_readl(ctrl, EDU_STATUS),
+ edu_readl(ctrl, EDU_ERR_STATUS));
+ }
+
+ dma_unmap_single(ctrl->dev, pa, len, dir);
+
+ /* for program page check NAND status */
+ if (((brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) &
+ INTFC_FLASH_STATUS) & NAND_STATUS_FAIL) &&
+ edu_cmd == EDU_CMD_WRITE) {
+ dev_info(ctrl->dev, "program failed at %llx\n",
+ (unsigned long long)addr);
+ ret = -EIO;
+ }
+
+ /* Make sure the EDU status is clean */
+ if (edu_readl(ctrl, EDU_STATUS) & EDU_STATUS_ACTIVE)
+ dev_warn(ctrl->dev, "EDU still active: %#x\n",
+ edu_readl(ctrl, EDU_STATUS));
+
+ if (unlikely(edu_readl(ctrl, EDU_ERR_STATUS) & EDU_ERR_STATUS_ERRACK)) {
+ dev_warn(ctrl->dev, "EDU RBUS error at addr %llx\n",
+ (unsigned long long)addr);
+ ret = -EIO;
+ }
+
+ ctrl->edu_pending = false;
+ brcmnand_edu_init(ctrl);
+ edu_writel(ctrl, EDU_STOP, 0); /* force stop */
+ edu_readl(ctrl, EDU_STOP);
+
+ if (!ret && edu_cmd == EDU_CMD_READ) {
+ u64 err_addr = 0;
+
+ /*
+ * check for ECC errors here; subpage ECC errors are
+ * retained in the ECC error address register
+ */
+ err_addr = brcmnand_get_uncorrecc_addr(ctrl);
+ if (!err_addr) {
+ err_addr = brcmnand_get_correcc_addr(ctrl);
+ if (err_addr)
+ ret = -EUCLEAN;
+ } else
+ ret = -EBADMSG;
+ }
+
+ return ret;
+}
+
+/*
+ * Construct a FLASH_DMA descriptor as part of a linked list. You must know the
+ * following ahead of time:
+ * - Is this descriptor the beginning or end of a linked list?
+ * - What is the (DMA) address of the next descriptor in the linked list?
+ */
+static int brcmnand_fill_dma_desc(struct brcmnand_host *host,
+ struct brcm_nand_dma_desc *desc, u64 addr,
+ dma_addr_t buf, u32 len, u8 dma_cmd,
+ bool begin, bool end,
+ dma_addr_t next_desc)
+{
+ memset(desc, 0, sizeof(*desc));
+ /* Descriptors are written in native byte order (wordwise) */
+ desc->next_desc = lower_32_bits(next_desc);
+ desc->next_desc_ext = upper_32_bits(next_desc);
+ desc->cmd_irq = (dma_cmd << 24) |
+ (end ? (0x03 << 8) : 0) | /* IRQ | STOP */
+ (!!begin) | ((!!end) << 1); /* head, tail */
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ desc->cmd_irq |= 0x01 << 12;
+#endif
+ desc->dram_addr = lower_32_bits(buf);
+ desc->dram_addr_ext = upper_32_bits(buf);
+ desc->tfr_len = len;
+ desc->total_len = len;
+ desc->flash_addr = lower_32_bits(addr);
+ desc->flash_addr_ext = upper_32_bits(addr);
+ desc->cs = host->cs;
+ desc->status_valid = 0x01;
+ return 0;
+}
+
+/* Kick the FLASH_DMA engine with a given DMA descriptor */
+static void brcmnand_dma_run(struct brcmnand_host *host, dma_addr_t desc)
+{
+ struct brcmnand_controller *ctrl = host->ctrl;
+ unsigned long timeo = msecs_to_jiffies(100);
+
+ flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC, lower_32_bits(desc));
+ (void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC);
+ if (ctrl->nand_version > 0x0602) {
+ flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC_EXT,
+ upper_32_bits(desc));
+ (void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC_EXT);
+ }
+
+ /* Start FLASH_DMA engine */
+ ctrl->dma_pending = true;
+ mb(); /* flush previous writes */
+ flash_dma_writel(ctrl, FLASH_DMA_CTRL, 0x03); /* wake | run */
+
+ if (wait_for_completion_timeout(&ctrl->dma_done, timeo) <= 0) {
+ dev_err(ctrl->dev,
+ "timeout waiting for DMA; status %#x, error status %#x\n",
+ flash_dma_readl(ctrl, FLASH_DMA_STATUS),
+ flash_dma_readl(ctrl, FLASH_DMA_ERROR_STATUS));
+ }
+ ctrl->dma_pending = false;
+ flash_dma_writel(ctrl, FLASH_DMA_CTRL, 0); /* force stop */
+}
+
+static int brcmnand_dma_trans(struct brcmnand_host *host, u64 addr, u32 *buf,
+ u32 len, u8 dma_cmd)
+{
+ struct brcmnand_controller *ctrl = host->ctrl;
+ dma_addr_t buf_pa;
+ int dir = dma_cmd == CMD_PAGE_READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+ buf_pa = dma_map_single(ctrl->dev, buf, len, dir);
+ if (dma_mapping_error(ctrl->dev, buf_pa)) {
+ dev_err(ctrl->dev, "unable to map buffer for DMA\n");
+ return -ENOMEM;
+ }
+
+ brcmnand_fill_dma_desc(host, ctrl->dma_desc, addr, buf_pa, len,
+ dma_cmd, true, true, 0);
+
+ brcmnand_dma_run(host, ctrl->dma_pa);
+
+ dma_unmap_single(ctrl->dev, buf_pa, len, dir);
+
+ if (ctrl->dma_desc->status_valid & FLASH_DMA_ECC_ERROR)
+ return -EBADMSG;
+ else if (ctrl->dma_desc->status_valid & FLASH_DMA_CORR_ERROR)
+ return -EUCLEAN;
+
+ return 0;
+}
+
+/*
+ * Assumes proper CS is already set
+ */
+static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip,
+ u64 addr, unsigned int trans, u32 *buf,
+ u8 *oob, u64 *err_addr)
+{
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ struct brcmnand_controller *ctrl = host->ctrl;
+ int i, j, ret = 0;
+
+ brcmnand_clear_ecc_addr(ctrl);
+
+ for (i = 0; i < trans; i++, addr += FC_BYTES) {
+ brcmnand_set_cmd_addr(mtd, addr);
+ /* SPARE_AREA_READ does not use ECC, so just use PAGE_READ */
+ brcmnand_send_cmd(host, CMD_PAGE_READ);
+ brcmnand_waitfunc(chip);
+
+ if (likely(buf)) {
+ brcmnand_soc_data_bus_prepare(ctrl->soc, false);
+
+ for (j = 0; j < FC_WORDS; j++, buf++)
+ *buf = brcmnand_read_fc(ctrl, j);
+
+ brcmnand_soc_data_bus_unprepare(ctrl->soc, false);
+ }
+
+ if (oob)
+ oob += read_oob_from_regs(ctrl, i, oob,
+ mtd->oobsize / trans,
+ host->hwcfg.sector_size_1k);
+
+ if (ret != -EBADMSG) {
+ *err_addr = brcmnand_get_uncorrecc_addr(ctrl);
+
+ if (*err_addr)
+ ret = -EBADMSG;
+ }
+
+ if (!ret) {
+ *err_addr = brcmnand_get_correcc_addr(ctrl);
+
+ if (*err_addr)
+ ret = -EUCLEAN;
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * Check a page to see if it is erased (w/ bitflips) after an uncorrectable ECC
+ * error
+ *
+ * Because the HW ECC signals an ECC error if an erased page has even a
+ * single bitflip, we must check each ECC error to see if it is actually an
+ * erased page with bitflips, not a truly corrupted page.
+ *
+ * On a real error, return a negative error code (-EBADMSG for an ECC
+ * error), and buf will contain the raw data.
+ * Otherwise, fill buf with 0xffs and return the maximum number of
+ * bitflips-per-ECC-sector to the caller.
+ */
+static int brcmstb_nand_verify_erased_page(struct mtd_info *mtd,
+ struct nand_chip *chip, void *buf, u64 addr)
+{
+ struct mtd_oob_region ecc;
+ int i;
+ int bitflips = 0;
+ int page = addr >> chip->page_shift;
+ int ret;
+ void *ecc_bytes;
+ void *ecc_chunk;
+
+ if (!buf)
+ buf = nand_get_data_buf(chip);
+
+ /* read without ecc for verification */
+ ret = chip->ecc.read_page_raw(chip, buf, true, page);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < chip->ecc.steps; i++) {
+ ecc_chunk = buf + chip->ecc.size * i;
+
+ mtd_ooblayout_ecc(mtd, i, &ecc);
+ ecc_bytes = chip->oob_poi + ecc.offset;
+
+ ret = nand_check_erased_ecc_chunk(ecc_chunk, chip->ecc.size,
+ ecc_bytes, ecc.length,
+ NULL, 0,
+ chip->ecc.strength);
+ if (ret < 0)
+ return ret;
+
+ bitflips = max(bitflips, ret);
+ }
+
+ return bitflips;
+}
+
+static int brcmnand_read(struct mtd_info *mtd, struct nand_chip *chip,
+ u64 addr, unsigned int trans, u32 *buf, u8 *oob)
+{
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ struct brcmnand_controller *ctrl = host->ctrl;
+ u64 err_addr = 0;
+ int err;
+ bool retry = true;
+ bool edu_err = false;
+
+ dev_dbg(ctrl->dev, "read %llx -> %p\n", (unsigned long long)addr, buf);
+
+try_dmaread:
+ brcmnand_clear_ecc_addr(ctrl);
+
+ if (ctrl->dma_trans && !oob && flash_dma_buf_ok(buf)) {
+ err = ctrl->dma_trans(host, addr, buf,
+ trans * FC_BYTES,
+ CMD_PAGE_READ);
+
+ if (err) {
+ if (mtd_is_bitflip_or_eccerr(err))
+ err_addr = addr;
+ else
+ return -EIO;
+ }
+
+ if (has_edu(ctrl) && err_addr)
+ edu_err = true;
+
+ } else {
+ if (oob)
+ memset(oob, 0x99, mtd->oobsize);
+
+ err = brcmnand_read_by_pio(mtd, chip, addr, trans, buf,
+ oob, &err_addr);
+ }
+
+ if (mtd_is_eccerr(err)) {
+ /*
+ * On controller versions 7.0 and 7.1, a DMA read that follows a
+ * PIO read which reported an uncorrectable error latches that
+ * error in the DMA engine, and it is cleared only on a
+ * subsequent DMA read. Retry once to clear a possible false
+ * error reported for the current DMA read.
+ */
+ if ((ctrl->nand_version == 0x0700) ||
+ (ctrl->nand_version == 0x0701)) {
+ if (retry) {
+ retry = false;
+ goto try_dmaread;
+ }
+ }
+
+ /*
+ * Controller version 7.2 has a HW encoder that detects erased
+ * page bitflips; apply SW verification for older controllers
+ * only.
+ */
+ if (ctrl->nand_version < 0x0702) {
+ err = brcmstb_nand_verify_erased_page(mtd, chip, buf,
+ addr);
+ /* erased page bitflips corrected */
+ if (err >= 0)
+ return err;
+ }
+
+ dev_dbg(ctrl->dev, "uncorrectable error at 0x%llx\n",
+ (unsigned long long)err_addr);
+ mtd->ecc_stats.failed++;
+ /* NAND layer expects zero on ECC errors */
+ return 0;
+ }
+
+ if (mtd_is_bitflip(err)) {
+ unsigned int corrected = brcmnand_count_corrected(ctrl);
+
+ /* in case of EDU correctable error we read again using PIO */
+ if (edu_err)
+ err = brcmnand_read_by_pio(mtd, chip, addr, trans, buf,
+ oob, &err_addr);
+
+ dev_dbg(ctrl->dev, "corrected error at 0x%llx\n",
+ (unsigned long long)err_addr);
+ mtd->ecc_stats.corrected += corrected;
+ /* Always exceed the software-imposed threshold */
+ return max(mtd->bitflip_threshold, corrected);
+ }
+
+ return 0;
+}
+
+static int brcmnand_read_page(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL;
+
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
+ return brcmnand_read(mtd, chip, host->last_addr,
+ mtd->writesize >> FC_SHIFT, (u32 *)buf, oob);
+}
+
+static int brcmnand_read_page_raw(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL;
+ int ret;
+
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
+ brcmnand_set_ecc_enabled(host, 0);
+ ret = brcmnand_read(mtd, chip, host->last_addr,
+ mtd->writesize >> FC_SHIFT, (u32 *)buf, oob);
+ brcmnand_set_ecc_enabled(host, 1);
+ return ret;
+}
+
+static int brcmnand_read_oob(struct nand_chip *chip, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ return brcmnand_read(mtd, chip, (u64)page << chip->page_shift,
+ mtd->writesize >> FC_SHIFT,
+ NULL, (u8 *)chip->oob_poi);
+}
+
+static int brcmnand_read_oob_raw(struct nand_chip *chip, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+
+ brcmnand_set_ecc_enabled(host, 0);
+ brcmnand_read(mtd, chip, (u64)page << chip->page_shift,
+ mtd->writesize >> FC_SHIFT,
+ NULL, (u8 *)chip->oob_poi);
+ brcmnand_set_ecc_enabled(host, 1);
+ return 0;
+}
+
+static int brcmnand_write(struct mtd_info *mtd, struct nand_chip *chip,
+ u64 addr, const u32 *buf, u8 *oob)
+{
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ struct brcmnand_controller *ctrl = host->ctrl;
+ unsigned int i, j, trans = mtd->writesize >> FC_SHIFT;
+ int status, ret = 0;
+
+ dev_dbg(ctrl->dev, "write %llx <- %p\n", (unsigned long long)addr, buf);
+
+ if (unlikely((unsigned long)buf & 0x03)) {
+ dev_warn(ctrl->dev, "unaligned buffer: %p\n", buf);
+ buf = (u32 *)((unsigned long)buf & ~0x03);
+ }
+
+ brcmnand_wp(mtd, 0);
+
+ for (i = 0; i < ctrl->max_oob; i += 4)
+ oob_reg_write(ctrl, i, 0xffffffff);
+
+ if (use_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) {
+ if (ctrl->dma_trans(host, addr, (u32 *)buf, mtd->writesize,
+ CMD_PROGRAM_PAGE))
+ ret = -EIO;
+
+ goto out;
+ }
+
+ for (i = 0; i < trans; i++, addr += FC_BYTES) {
+ /* full address MUST be set before populating FC */
+ brcmnand_set_cmd_addr(mtd, addr);
+
+ if (buf) {
+ brcmnand_soc_data_bus_prepare(ctrl->soc, false);
+
+ for (j = 0; j < FC_WORDS; j++, buf++)
+ brcmnand_write_fc(ctrl, j, *buf);
+
+ brcmnand_soc_data_bus_unprepare(ctrl->soc, false);
+ } else if (oob) {
+ for (j = 0; j < FC_WORDS; j++)
+ brcmnand_write_fc(ctrl, j, 0xffffffff);
+ }
+
+ if (oob) {
+ oob += write_oob_to_regs(ctrl, i, oob,
+ mtd->oobsize / trans,
+ host->hwcfg.sector_size_1k);
+ }
+
+ /* we cannot use SPARE_AREA_PROGRAM when PARTIAL_PAGE_EN=0 */
+ brcmnand_send_cmd(host, CMD_PROGRAM_PAGE);
+ status = brcmnand_waitfunc(chip);
+
+ if (status & NAND_STATUS_FAIL) {
+ dev_info(ctrl->dev, "program failed at %llx\n",
+ (unsigned long long)addr);
+ ret = -EIO;
+ goto out;
+ }
+ }
+out:
+ brcmnand_wp(mtd, 1);
+ return ret;
+}
+
+static int brcmnand_write_page(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ void *oob = oob_required ? chip->oob_poi : NULL;
+
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob);
+
+ return nand_prog_page_end_op(chip);
+}
+
+static int brcmnand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ void *oob = oob_required ? chip->oob_poi : NULL;
+
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ brcmnand_set_ecc_enabled(host, 0);
+ brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob);
+ brcmnand_set_ecc_enabled(host, 1);
+
+ return nand_prog_page_end_op(chip);
+}
+
+static int brcmnand_write_oob(struct nand_chip *chip, int page)
+{
+ return brcmnand_write(nand_to_mtd(chip), chip,
+ (u64)page << chip->page_shift, NULL,
+ chip->oob_poi);
+}
+
+static int brcmnand_write_oob_raw(struct nand_chip *chip, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ int ret;
+
+ brcmnand_set_ecc_enabled(host, 0);
+ ret = brcmnand_write(mtd, chip, (u64)page << chip->page_shift, NULL,
+ (u8 *)chip->oob_poi);
+ brcmnand_set_ecc_enabled(host, 1);
+
+ return ret;
+}
+
+/***********************************************************************
+ * Per-CS setup (1 NAND device)
+ ***********************************************************************/
+
+static int brcmnand_set_cfg(struct brcmnand_host *host,
+ struct brcmnand_cfg *cfg)
+{
+ struct brcmnand_controller *ctrl = host->ctrl;
+ struct nand_chip *chip = &host->chip;
+ u16 cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG);
+ u16 cfg_ext_offs = brcmnand_cs_offset(ctrl, host->cs,
+ BRCMNAND_CS_CFG_EXT);
+ u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
+ BRCMNAND_CS_ACC_CONTROL);
+ u8 block_size = 0, page_size = 0, device_size = 0;
+ u32 tmp;
+
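+ /*
+ * Encode the block size: controllers with a size table use the
+ * table index (e.g. 128KiB is index 2 in block_sizes_v6), while
+ * v7.1+ encode log2 relative to BRCMNAND_MIN_BLOCKSIZE.
+ */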
+ if (ctrl->block_sizes) {
+ int i, found;
+
+ for (i = 0, found = 0; ctrl->block_sizes[i]; i++)
+ if (ctrl->block_sizes[i] * 1024 == cfg->block_size) {
+ block_size = i;
+ found = 1;
+ }
+ if (!found) {
+ dev_warn(ctrl->dev, "invalid block size %u\n",
+ cfg->block_size);
+ return -EINVAL;
+ }
+ } else {
+ block_size = ffs(cfg->block_size) - ffs(BRCMNAND_MIN_BLOCKSIZE);
+ }
+
+ if (cfg->block_size < BRCMNAND_MIN_BLOCKSIZE || (ctrl->max_block_size &&
+ cfg->block_size > ctrl->max_block_size)) {
+ dev_warn(ctrl->dev, "invalid block size %u\n",
+ cfg->block_size);
+ block_size = 0;
+ }
+
+ if (ctrl->page_sizes) {
+ int i, found;
+
+ for (i = 0, found = 0; ctrl->page_sizes[i]; i++)
+ if (ctrl->page_sizes[i] == cfg->page_size) {
+ page_size = i;
+ found = 1;
+ }
+ if (!found) {
+ dev_warn(ctrl->dev, "invalid page size %u\n",
+ cfg->page_size);
+ return -EINVAL;
+ }
+ } else {
+ page_size = ffs(cfg->page_size) - ffs(BRCMNAND_MIN_PAGESIZE);
+ }
+
+ if (cfg->page_size < BRCMNAND_MIN_PAGESIZE || (ctrl->max_page_size &&
+ cfg->page_size > ctrl->max_page_size)) {
+ dev_warn(ctrl->dev, "invalid page size %u\n", cfg->page_size);
+ return -EINVAL;
+ }
+
+ if (fls64(cfg->device_size) < fls64(BRCMNAND_MIN_DEVSIZE)) {
+ dev_warn(ctrl->dev, "invalid device size 0x%llx\n",
+ (unsigned long long)cfg->device_size);
+ return -EINVAL;
+ }
+ device_size = fls64(cfg->device_size) - fls64(BRCMNAND_MIN_DEVSIZE);
+
+ tmp = (cfg->blk_adr_bytes << CFG_BLK_ADR_BYTES_SHIFT) |
+ (cfg->col_adr_bytes << CFG_COL_ADR_BYTES_SHIFT) |
+ (cfg->ful_adr_bytes << CFG_FUL_ADR_BYTES_SHIFT) |
+ (!!(cfg->device_width == 16) << CFG_BUS_WIDTH_SHIFT) |
+ (device_size << CFG_DEVICE_SIZE_SHIFT);
+ if (cfg_offs == cfg_ext_offs) {
+ tmp |= (page_size << ctrl->page_size_shift) |
+ (block_size << CFG_BLK_SIZE_SHIFT);
+ nand_writereg(ctrl, cfg_offs, tmp);
+ } else {
+ nand_writereg(ctrl, cfg_offs, tmp);
+ tmp = (page_size << CFG_EXT_PAGE_SIZE_SHIFT) |
+ (block_size << CFG_EXT_BLK_SIZE_SHIFT);
+ nand_writereg(ctrl, cfg_ext_offs, tmp);
+ }
+
+ tmp = nand_readreg(ctrl, acc_control_offs);
+ tmp &= ~brcmnand_ecc_level_mask(ctrl);
+ tmp &= ~brcmnand_spare_area_mask(ctrl);
+ if (ctrl->nand_version >= 0x0302) {
+ tmp |= cfg->ecc_level << ctrl->ecc_level_shift;
+ tmp |= cfg->spare_area_size;
+ }
+ nand_writereg(ctrl, acc_control_offs, tmp);
+
+ brcmnand_set_sector_size_1k(host, cfg->sector_size_1k);
+
+ /* threshold = ceil(BCH-level * 0.75) */
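+ /* e.g. BCH-4 (strength 4): DIV_ROUND_UP(4 * 3, 4) = 3 */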
+ brcmnand_wr_corr_thresh(host, DIV_ROUND_UP(chip->ecc.strength * 3, 4));
+
+ return 0;
+}
+
+static void brcmnand_print_cfg(struct brcmnand_host *host,
+ char *buf, struct brcmnand_cfg *cfg)
+{
+ buf += sprintf(buf,
+ "%lluMiB total, %uKiB blocks, %u%s pages, %uB OOB, %u-bit",
+ (unsigned long long)cfg->device_size >> 20,
+ cfg->block_size >> 10,
+ cfg->page_size >= 1024 ? cfg->page_size >> 10 : cfg->page_size,
+ cfg->page_size >= 1024 ? "KiB" : "B",
+ cfg->spare_area_size, cfg->device_width);
+
+ /* Account for Hamming ECC and for BCH 512B vs 1KiB sectors */
+ if (is_hamming_ecc(host->ctrl, cfg))
+ sprintf(buf, ", Hamming ECC");
+ else if (cfg->sector_size_1k)
+ sprintf(buf, ", BCH-%u (1KiB sector)", cfg->ecc_level << 1);
+ else
+ sprintf(buf, ", BCH-%u", cfg->ecc_level);
+}
+
+/*
+ * Minimum number of bytes to address a page. Calculated as:
+ * roundup(log2(size / page-size) / 8)
+ *
+ * NB: the following does not "round up" for non-power-of-2 'size'; but this is
+ * OK because many other things will break if 'size' is irregular...
+ */
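+/*
+ * E.g. a 1GiB device with 2KiB pages: ilog2(2^30) - ilog2(2^11) = 19
+ * block-address bits, so ALIGN(19, 8) >> 3 = 3 address bytes.
+ */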
+static inline int get_blk_adr_bytes(u64 size, u32 writesize)
+{
+ return ALIGN(ilog2(size) - ilog2(writesize), 8) >> 3;
+}
+
+static int brcmnand_setup_dev(struct brcmnand_host *host)
+{
+ struct mtd_info *mtd = nand_to_mtd(&host->chip);
+ struct nand_chip *chip = &host->chip;
+ const struct nand_ecc_props *requirements =
+ nanddev_get_ecc_requirements(&chip->base);
+ struct nand_memory_organization *memorg =
+ nanddev_get_memorg(&chip->base);
+ struct brcmnand_controller *ctrl = host->ctrl;
+ struct brcmnand_cfg *cfg = &host->hwcfg;
+ char msg[128];
+ u32 offs, tmp, oob_sector;
+ int ret;
+
+ memset(cfg, 0, sizeof(*cfg));
+
+ ret = of_property_read_u32(nand_get_flash_node(chip),
+ "brcm,nand-oob-sector-size",
+ &oob_sector);
+ if (ret) {
+ /* Use detected size */
+ cfg->spare_area_size = mtd->oobsize /
+ (mtd->writesize >> FC_SHIFT);
+ } else {
+ cfg->spare_area_size = oob_sector;
+ }
+ if (cfg->spare_area_size > ctrl->max_oob)
+ cfg->spare_area_size = ctrl->max_oob;
+ /*
+ * Set mtd and memorg oobsize to be consistent with controller's
+ * spare_area_size, as the rest is inaccessible.
+ */
+ mtd->oobsize = cfg->spare_area_size * (mtd->writesize >> FC_SHIFT);
+ memorg->oobsize = mtd->oobsize;
+
+ cfg->device_size = mtd->size;
+ cfg->block_size = mtd->erasesize;
+ cfg->page_size = mtd->writesize;
+ cfg->device_width = (chip->options & NAND_BUSWIDTH_16) ? 16 : 8;
+ cfg->col_adr_bytes = 2;
+ cfg->blk_adr_bytes = get_blk_adr_bytes(mtd->size, mtd->writesize);
+
+ if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST) {
+ dev_err(ctrl->dev, "only HW ECC supported; selected: %d\n",
+ chip->ecc.engine_type);
+ return -EINVAL;
+ }
+
+ if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN) {
+ if (chip->ecc.strength == 1 && chip->ecc.size == 512)
+ /* Default to Hamming for 1-bit ECC, if unspecified */
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+ else
+ /* Otherwise, BCH */
+ chip->ecc.algo = NAND_ECC_ALGO_BCH;
+ }
+
+ if (chip->ecc.algo == NAND_ECC_ALGO_HAMMING &&
+ (chip->ecc.strength != 1 || chip->ecc.size != 512)) {
+ dev_err(ctrl->dev, "invalid Hamming params: %d bits per %d bytes\n",
+ chip->ecc.strength, chip->ecc.size);
+ return -EINVAL;
+ }
+
+ if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_NONE &&
+ (!chip->ecc.size || !chip->ecc.strength)) {
+ if (requirements->step_size && requirements->strength) {
+ /* use detected ECC parameters */
+ chip->ecc.size = requirements->step_size;
+ chip->ecc.strength = requirements->strength;
+ dev_info(ctrl->dev, "Using ECC step-size %d, strength %d\n",
+ chip->ecc.size, chip->ecc.strength);
+ }
+ }
+
+ switch (chip->ecc.size) {
+ case 512:
+ if (chip->ecc.algo == NAND_ECC_ALGO_HAMMING)
+ cfg->ecc_level = 15;
+ else
+ cfg->ecc_level = chip->ecc.strength;
+ cfg->sector_size_1k = 0;
+ break;
+ case 1024:
+ if (!(ctrl->features & BRCMNAND_HAS_1K_SECTORS)) {
+ dev_err(ctrl->dev, "1KB sectors not supported\n");
+ return -EINVAL;
+ }
+ if (chip->ecc.strength & 0x1) {
+ dev_err(ctrl->dev,
+ "odd ECC not supported with 1KB sectors\n");
+ return -EINVAL;
+ }
+
+ cfg->ecc_level = chip->ecc.strength >> 1;
+ cfg->sector_size_1k = 1;
+ break;
+ default:
+ dev_err(ctrl->dev, "unsupported ECC size: %d\n",
+ chip->ecc.size);
+ return -EINVAL;
+ }
+
+ cfg->ful_adr_bytes = cfg->blk_adr_bytes;
+ if (mtd->writesize > 512)
+ cfg->ful_adr_bytes += cfg->col_adr_bytes;
+ else
+ cfg->ful_adr_bytes += 1;
+
+ ret = brcmnand_set_cfg(host, cfg);
+ if (ret)
+ return ret;
+
+ brcmnand_set_ecc_enabled(host, 1);
+
+ brcmnand_print_cfg(host, msg, cfg);
+ dev_info(ctrl->dev, "detected %s\n", msg);
+
+ /* Configure ACC_CONTROL */
+ offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_ACC_CONTROL);
+ tmp = nand_readreg(ctrl, offs);
+ tmp &= ~ACC_CONTROL_PARTIAL_PAGE;
+ tmp &= ~ACC_CONTROL_RD_ERASED;
+
+ /* We need to turn on Read from erased pages protected by ECC */
+ if (ctrl->nand_version >= 0x0702)
+ tmp |= ACC_CONTROL_RD_ERASED;
+ tmp &= ~ACC_CONTROL_FAST_PGM_RDIN;
+ if (ctrl->features & BRCMNAND_HAS_PREFETCH)
+ tmp &= ~ACC_CONTROL_PREFETCH;
+
+ nand_writereg(ctrl, offs, tmp);
+
+ return 0;
+}
+
+static int brcmnand_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ int ret;
+
+ chip->options |= NAND_NO_SUBPAGE_WRITE;
+ /*
+ * Avoid (for instance) kmap()'d buffers from JFFS2, which we can't DMA
+ * to/from, and have nand_base pass us a bounce buffer instead, as
+ * needed.
+ */
+ chip->options |= NAND_USES_DMA;
+
+ if (chip->bbt_options & NAND_BBT_USE_FLASH)
+ chip->bbt_options |= NAND_BBT_NO_OOB;
+
+ if (brcmnand_setup_dev(host))
+ return -ENXIO;
+
+ chip->ecc.size = host->hwcfg.sector_size_1k ? 1024 : 512;
+
+ /* only use our internal HW threshold */
+ mtd->bitflip_threshold = 1;
+
+ ret = brcmstb_choose_ecc_layout(host);
+
+ /* If OOB is written with ECC enabled it will cause ECC errors */
+ if (is_hamming_ecc(host->ctrl, &host->hwcfg)) {
+ chip->ecc.write_oob = brcmnand_write_oob_raw;
+ chip->ecc.read_oob = brcmnand_read_oob_raw;
+ }
+
+ return ret;
+}
+
+static const struct nand_controller_ops brcmnand_controller_ops = {
+ .attach_chip = brcmnand_attach_chip,
+};
+
+static int brcmnand_init_cs(struct brcmnand_host *host, struct device_node *dn)
+{
+ struct brcmnand_controller *ctrl = host->ctrl;
+ struct platform_device *pdev = host->pdev;
+ struct mtd_info *mtd;
+ struct nand_chip *chip;
+ int ret;
+ u16 cfg_offs;
+
+ ret = of_property_read_u32(dn, "reg", &host->cs);
+ if (ret) {
+ dev_err(&pdev->dev, "can't get chip-select\n");
+ return -ENXIO;
+ }
+
+ mtd = nand_to_mtd(&host->chip);
+ chip = &host->chip;
+
+ nand_set_flash_node(chip, dn);
+ nand_set_controller_data(chip, host);
+ mtd->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "brcmnand.%d",
+ host->cs);
+ if (!mtd->name)
+ return -ENOMEM;
+
+ mtd->owner = THIS_MODULE;
+ mtd->dev.parent = &pdev->dev;
+
+ chip->legacy.cmd_ctrl = brcmnand_cmd_ctrl;
+ chip->legacy.cmdfunc = brcmnand_cmdfunc;
+ chip->legacy.waitfunc = brcmnand_waitfunc;
+ chip->legacy.read_byte = brcmnand_read_byte;
+ chip->legacy.read_buf = brcmnand_read_buf;
+ chip->legacy.write_buf = brcmnand_write_buf;
+
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+ chip->ecc.read_page = brcmnand_read_page;
+ chip->ecc.write_page = brcmnand_write_page;
+ chip->ecc.read_page_raw = brcmnand_read_page_raw;
+ chip->ecc.write_page_raw = brcmnand_write_page_raw;
+ chip->ecc.write_oob_raw = brcmnand_write_oob_raw;
+ chip->ecc.read_oob_raw = brcmnand_read_oob_raw;
+ chip->ecc.read_oob = brcmnand_read_oob;
+ chip->ecc.write_oob = brcmnand_write_oob;
+
+ chip->controller = &ctrl->controller;
+
+ /*
+ * The bootloader might have configured 16-bit mode, but the NAND
+ * READID command only works in 8-bit mode. Force 8-bit mode here
+ * to ensure that NAND READID works.
+ */
+ cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG);
+ nand_writereg(ctrl, cfg_offs,
+ nand_readreg(ctrl, cfg_offs) & ~CFG_BUS_WIDTH);
+
+ ret = nand_scan(chip, 1);
+ if (ret)
+ return ret;
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret)
+ nand_cleanup(chip);
+
+ return ret;
+}
+
+static void brcmnand_save_restore_cs_config(struct brcmnand_host *host,
+ int restore)
+{
+ struct brcmnand_controller *ctrl = host->ctrl;
+ u16 cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG);
+ u16 cfg_ext_offs = brcmnand_cs_offset(ctrl, host->cs,
+ BRCMNAND_CS_CFG_EXT);
+ u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
+ BRCMNAND_CS_ACC_CONTROL);
+ u16 t1_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_TIMING1);
+ u16 t2_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_TIMING2);
+
+ if (restore) {
+ nand_writereg(ctrl, cfg_offs, host->hwcfg.config);
+ if (cfg_offs != cfg_ext_offs)
+ nand_writereg(ctrl, cfg_ext_offs,
+ host->hwcfg.config_ext);
+ nand_writereg(ctrl, acc_control_offs, host->hwcfg.acc_control);
+ nand_writereg(ctrl, t1_offs, host->hwcfg.timing_1);
+ nand_writereg(ctrl, t2_offs, host->hwcfg.timing_2);
+ } else {
+ host->hwcfg.config = nand_readreg(ctrl, cfg_offs);
+ if (cfg_offs != cfg_ext_offs)
+ host->hwcfg.config_ext =
+ nand_readreg(ctrl, cfg_ext_offs);
+ host->hwcfg.acc_control = nand_readreg(ctrl, acc_control_offs);
+ host->hwcfg.timing_1 = nand_readreg(ctrl, t1_offs);
+ host->hwcfg.timing_2 = nand_readreg(ctrl, t2_offs);
+ }
+}
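+
+/*
+ * brcmnand_save_restore_cs_config() is called with restore == 0 from
+ * brcmnand_suspend() to snapshot the per-chip-select registers, and with
+ * restore == 1 from brcmnand_resume() to write the snapshot back.
+ */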
+
+static int brcmnand_suspend(struct device *dev)
+{
+ struct brcmnand_controller *ctrl = dev_get_drvdata(dev);
+ struct brcmnand_host *host;
+
+ list_for_each_entry(host, &ctrl->host_list, node)
+ brcmnand_save_restore_cs_config(host, 0);
+
+ ctrl->nand_cs_nand_select = brcmnand_read_reg(ctrl, BRCMNAND_CS_SELECT);
+ ctrl->nand_cs_nand_xor = brcmnand_read_reg(ctrl, BRCMNAND_CS_XOR);
+ ctrl->corr_stat_threshold =
+ brcmnand_read_reg(ctrl, BRCMNAND_CORR_THRESHOLD);
+
+ if (has_flash_dma(ctrl))
+ ctrl->flash_dma_mode = flash_dma_readl(ctrl, FLASH_DMA_MODE);
+ else if (has_edu(ctrl))
+ ctrl->edu_config = edu_readl(ctrl, EDU_CONFIG);
+
+ return 0;
+}
+
+static int brcmnand_resume(struct device *dev)
+{
+ struct brcmnand_controller *ctrl = dev_get_drvdata(dev);
+ struct brcmnand_host *host;
+
+ if (has_flash_dma(ctrl)) {
+ flash_dma_writel(ctrl, FLASH_DMA_MODE, ctrl->flash_dma_mode);
+ flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0);
+ }
+
+ if (has_edu(ctrl)) {
+ ctrl->edu_config = edu_readl(ctrl, EDU_CONFIG);
+ edu_writel(ctrl, EDU_CONFIG, ctrl->edu_config);
+ edu_readl(ctrl, EDU_CONFIG);
+ brcmnand_edu_init(ctrl);
+ }
+
+ brcmnand_write_reg(ctrl, BRCMNAND_CS_SELECT, ctrl->nand_cs_nand_select);
+ brcmnand_write_reg(ctrl, BRCMNAND_CS_XOR, ctrl->nand_cs_nand_xor);
+ brcmnand_write_reg(ctrl, BRCMNAND_CORR_THRESHOLD,
+ ctrl->corr_stat_threshold);
+ if (ctrl->soc) {
+ /* Clear/re-enable interrupt */
+ ctrl->soc->ctlrdy_ack(ctrl->soc);
+ ctrl->soc->ctlrdy_set_enabled(ctrl->soc, true);
+ }
+
+ list_for_each_entry(host, &ctrl->host_list, node) {
+ struct nand_chip *chip = &host->chip;
+
+ brcmnand_save_restore_cs_config(host, 1);
+
+ /* Reset the chip, required by some chips after power-up */
+ nand_reset_op(chip);
+ }
+
+ return 0;
+}
+
+const struct dev_pm_ops brcmnand_pm_ops = {
+ .suspend = brcmnand_suspend,
+ .resume = brcmnand_resume,
+};
+EXPORT_SYMBOL_GPL(brcmnand_pm_ops);
+
+static const struct of_device_id brcmnand_of_match[] = {
+ { .compatible = "brcm,brcmnand-v2.1" },
+ { .compatible = "brcm,brcmnand-v2.2" },
+ { .compatible = "brcm,brcmnand-v4.0" },
+ { .compatible = "brcm,brcmnand-v5.0" },
+ { .compatible = "brcm,brcmnand-v6.0" },
+ { .compatible = "brcm,brcmnand-v6.1" },
+ { .compatible = "brcm,brcmnand-v6.2" },
+ { .compatible = "brcm,brcmnand-v7.0" },
+ { .compatible = "brcm,brcmnand-v7.1" },
+ { .compatible = "brcm,brcmnand-v7.2" },
+ { .compatible = "brcm,brcmnand-v7.3" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, brcmnand_of_match);
+
+/***********************************************************************
+ * Platform driver setup (per controller)
+ ***********************************************************************/
+static int brcmnand_edu_setup(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct brcmnand_controller *ctrl = dev_get_drvdata(&pdev->dev);
+ struct resource *res;
+ int ret;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "flash-edu");
+ if (res) {
+ ctrl->edu_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ctrl->edu_base))
+ return PTR_ERR(ctrl->edu_base);
+
+ ctrl->edu_offsets = edu_regs;
+
+ edu_writel(ctrl, EDU_CONFIG, EDU_CONFIG_MODE_NAND |
+ EDU_CONFIG_SWAP_CFG);
+ edu_readl(ctrl, EDU_CONFIG);
+
+ /* initialize edu */
+ brcmnand_edu_init(ctrl);
+
+ ctrl->edu_irq = platform_get_irq_optional(pdev, 1);
+ if (ctrl->edu_irq < 0) {
+ dev_warn(dev,
+ "FLASH EDU enabled, using ctlrdy irq\n");
+ } else {
+ ret = devm_request_irq(dev, ctrl->edu_irq,
+ brcmnand_edu_irq, 0,
+ "brcmnand-edu", ctrl);
+ if (ret < 0) {
+ dev_err(ctrl->dev, "can't allocate IRQ %d: error %d\n",
+ ctrl->edu_irq, ret);
+ return ret;
+ }
+
+ dev_info(dev, "FLASH EDU enabled using irq %u\n",
+ ctrl->edu_irq);
+ }
+ }
+
+ return 0;
+}
+
+int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *dn = dev->of_node, *child;
+ struct brcmnand_controller *ctrl;
+ struct resource *res;
+ int ret;
+
+ /* We only support device-tree instantiation */
+ if (!dn)
+ return -ENODEV;
+
+ if (!of_match_node(brcmnand_of_match, dn))
+ return -ENODEV;
+
+ ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, ctrl);
+ ctrl->dev = dev;
+ ctrl->soc = soc;
+
+ /*
+ * Enable the static key if the SoC provides I/O operations, indicating
+ * that a non-memory-mapped I/O access path must be used. Note that
+ * ctrl->soc must be assigned before this check; testing it while still
+ * NULL would mean the key is never enabled.
+ */
+ if (brcmnand_soc_has_ops(ctrl->soc))
+ static_branch_enable(&brcmnand_soc_has_ops_key);
+
+ init_completion(&ctrl->done);
+ init_completion(&ctrl->dma_done);
+ init_completion(&ctrl->edu_done);
+ nand_controller_init(&ctrl->controller);
+ ctrl->controller.ops = &brcmnand_controller_ops;
+ INIT_LIST_HEAD(&ctrl->host_list);
+
+ /* NAND register range */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ctrl->nand_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ctrl->nand_base))
+ return PTR_ERR(ctrl->nand_base);
+
+ /* Enable clock before using NAND registers */
+ ctrl->clk = devm_clk_get(dev, "nand");
+ if (!IS_ERR(ctrl->clk)) {
+ ret = clk_prepare_enable(ctrl->clk);
+ if (ret)
+ return ret;
+ } else {
+ ret = PTR_ERR(ctrl->clk);
+ if (ret == -EPROBE_DEFER)
+ return ret;
+
+ ctrl->clk = NULL;
+ }
+
+ /* Initialize NAND revision */
+ ret = brcmnand_revision_init(ctrl);
+ if (ret)
+ goto err;
+
+ /*
+ * Most chips have this cache at a fixed offset within the 'nand' block.
+ * Some must specify this region separately.
+ */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand-cache");
+ if (res) {
+ ctrl->nand_fc = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ctrl->nand_fc)) {
+ ret = PTR_ERR(ctrl->nand_fc);
+ goto err;
+ }
+ } else {
+ ctrl->nand_fc = ctrl->nand_base +
+ ctrl->reg_offsets[BRCMNAND_FC_BASE];
+ }
+
+ /* FLASH_DMA */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "flash-dma");
+ if (res) {
+ ctrl->flash_dma_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ctrl->flash_dma_base)) {
+ ret = PTR_ERR(ctrl->flash_dma_base);
+ goto err;
+ }
+
+ /* initialize the dma version */
+ brcmnand_flash_dma_revision_init(ctrl);
+
+ ret = -EIO;
+ if (ctrl->nand_version >= 0x0700)
+ ret = dma_set_mask_and_coherent(&pdev->dev,
+ DMA_BIT_MASK(40));
+ if (ret)
+ ret = dma_set_mask_and_coherent(&pdev->dev,
+ DMA_BIT_MASK(32));
+ if (ret)
+ goto err;
+
+ /* linked-list and stop on error */
+ flash_dma_writel(ctrl, FLASH_DMA_MODE, FLASH_DMA_MODE_MASK);
+ flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0);
+
+ /* Allocate descriptor(s) */
+ ctrl->dma_desc = dmam_alloc_coherent(dev,
+ sizeof(*ctrl->dma_desc),
+ &ctrl->dma_pa, GFP_KERNEL);
+ if (!ctrl->dma_desc) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ctrl->dma_irq = platform_get_irq(pdev, 1);
+ if ((int)ctrl->dma_irq < 0) {
+ dev_err(dev, "missing FLASH_DMA IRQ\n");
+ ret = -ENODEV;
+ goto err;
+ }
+
+ ret = devm_request_irq(dev, ctrl->dma_irq,
+ brcmnand_dma_irq, 0, DRV_NAME,
+ ctrl);
+ if (ret < 0) {
+ dev_err(dev, "can't allocate IRQ %d: error %d\n",
+ ctrl->dma_irq, ret);
+ goto err;
+ }
+
+ dev_info(dev, "enabling FLASH_DMA\n");
+ /* set flash dma transfer function to call */
+ ctrl->dma_trans = brcmnand_dma_trans;
+ } else {
+ ret = brcmnand_edu_setup(pdev);
+ if (ret < 0)
+ goto err;
+
+ if (has_edu(ctrl))
+ /* set edu transfer function to call */
+ ctrl->dma_trans = brcmnand_edu_trans;
+ }
+
+ /* Disable automatic device ID config, direct addressing */
+ brcmnand_rmw_reg(ctrl, BRCMNAND_CS_SELECT,
+ CS_SELECT_AUTO_DEVICE_ID_CFG | 0xff, 0, 0);
+ /* Disable XOR addressing */
+ brcmnand_rmw_reg(ctrl, BRCMNAND_CS_XOR, 0xff, 0, 0);
+
+ if (ctrl->features & BRCMNAND_HAS_WP) {
+ /* Permanently disable write protection */
+ if (wp_on == 2)
+ brcmnand_set_wp(ctrl, false);
+ } else {
+ wp_on = 0;
+ }
+
+ /* IRQ */
+ ctrl->irq = platform_get_irq(pdev, 0);
+ if ((int)ctrl->irq < 0) {
+ dev_err(dev, "no IRQ defined\n");
+ ret = -ENODEV;
+ goto err;
+ }
+
+ /*
+ * Some SoCs integrate this controller (e.g., its interrupt bits) in
+ * interesting ways.
+ */
+ if (soc) {
+ ctrl->soc = soc;
+
+ ret = devm_request_irq(dev, ctrl->irq, brcmnand_irq, 0,
+ DRV_NAME, ctrl);
+
+ /* Enable interrupt */
+ ctrl->soc->ctlrdy_ack(ctrl->soc);
+ ctrl->soc->ctlrdy_set_enabled(ctrl->soc, true);
+ } else {
+ /* Use standard interrupt infrastructure */
+ ret = devm_request_irq(dev, ctrl->irq, brcmnand_ctlrdy_irq, 0,
+ DRV_NAME, ctrl);
+ }
+ if (ret < 0) {
+ dev_err(dev, "can't allocate IRQ %d: error %d\n",
+ ctrl->irq, ret);
+ goto err;
+ }
+
+ for_each_available_child_of_node(dn, child) {
+ if (of_device_is_compatible(child, "brcm,nandcs")) {
+ struct brcmnand_host *host;
+
+ host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+ if (!host) {
+ of_node_put(child);
+ ret = -ENOMEM;
+ goto err;
+ }
+ host->pdev = pdev;
+ host->ctrl = ctrl;
+
+ ret = brcmnand_init_cs(host, child);
+ if (ret) {
+ devm_kfree(dev, host);
+ continue; /* Try all chip-selects */
+ }
+
+ list_add_tail(&host->node, &ctrl->host_list);
+ }
+ }
+
+ /* No chip-selects could initialize properly */
+ if (list_empty(&ctrl->host_list)) {
+ ret = -ENODEV;
+ goto err;
+ }
+
+ return 0;
+
+err:
+ clk_disable_unprepare(ctrl->clk);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(brcmnand_probe);
+
+int brcmnand_remove(struct platform_device *pdev)
+{
+ struct brcmnand_controller *ctrl = dev_get_drvdata(&pdev->dev);
+ struct brcmnand_host *host;
+ struct nand_chip *chip;
+ int ret;
+
+ list_for_each_entry(host, &ctrl->host_list, node) {
+ chip = &host->chip;
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ }
+
+ clk_disable_unprepare(ctrl->clk);
+
+ dev_set_drvdata(&pdev->dev, NULL);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(brcmnand_remove);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Kevin Cernekee");
+MODULE_AUTHOR("Brian Norris");
+MODULE_DESCRIPTION("NAND driver for Broadcom chips");
+MODULE_ALIAS("platform:brcmnand");
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.h b/drivers/mtd/nand/raw/brcmnand/brcmnand.h
new file mode 100644
index 000000000..f1f93d85f
--- /dev/null
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright © 2015 Broadcom Corporation
+ */
+
+#ifndef __BRCMNAND_H__
+#define __BRCMNAND_H__
+
+#include <linux/types.h>
+#include <linux/io.h>
+
+struct platform_device;
+struct dev_pm_ops;
+struct brcmnand_io_ops;
+
+/*
+ * Special register offset constant used to intercept a non-MMIO access
+ * to the flash cache register space. It is intentionally large so that
+ * it does not overlap with any existing register offset.
+ */
+#define BRCMNAND_NON_MMIO_FC_ADDR 0xffffffff
+
+struct brcmnand_soc {
+ bool (*ctlrdy_ack)(struct brcmnand_soc *soc);
+ void (*ctlrdy_set_enabled)(struct brcmnand_soc *soc, bool en);
+ void (*prepare_data_bus)(struct brcmnand_soc *soc, bool prepare,
+ bool is_param);
+ const struct brcmnand_io_ops *ops;
+};
+
+struct brcmnand_io_ops {
+ u32 (*read_reg)(struct brcmnand_soc *soc, u32 offset);
+ void (*write_reg)(struct brcmnand_soc *soc, u32 val, u32 offset);
+};
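+
+/*
+ * Illustrative sketch, not part of this driver: a SoC whose controller
+ * registers sit behind an indirect (non-MMIO) bus would implement
+ * brcmnand_io_ops, embed brcmnand_soc in its private data and pass it to
+ * brcmnand_probe(). All foo_* names below are hypothetical.
+ *
+ *	static u32 foo_read_reg(struct brcmnand_soc *soc, u32 offset)
+ *	{
+ *		struct foo_soc *priv = container_of(soc, struct foo_soc, soc);
+ *
+ *		return foo_bus_readl(priv->core, offset);
+ *	}
+ *
+ *	static void foo_write_reg(struct brcmnand_soc *soc, u32 val, u32 offset)
+ *	{
+ *		struct foo_soc *priv = container_of(soc, struct foo_soc, soc);
+ *
+ *		foo_bus_writel(priv->core, val, offset);
+ *	}
+ *
+ *	static const struct brcmnand_io_ops foo_io_ops = {
+ *		.read_reg = foo_read_reg,
+ *		.write_reg = foo_write_reg,
+ *	};
+ */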
+
+static inline void brcmnand_soc_data_bus_prepare(struct brcmnand_soc *soc,
+ bool is_param)
+{
+ if (soc && soc->prepare_data_bus)
+ soc->prepare_data_bus(soc, true, is_param);
+}
+
+static inline void brcmnand_soc_data_bus_unprepare(struct brcmnand_soc *soc,
+ bool is_param)
+{
+ if (soc && soc->prepare_data_bus)
+ soc->prepare_data_bus(soc, false, is_param);
+}
+
+static inline u32 brcmnand_readl(void __iomem *addr)
+{
+ /*
+ * MIPS endianness is configured by boot strap, which also reverses all
+ * bus endianness (i.e., big-endian CPU + big endian bus ==> native
+ * endian I/O).
+ *
+ * Other architectures (e.g., ARM) either do not support big endian, or
+ * else leave I/O in little endian mode.
+ */
+ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ return __raw_readl(addr);
+ else
+ return readl_relaxed(addr);
+}
+
+static inline void brcmnand_writel(u32 val, void __iomem *addr)
+{
+ /* See brcmnand_readl() comments */
+ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ __raw_writel(val, addr);
+ else
+ writel_relaxed(val, addr);
+}
+
+static inline bool brcmnand_soc_has_ops(struct brcmnand_soc *soc)
+{
+ return soc && soc->ops && soc->ops->read_reg && soc->ops->write_reg;
+}
+
+static inline u32 brcmnand_soc_read(struct brcmnand_soc *soc, u32 offset)
+{
+ return soc->ops->read_reg(soc, offset);
+}
+
+static inline void brcmnand_soc_write(struct brcmnand_soc *soc, u32 val,
+ u32 offset)
+{
+ soc->ops->write_reg(soc, val, offset);
+}
+
+int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc);
+int brcmnand_remove(struct platform_device *pdev);
+
+extern const struct dev_pm_ops brcmnand_pm_ops;
+
+#endif /* __BRCMNAND_H__ */
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmstb_nand.c b/drivers/mtd/nand/raw/brcmnand/brcmstb_nand.c
new file mode 100644
index 000000000..950923d97
--- /dev/null
+++ b/drivers/mtd/nand/raw/brcmnand/brcmstb_nand.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright © 2015 Broadcom Corporation
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+
+#include "brcmnand.h"
+
+static const struct of_device_id brcmstb_nand_of_match[] = {
+ { .compatible = "brcm,brcmnand" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, brcmstb_nand_of_match);
+
+static int brcmstb_nand_probe(struct platform_device *pdev)
+{
+ return brcmnand_probe(pdev, NULL);
+}
+
+static struct platform_driver brcmstb_nand_driver = {
+ .probe = brcmstb_nand_probe,
+ .remove = brcmnand_remove,
+ .driver = {
+ .name = "brcmstb_nand",
+ .pm = &brcmnand_pm_ops,
+ .of_match_table = brcmstb_nand_of_match,
+ }
+};
+module_platform_driver(brcmstb_nand_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Brian Norris");
+MODULE_DESCRIPTION("NAND driver for Broadcom STB chips");
diff --git a/drivers/mtd/nand/raw/brcmnand/iproc_nand.c b/drivers/mtd/nand/raw/brcmnand/iproc_nand.c
new file mode 100644
index 000000000..d32950847
--- /dev/null
+++ b/drivers/mtd/nand/raw/brcmnand/iproc_nand.c
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright © 2015 Broadcom Corporation
+ */
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "brcmnand.h"
+
+struct iproc_nand_soc {
+ struct brcmnand_soc soc;
+
+ void __iomem *idm_base;
+ void __iomem *ext_base;
+ spinlock_t idm_lock;
+};
+
+#define IPROC_NAND_CTLR_READY_OFFSET 0x10
+#define IPROC_NAND_CTLR_READY BIT(0)
+
+#define IPROC_NAND_IO_CTRL_OFFSET 0x00
+#define IPROC_NAND_APB_LE_MODE BIT(24)
+#define IPROC_NAND_INT_CTRL_READ_ENABLE BIT(6)
+
+static bool iproc_nand_intc_ack(struct brcmnand_soc *soc)
+{
+ struct iproc_nand_soc *priv =
+ container_of(soc, struct iproc_nand_soc, soc);
+ void __iomem *mmio = priv->ext_base + IPROC_NAND_CTLR_READY_OFFSET;
+ u32 val = brcmnand_readl(mmio);
+
+ if (val & IPROC_NAND_CTLR_READY) {
+ brcmnand_writel(IPROC_NAND_CTLR_READY, mmio);
+ return true;
+ }
+
+ return false;
+}
+
+static void iproc_nand_intc_set(struct brcmnand_soc *soc, bool en)
+{
+ struct iproc_nand_soc *priv =
+ container_of(soc, struct iproc_nand_soc, soc);
+ void __iomem *mmio = priv->idm_base + IPROC_NAND_IO_CTRL_OFFSET;
+ u32 val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->idm_lock, flags);
+
+ val = brcmnand_readl(mmio);
+
+ if (en)
+ val |= IPROC_NAND_INT_CTRL_READ_ENABLE;
+ else
+ val &= ~IPROC_NAND_INT_CTRL_READ_ENABLE;
+
+ brcmnand_writel(val, mmio);
+
+ spin_unlock_irqrestore(&priv->idm_lock, flags);
+}
+
+static void iproc_nand_apb_access(struct brcmnand_soc *soc, bool prepare,
+ bool is_param)
+{
+ struct iproc_nand_soc *priv =
+ container_of(soc, struct iproc_nand_soc, soc);
+ void __iomem *mmio = priv->idm_base + IPROC_NAND_IO_CTRL_OFFSET;
+ u32 val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->idm_lock, flags);
+
+ val = brcmnand_readl(mmio);
+
+ /*
+ * In the case of BE, or when dealing with NAND data, always configure
+ * the APB bus to LE mode before accessing the FIFO, and switch back to
+ * BE mode after the access is done.
+ */
+ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) || !is_param) {
+ if (prepare)
+ val |= IPROC_NAND_APB_LE_MODE;
+ else
+ val &= ~IPROC_NAND_APB_LE_MODE;
+ } else { /* when in LE accessing the parameter page, keep APB in BE */
+ val &= ~IPROC_NAND_APB_LE_MODE;
+ }
+
+ brcmnand_writel(val, mmio);
+
+ spin_unlock_irqrestore(&priv->idm_lock, flags);
+}
+
+static int iproc_nand_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct iproc_nand_soc *priv;
+ struct brcmnand_soc *soc;
+ struct resource *res;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ soc = &priv->soc;
+
+ spin_lock_init(&priv->idm_lock);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iproc-idm");
+ priv->idm_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->idm_base))
+ return PTR_ERR(priv->idm_base);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iproc-ext");
+ priv->ext_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->ext_base))
+ return PTR_ERR(priv->ext_base);
+
+ soc->ctlrdy_ack = iproc_nand_intc_ack;
+ soc->ctlrdy_set_enabled = iproc_nand_intc_set;
+ soc->prepare_data_bus = iproc_nand_apb_access;
+
+ return brcmnand_probe(pdev, soc);
+}
+
+static const struct of_device_id iproc_nand_of_match[] = {
+ { .compatible = "brcm,nand-iproc" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, iproc_nand_of_match);
+
+static struct platform_driver iproc_nand_driver = {
+ .probe = iproc_nand_probe,
+ .remove = brcmnand_remove,
+ .driver = {
+ .name = "iproc_nand",
+ .pm = &brcmnand_pm_ops,
+ .of_match_table = iproc_nand_of_match,
+ }
+};
+module_platform_driver(iproc_nand_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Brian Norris");
+MODULE_AUTHOR("Ray Jui");
+MODULE_DESCRIPTION("NAND driver for Broadcom IPROC-based SoCs");
diff --git a/drivers/mtd/nand/raw/cadence-nand-controller.c b/drivers/mtd/nand/raw/cadence-nand-controller.c
new file mode 100644
index 000000000..4fdb39214
--- /dev/null
+++ b/drivers/mtd/nand/raw/cadence-nand-controller.c
@@ -0,0 +1,3038 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Cadence NAND flash controller driver
+ *
+ * Copyright (C) 2019 Cadence
+ *
+ * Author: Piotr Sroka <piotrs@cadence.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/of_device.h>
+#include <linux/iopoll.h>
+#include <linux/slab.h>
+
+/*
+ * HPNFC can work in 3 modes:
+ * - PIO - can work with master or slave DMA.
+ * - CDMA - needs master DMA for accessing command descriptors.
+ * - Generic mode - can use only slave DMA.
+ * CDMA and PIO modes can be used to execute only base commands.
+ * Generic mode can be used to execute any command
+ * on the NAND flash memory. The driver uses CDMA mode for
+ * block erasing, page reading and page programming.
+ * Generic mode is used for executing the rest of the commands.
+ */
+
+#define MAX_ADDRESS_CYC 6
+#define MAX_ERASE_ADDRESS_CYC 3
+#define MAX_DATA_SIZE 0xFFFC
+#define DMA_DATA_SIZE_ALIGN 8
+
+/* Register definition. */
+/*
+ * Command register 0.
+ * Writing data to this register will initiate a new transaction
+ * of the NF controller.
+ */
+#define CMD_REG0 0x0000
+/* Command type field mask. */
+#define CMD_REG0_CT GENMASK(31, 30)
+/* Command type CDMA. */
+#define CMD_REG0_CT_CDMA 0uL
+/* Command type generic. */
+#define CMD_REG0_CT_GEN 3uL
+/* Command thread number field mask. */
+#define CMD_REG0_TN GENMASK(27, 24)
+
+/* Command register 2. */
+#define CMD_REG2 0x0008
+/* Command register 3. */
+#define CMD_REG3 0x000C
+/* Pointer register selecting which thread's status will be reported. */
+#define CMD_STATUS_PTR 0x0010
+/* Command status register for selected thread. */
+#define CMD_STATUS 0x0014
+
+/* Interrupt status register. */
+#define INTR_STATUS 0x0110
+#define INTR_STATUS_SDMA_ERR BIT(22)
+#define INTR_STATUS_SDMA_TRIGG BIT(21)
+#define INTR_STATUS_UNSUPP_CMD BIT(19)
+#define INTR_STATUS_DDMA_TERR BIT(18)
+#define INTR_STATUS_CDMA_TERR BIT(17)
+#define INTR_STATUS_CDMA_IDL BIT(16)
+
+/* Interrupt enable register. */
+#define INTR_ENABLE 0x0114
+#define INTR_ENABLE_INTR_EN BIT(31)
+#define INTR_ENABLE_SDMA_ERR_EN BIT(22)
+#define INTR_ENABLE_SDMA_TRIGG_EN BIT(21)
+#define INTR_ENABLE_UNSUPP_CMD_EN BIT(19)
+#define INTR_ENABLE_DDMA_TERR_EN BIT(18)
+#define INTR_ENABLE_CDMA_TERR_EN BIT(17)
+#define INTR_ENABLE_CDMA_IDLE_EN BIT(16)
+
+/* Controller internal state. */
+#define CTRL_STATUS 0x0118
+#define CTRL_STATUS_INIT_COMP BIT(9)
+#define CTRL_STATUS_CTRL_BUSY BIT(8)
+
+/* Command Engine threads state. */
+#define TRD_STATUS 0x0120
+
+/* Command Engine interrupt thread error status. */
+#define TRD_ERR_INT_STATUS 0x0128
+/* Command Engine interrupt thread error enable. */
+#define TRD_ERR_INT_STATUS_EN 0x0130
+/* Command Engine interrupt thread complete status. */
+#define TRD_COMP_INT_STATUS 0x0138
+
+/*
+ * Transfer config 0 register.
+ * Configures data transfer parameters.
+ */
+#define TRAN_CFG_0 0x0400
+/* Offset value from the beginning of the page. */
+#define TRAN_CFG_0_OFFSET GENMASK(31, 16)
+/* Number of sectors to transfer within a single NF device page. */
+#define TRAN_CFG_0_SEC_CNT GENMASK(7, 0)
+
+/*
+ * Transfer config 1 register.
+ * Configures data transfer parameters.
+ */
+#define TRAN_CFG_1 0x0404
+/* Size of last data sector. */
+#define TRAN_CFG_1_LAST_SEC_SIZE GENMASK(31, 16)
+/* Size of a non-last data sector. */
+#define TRAN_CFG_1_SECTOR_SIZE GENMASK(15, 0)
+
+/* ECC engine configuration register 0. */
+#define ECC_CONFIG_0 0x0428
+/* Correction strength. */
+#define ECC_CONFIG_0_CORR_STR GENMASK(10, 8)
+/* Enable erased pages detection mechanism. */
+#define ECC_CONFIG_0_ERASE_DET_EN BIT(1)
+/* Enable controller ECC check bits generation and correction. */
+#define ECC_CONFIG_0_ECC_EN BIT(0)
+
+/* ECC engine configuration register 1. */
+#define ECC_CONFIG_1 0x042C
+
+/* Multiplane settings register. */
+#define MULTIPLANE_CFG 0x0434
+/* Cache operation settings. */
+#define CACHE_CFG 0x0438
+
+/* DMA settings register. */
+#define DMA_SETINGS 0x043C
+/* Enable SDMA error reporting on access to an unprepared slave DMA interface. */
+#define DMA_SETINGS_SDMA_ERR_RSP BIT(17)
+
+/* Transferred data block size for the slave DMA module. */
+#define SDMA_SIZE 0x0440
+
+/* Thread number associated with transferred data block
+ * for the slave DMA module.
+ */
+#define SDMA_TRD_NUM 0x0444
+/* Thread number mask. */
+#define SDMA_TRD_NUM_SDMA_TRD GENMASK(2, 0)
+
+#define CONTROL_DATA_CTRL 0x0494
+/* Control data size mask. */
+#define CONTROL_DATA_CTRL_SIZE GENMASK(15, 0)
+
+#define CTRL_VERSION 0x800
+#define CTRL_VERSION_REV GENMASK(7, 0)
+
+/* Available hardware features of the controller. */
+#define CTRL_FEATURES 0x804
+/* Support for NV-DDR2/3 work mode. */
+#define CTRL_FEATURES_NVDDR_2_3 BIT(28)
+/* Support for NV-DDR work mode. */
+#define CTRL_FEATURES_NVDDR BIT(27)
+/* Support for asynchronous work mode. */
+#define CTRL_FEATURES_ASYNC BIT(26)
+/* Number of banks supported by the hardware. */
+#define CTRL_FEATURES_N_BANKS GENMASK(25, 24)
+/* Slave and Master DMA data width. */
+#define CTRL_FEATURES_DMA_DWITH64 BIT(21)
+/* Availability of the Control Data feature. */
+#define CTRL_FEATURES_CONTROL_DATA BIT(10)
+
+/* BCH Engine identification register 0 - correction strengths. */
+#define BCH_CFG_0 0x838
+#define BCH_CFG_0_CORR_CAP_0 GENMASK(7, 0)
+#define BCH_CFG_0_CORR_CAP_1 GENMASK(15, 8)
+#define BCH_CFG_0_CORR_CAP_2 GENMASK(23, 16)
+#define BCH_CFG_0_CORR_CAP_3 GENMASK(31, 24)
+
+/* BCH Engine identification register 1 - correction strengths. */
+#define BCH_CFG_1 0x83C
+#define BCH_CFG_1_CORR_CAP_4 GENMASK(7, 0)
+#define BCH_CFG_1_CORR_CAP_5 GENMASK(15, 8)
+#define BCH_CFG_1_CORR_CAP_6 GENMASK(23, 16)
+#define BCH_CFG_1_CORR_CAP_7 GENMASK(31, 24)
+
+/* BCH Engine identification register 2 - sector sizes. */
+#define BCH_CFG_2 0x840
+#define BCH_CFG_2_SECT_0 GENMASK(15, 0)
+#define BCH_CFG_2_SECT_1 GENMASK(31, 16)
+
+/* BCH Engine identification register 3. */
+#define BCH_CFG_3 0x844
+#define BCH_CFG_3_METADATA_SIZE GENMASK(23, 16)
+
+/* Ready/Busy# line status. */
+#define RBN_SETINGS 0x1004
+
+/* Common settings. */
+#define COMMON_SET 0x1008
+/* 16 bit device connected to the NAND Flash interface. */
+#define COMMON_SET_DEVICE_16BIT BIT(8)
+
+/* Skip_bytes registers. */
+#define SKIP_BYTES_CONF 0x100C
+#define SKIP_BYTES_MARKER_VALUE GENMASK(31, 16)
+#define SKIP_BYTES_NUM_OF_BYTES GENMASK(7, 0)
+
+#define SKIP_BYTES_OFFSET 0x1010
+#define SKIP_BYTES_OFFSET_VALUE GENMASK(23, 0)
+
+/* Timings configuration. */
+#define ASYNC_TOGGLE_TIMINGS 0x101c
+#define ASYNC_TOGGLE_TIMINGS_TRH GENMASK(28, 24)
+#define ASYNC_TOGGLE_TIMINGS_TRP GENMASK(20, 16)
+#define ASYNC_TOGGLE_TIMINGS_TWH GENMASK(12, 8)
+#define ASYNC_TOGGLE_TIMINGS_TWP GENMASK(4, 0)
+
+#define TIMINGS0 0x1024
+#define TIMINGS0_TADL GENMASK(31, 24)
+#define TIMINGS0_TCCS GENMASK(23, 16)
+#define TIMINGS0_TWHR GENMASK(15, 8)
+#define TIMINGS0_TRHW GENMASK(7, 0)
+
+#define TIMINGS1 0x1028
+#define TIMINGS1_TRHZ GENMASK(31, 24)
+#define TIMINGS1_TWB GENMASK(23, 16)
+#define TIMINGS1_TVDLY GENMASK(7, 0)
+
+#define TIMINGS2 0x102c
+#define TIMINGS2_TFEAT GENMASK(25, 16)
+#define TIMINGS2_CS_HOLD_TIME GENMASK(13, 8)
+#define TIMINGS2_CS_SETUP_TIME GENMASK(5, 0)
+
+/* Configuration of the resynchronization of slave DLL of PHY. */
+#define DLL_PHY_CTRL 0x1034
+#define DLL_PHY_CTRL_DLL_RST_N BIT(24)
+#define DLL_PHY_CTRL_EXTENDED_WR_MODE BIT(17)
+#define DLL_PHY_CTRL_EXTENDED_RD_MODE BIT(16)
+#define DLL_PHY_CTRL_RS_HIGH_WAIT_CNT GENMASK(11, 8)
+#define DLL_PHY_CTRL_RS_IDLE_CNT GENMASK(7, 0)
+
+/* Register controlling DQ related timing. */
+#define PHY_DQ_TIMING 0x2000
+/* Register controlling DSQ related timing. */
+#define PHY_DQS_TIMING 0x2004
+#define PHY_DQS_TIMING_DQS_SEL_OE_END GENMASK(3, 0)
+#define PHY_DQS_TIMING_PHONY_DQS_SEL BIT(16)
+#define PHY_DQS_TIMING_USE_PHONY_DQS BIT(20)
+
+/* Register controlling the gate and loopback control related timing. */
+#define PHY_GATE_LPBK_CTRL 0x2008
+#define PHY_GATE_LPBK_CTRL_RDS GENMASK(24, 19)
+
+/* Register holds the control for the master DLL logic. */
+#define PHY_DLL_MASTER_CTRL 0x200C
+#define PHY_DLL_MASTER_CTRL_BYPASS_MODE BIT(23)
+
+/* Register holds the control for the slave DLL logic. */
+#define PHY_DLL_SLAVE_CTRL 0x2010
+
+/* This register handles the global control settings for the PHY. */
+#define PHY_CTRL 0x2080
+#define PHY_CTRL_SDR_DQS BIT(14)
+#define PHY_CTRL_PHONY_DQS GENMASK(9, 4)
+
+/*
+ * This register handles the global control settings
+ * for the termination selects for reads.
+ */
+#define PHY_TSEL 0x2084
+
+/* Generic command layout. */
+#define GCMD_LAY_CS GENMASK_ULL(11, 8)
+/*
+ * This bit informs the minicontroller if it has to wait for tWB
+ * after sending the last CMD/ADDR/DATA in the sequence.
+ */
+#define GCMD_LAY_TWB BIT_ULL(6)
+/* Type of generic instruction. */
+#define GCMD_LAY_INSTR GENMASK_ULL(5, 0)
+
+/* Generic CMD sequence type. */
+#define GCMD_LAY_INSTR_CMD 0
+/* Generic ADDR sequence type. */
+#define GCMD_LAY_INSTR_ADDR 1
+/* Generic data transfer sequence type. */
+#define GCMD_LAY_INSTR_DATA 2
+
+/* Input part of a generic command when the type of input is a command. */
+#define GCMD_LAY_INPUT_CMD GENMASK_ULL(23, 16)
+
+/* Generic command address sequence - address fields. */
+#define GCMD_LAY_INPUT_ADDR GENMASK_ULL(63, 16)
+/* Generic command address sequence - address size. */
+#define GCMD_LAY_INPUT_ADDR_SIZE GENMASK_ULL(13, 11)
+
+/* Transfer direction field of generic command data sequence. */
+#define GCMD_DIR BIT_ULL(11)
+/* Read transfer direction of generic command data sequence. */
+#define GCMD_DIR_READ 0
+/* Write transfer direction of generic command data sequence. */
+#define GCMD_DIR_WRITE 1
+
+/* ECC enabled flag of generic command data sequence - ECC enabled. */
+#define GCMD_ECC_EN BIT_ULL(12)
+/* Generic command data sequence - sector size. */
+#define GCMD_SECT_SIZE GENMASK_ULL(31, 16)
+/* Generic command data sequence - sector count. */
+#define GCMD_SECT_CNT GENMASK_ULL(39, 32)
+/* Generic command data sequence - last sector size. */
+#define GCMD_LAST_SIZE GENMASK_ULL(55, 40)
+
+/* CDMA descriptor fields. */
+/* Erase command type of CDMA descriptor. */
+#define CDMA_CT_ERASE 0x1000
+/* Program page command type of CDMA descriptor. */
+#define CDMA_CT_WR 0x2100
+/* Read page command type of CDMA descriptor. */
+#define CDMA_CT_RD 0x2200
+
+/* Flash pointer memory shift. */
+#define CDMA_CFPTR_MEM_SHIFT 24
+/* Flash pointer memory mask. */
+#define CDMA_CFPTR_MEM GENMASK(26, 24)
+
+/*
+ * Command DMA descriptor flags. If set, an interrupt is issued after
+ * the completion of descriptor processing.
+ */
+#define CDMA_CF_INT BIT(8)
+/*
+ * Command DMA descriptor flags - the next descriptor
+ * address field is valid and descriptor processing should continue.
+ */
+#define CDMA_CF_CONT BIT(9)
+/* DMA master flag of command DMA descriptor. */
+#define CDMA_CF_DMA_MASTER BIT(10)
+
+/* Operation complete status of command descriptor. */
+#define CDMA_CS_COMP BIT(15)
+/* Command descriptor status - operation fail. */
+#define CDMA_CS_FAIL BIT(14)
+/* Command descriptor status - page erased. */
+#define CDMA_CS_ERP BIT(11)
+/* Command descriptor status - timeout occurred. */
+#define CDMA_CS_TOUT BIT(10)
+/*
+ * Maximum amount of correction applied to one ECC sector.
+ * It is part of command descriptor status.
+ */
+#define CDMA_CS_MAXERR GENMASK(9, 2)
+/* Command descriptor status - uncorrectable ECC error. */
+#define CDMA_CS_UNCE BIT(1)
+/* Command descriptor status - descriptor error. */
+#define CDMA_CS_ERR BIT(0)
+
+/* Status of operation - OK. */
+#define STAT_OK 0
+/* Status of operation - FAIL. */
+#define STAT_FAIL 2
+/* Status of operation - uncorrectable ECC error. */
+#define STAT_ECC_UNCORR 3
+/* Status of operation - page erased. */
+#define STAT_ERASED 5
+/* Status of operation - correctable ECC error. */
+#define STAT_ECC_CORR 6
+/* Status of operation - unexpected state. */
+#define STAT_UNKNOWN 7
+/* Status of operation - operation is not completed yet. */
+#define STAT_BUSY 0xFF
+
+#define BCH_MAX_NUM_CORR_CAPS 8
+#define BCH_MAX_NUM_SECTOR_SIZES 2
+
+struct cadence_nand_timings {
+ u32 async_toggle_timings;
+ u32 timings0;
+ u32 timings1;
+ u32 timings2;
+ u32 dll_phy_ctrl;
+ u32 phy_ctrl;
+ u32 phy_dqs_timing;
+ u32 phy_gate_lpbk_ctrl;
+};
+
+/* Command DMA descriptor. */
+struct cadence_nand_cdma_desc {
+ /* Next descriptor address. */
+ u64 next_pointer;
+
+ /* Flash address is a 32-bit address comprising BANK and ROW ADDR. */
+ u32 flash_pointer;
+ /* Field appears in HPNFC version 13. */
+ u16 bank;
+ u16 rsvd0;
+
+ /* Operation the controller needs to perform. */
+ u16 command_type;
+ u16 rsvd1;
+ /* Flags for operation of this command. */
+ u16 command_flags;
+ u16 rsvd2;
+
+ /* System/host memory address required for data DMA commands. */
+ u64 memory_pointer;
+
+ /* Status of operation. */
+ u32 status;
+ u32 rsvd3;
+
+ /* Address pointer to sync buffer location. */
+ u64 sync_flag_pointer;
+
+ /* Controls the buffer sync mechanism. */
+ u32 sync_arguments;
+ u32 rsvd4;
+
+ /* Control data pointer. */
+ u64 ctrl_data_ptr;
+};
+
+/* Interrupt status. */
+struct cadence_nand_irq_status {
+ /* Thread operation complete status. */
+ u32 trd_status;
+ /* Thread operation error. */
+ u32 trd_error;
+ /* Controller status. */
+ u32 status;
+};
+
+/* Cadence NAND flash controller capabilities obtained from driver data. */
+struct cadence_nand_dt_devdata {
+ /* Skew value of the output signals of the NAND Flash interface. */
+ u32 if_skew;
+ /* Informs whether the slave DMA interface is connected to a DMA engine. */
+ unsigned int has_dma:1;
+};
+
+/* Cadence NAND flash controller capabilities read from registers. */
+struct cdns_nand_caps {
+ /* Maximum number of banks supported by hardware. */
+ u8 max_banks;
+ /* Slave and Master DMA data width in bytes (4 or 8). */
+ u8 data_dma_width;
+ /* Control Data feature supported. */
+ bool data_control_supp;
+ /* Is PHY type DLL. */
+ bool is_phy_type_dll;
+};
+
+struct cdns_nand_ctrl {
+ struct device *dev;
+ struct nand_controller controller;
+ struct cadence_nand_cdma_desc *cdma_desc;
+ /* IP capability. */
+ const struct cadence_nand_dt_devdata *caps1;
+ struct cdns_nand_caps caps2;
+ u8 ctrl_rev;
+ dma_addr_t dma_cdma_desc;
+ u8 *buf;
+ u32 buf_size;
+ u8 curr_corr_str_idx;
+
+ /* Register interface. */
+ void __iomem *reg;
+
+ struct {
+ void __iomem *virt;
+ dma_addr_t dma;
+ } io;
+
+ int irq;
+ /* Interrupts that have happened. */
+ struct cadence_nand_irq_status irq_status;
+ /* Interrupts we are waiting for. */
+ struct cadence_nand_irq_status irq_mask;
+ struct completion complete;
+ /* Protect irq_mask and irq_status. */
+ spinlock_t irq_lock;
+
+ int ecc_strengths[BCH_MAX_NUM_CORR_CAPS];
+ struct nand_ecc_step_info ecc_stepinfos[BCH_MAX_NUM_SECTOR_SIZES];
+ struct nand_ecc_caps ecc_caps;
+
+ int curr_trans_type;
+
+ struct dma_chan *dmac;
+
+ u32 nf_clk_rate;
+ /*
+ * Estimated Board delay. The value includes the total
+ * round trip delay for the signals and is used for deciding on values
+ * associated with data read capture.
+ */
+ u32 board_delay;
+
+ struct nand_chip *selected_chip;
+
+ unsigned long assigned_cs;
+ struct list_head chips;
+ u8 bch_metadata_size;
+};
+
+struct cdns_nand_chip {
+ struct cadence_nand_timings timings;
+ struct nand_chip chip;
+ u8 nsels;
+ struct list_head node;
+
+ /*
+ * Part of the OOB area of the NAND flash memory page.
+ * This part is available for the user to read or write.
+ */
+ u32 avail_oob_size;
+
+ /* Sector size. There are a few sectors per mtd->writesize. */
+ u32 sector_size;
+ u32 sector_count;
+
+ /* Offset of BBM. */
+ u8 bbm_offs;
+ /* Number of bytes reserved for BBM. */
+ u8 bbm_len;
+ /* ECC strength index. */
+ u8 corr_str_idx;
+
+ u8 cs[];
+};
+
+struct ecc_info {
+ int (*calc_ecc_bytes)(int step_size, int strength);
+ int max_step_size;
+};
+
+static inline struct
+cdns_nand_chip *to_cdns_nand_chip(struct nand_chip *chip)
+{
+ return container_of(chip, struct cdns_nand_chip, chip);
+}
+
+static inline struct
+cdns_nand_ctrl *to_cdns_nand_ctrl(struct nand_controller *controller)
+{
+ return container_of(controller, struct cdns_nand_ctrl, controller);
+}
+
+static bool
+cadence_nand_dma_buf_ok(struct cdns_nand_ctrl *cdns_ctrl, const void *buf,
+ u32 buf_len)
+{
+ u8 data_dma_width = cdns_ctrl->caps2.data_dma_width;
+
+ return buf && virt_addr_valid(buf) &&
+ likely(IS_ALIGNED((uintptr_t)buf, data_dma_width)) &&
+ likely(IS_ALIGNED(buf_len, DMA_DATA_SIZE_ALIGN));
+}
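+
+/*
+ * Minimal illustration of the check above, assuming data_dma_width == 4
+ * (buffer names hypothetical):
+ *
+ *	u8 *kbuf = kmalloc(4096, GFP_KERNEL);
+ *
+ *	cadence_nand_dma_buf_ok(cdns_ctrl, kbuf, 4096);		// true
+ *	cadence_nand_dma_buf_ok(cdns_ctrl, kbuf + 1, 4088);	// false: pointer misaligned
+ *	cadence_nand_dma_buf_ok(cdns_ctrl, kbuf, 4090);		// false: length not 8-aligned
+ *	cadence_nand_dma_buf_ok(cdns_ctrl, vmalloc(4096), 4096);// false: fails virt_addr_valid()
+ *
+ * A false result makes callers fall back to the slower CPU-copy path
+ * through the controller's internal buffer.
+ */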
+
+static int cadence_nand_wait_for_value(struct cdns_nand_ctrl *cdns_ctrl,
+ u32 reg_offset, u32 timeout_us,
+ u32 mask, bool is_clear)
+{
+ u32 val;
+ int ret;
+
+ ret = readl_relaxed_poll_timeout(cdns_ctrl->reg + reg_offset,
+ val, !(val & mask) == is_clear,
+ 10, timeout_us);
+
+ if (ret < 0) {
+ dev_err(cdns_ctrl->dev,
+ "Timeout while waiting for reg %x with mask %x is clear %d\n",
+ reg_offset, mask, is_clear);
+ }
+
+ return ret;
+}
+
+static int cadence_nand_set_ecc_enable(struct cdns_nand_ctrl *cdns_ctrl,
+ bool enable)
+{
+ u32 reg;
+
+ if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
+ 1000000,
+ CTRL_STATUS_CTRL_BUSY, true))
+ return -ETIMEDOUT;
+
+ reg = readl_relaxed(cdns_ctrl->reg + ECC_CONFIG_0);
+
+ if (enable)
+ reg |= ECC_CONFIG_0_ECC_EN;
+ else
+ reg &= ~ECC_CONFIG_0_ECC_EN;
+
+ writel_relaxed(reg, cdns_ctrl->reg + ECC_CONFIG_0);
+
+ return 0;
+}
+
+static void cadence_nand_set_ecc_strength(struct cdns_nand_ctrl *cdns_ctrl,
+ u8 corr_str_idx)
+{
+ u32 reg;
+
+ if (cdns_ctrl->curr_corr_str_idx == corr_str_idx)
+ return;
+
+ reg = readl_relaxed(cdns_ctrl->reg + ECC_CONFIG_0);
+ reg &= ~ECC_CONFIG_0_CORR_STR;
+ reg |= FIELD_PREP(ECC_CONFIG_0_CORR_STR, corr_str_idx);
+ writel_relaxed(reg, cdns_ctrl->reg + ECC_CONFIG_0);
+
+ cdns_ctrl->curr_corr_str_idx = corr_str_idx;
+}
+
+static int cadence_nand_get_ecc_strength_idx(struct cdns_nand_ctrl *cdns_ctrl,
+ u8 strength)
+{
+ int i, corr_str_idx = -1;
+
+ for (i = 0; i < BCH_MAX_NUM_CORR_CAPS; i++) {
+ if (cdns_ctrl->ecc_strengths[i] == strength) {
+ corr_str_idx = i;
+ break;
+ }
+ }
+
+ return corr_str_idx;
+}
+
+static int cadence_nand_set_skip_marker_val(struct cdns_nand_ctrl *cdns_ctrl,
+ u16 marker_value)
+{
+ u32 reg;
+
+ if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
+ 1000000,
+ CTRL_STATUS_CTRL_BUSY, true))
+ return -ETIMEDOUT;
+
+ reg = readl_relaxed(cdns_ctrl->reg + SKIP_BYTES_CONF);
+ reg &= ~SKIP_BYTES_MARKER_VALUE;
+ reg |= FIELD_PREP(SKIP_BYTES_MARKER_VALUE,
+ marker_value);
+
+ writel_relaxed(reg, cdns_ctrl->reg + SKIP_BYTES_CONF);
+
+ return 0;
+}
+
+static int cadence_nand_set_skip_bytes_conf(struct cdns_nand_ctrl *cdns_ctrl,
+ u8 num_of_bytes,
+ u32 offset_value,
+ int enable)
+{
+ u32 reg, skip_bytes_offset;
+
+ if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
+ 1000000,
+ CTRL_STATUS_CTRL_BUSY, true))
+ return -ETIMEDOUT;
+
+ if (!enable) {
+ num_of_bytes = 0;
+ offset_value = 0;
+ }
+
+ reg = readl_relaxed(cdns_ctrl->reg + SKIP_BYTES_CONF);
+ reg &= ~SKIP_BYTES_NUM_OF_BYTES;
+ reg |= FIELD_PREP(SKIP_BYTES_NUM_OF_BYTES,
+ num_of_bytes);
+ skip_bytes_offset = FIELD_PREP(SKIP_BYTES_OFFSET_VALUE,
+ offset_value);
+
+ writel_relaxed(reg, cdns_ctrl->reg + SKIP_BYTES_CONF);
+ writel_relaxed(skip_bytes_offset, cdns_ctrl->reg + SKIP_BYTES_OFFSET);
+
+ return 0;
+}
+
+/* Function enables/disables hardware detection of erased data. */
+static void cadence_nand_set_erase_detection(struct cdns_nand_ctrl *cdns_ctrl,
+ bool enable,
+ u8 bitflips_threshold)
+{
+ u32 reg;
+
+ reg = readl_relaxed(cdns_ctrl->reg + ECC_CONFIG_0);
+
+ if (enable)
+ reg |= ECC_CONFIG_0_ERASE_DET_EN;
+ else
+ reg &= ~ECC_CONFIG_0_ERASE_DET_EN;
+
+ writel_relaxed(reg, cdns_ctrl->reg + ECC_CONFIG_0);
+ writel_relaxed(bitflips_threshold, cdns_ctrl->reg + ECC_CONFIG_1);
+}
+
+static int cadence_nand_set_access_width16(struct cdns_nand_ctrl *cdns_ctrl,
+ bool bit_bus16)
+{
+ u32 reg;
+
+ if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
+ 1000000,
+ CTRL_STATUS_CTRL_BUSY, true))
+ return -ETIMEDOUT;
+
+ reg = readl_relaxed(cdns_ctrl->reg + COMMON_SET);
+
+ if (!bit_bus16)
+ reg &= ~COMMON_SET_DEVICE_16BIT;
+ else
+ reg |= COMMON_SET_DEVICE_16BIT;
+ writel_relaxed(reg, cdns_ctrl->reg + COMMON_SET);
+
+ return 0;
+}
+
+static void
+cadence_nand_clear_interrupt(struct cdns_nand_ctrl *cdns_ctrl,
+ struct cadence_nand_irq_status *irq_status)
+{
+ writel_relaxed(irq_status->status, cdns_ctrl->reg + INTR_STATUS);
+ writel_relaxed(irq_status->trd_status,
+ cdns_ctrl->reg + TRD_COMP_INT_STATUS);
+ writel_relaxed(irq_status->trd_error,
+ cdns_ctrl->reg + TRD_ERR_INT_STATUS);
+}
+
+static void
+cadence_nand_read_int_status(struct cdns_nand_ctrl *cdns_ctrl,
+ struct cadence_nand_irq_status *irq_status)
+{
+ irq_status->status = readl_relaxed(cdns_ctrl->reg + INTR_STATUS);
+ irq_status->trd_status = readl_relaxed(cdns_ctrl->reg
+ + TRD_COMP_INT_STATUS);
+ irq_status->trd_error = readl_relaxed(cdns_ctrl->reg
+ + TRD_ERR_INT_STATUS);
+}
+
+static u32 irq_detected(struct cdns_nand_ctrl *cdns_ctrl,
+ struct cadence_nand_irq_status *irq_status)
+{
+ cadence_nand_read_int_status(cdns_ctrl, irq_status);
+
+ return irq_status->status || irq_status->trd_status ||
+ irq_status->trd_error;
+}
+
+static void cadence_nand_reset_irq(struct cdns_nand_ctrl *cdns_ctrl)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cdns_ctrl->irq_lock, flags);
+ memset(&cdns_ctrl->irq_status, 0, sizeof(cdns_ctrl->irq_status));
+ memset(&cdns_ctrl->irq_mask, 0, sizeof(cdns_ctrl->irq_mask));
+ spin_unlock_irqrestore(&cdns_ctrl->irq_lock, flags);
+}
+
+/*
+ * This is the interrupt service routine. It handles all interrupts
+ * sent to this device.
+ */
+static irqreturn_t cadence_nand_isr(int irq, void *dev_id)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = dev_id;
+ struct cadence_nand_irq_status irq_status;
+ irqreturn_t result = IRQ_NONE;
+
+ spin_lock(&cdns_ctrl->irq_lock);
+
+ if (irq_detected(cdns_ctrl, &irq_status)) {
+ /* Handle interrupt. */
+ /* First acknowledge it. */
+ cadence_nand_clear_interrupt(cdns_ctrl, &irq_status);
+ /* Store the status in the device context for someone to read. */
+ cdns_ctrl->irq_status.status |= irq_status.status;
+ cdns_ctrl->irq_status.trd_status |= irq_status.trd_status;
+ cdns_ctrl->irq_status.trd_error |= irq_status.trd_error;
+ /* Notify anyone who cares that it happened. */
+ complete(&cdns_ctrl->complete);
+ /* Tell the OS that we've handled this. */
+ result = IRQ_HANDLED;
+ }
+ spin_unlock(&cdns_ctrl->irq_lock);
+
+ return result;
+}
+
+static void cadence_nand_set_irq_mask(struct cdns_nand_ctrl *cdns_ctrl,
+ struct cadence_nand_irq_status *irq_mask)
+{
+ writel_relaxed(INTR_ENABLE_INTR_EN | irq_mask->status,
+ cdns_ctrl->reg + INTR_ENABLE);
+
+ writel_relaxed(irq_mask->trd_error,
+ cdns_ctrl->reg + TRD_ERR_INT_STATUS_EN);
+}
+
+static void
+cadence_nand_wait_for_irq(struct cdns_nand_ctrl *cdns_ctrl,
+ struct cadence_nand_irq_status *irq_mask,
+ struct cadence_nand_irq_status *irq_status)
+{
+ unsigned long timeout = msecs_to_jiffies(10000);
+ unsigned long time_left;
+
+ time_left = wait_for_completion_timeout(&cdns_ctrl->complete,
+ timeout);
+
+ *irq_status = cdns_ctrl->irq_status;
+ if (time_left == 0) {
+ /* Timeout error. */
+ dev_err(cdns_ctrl->dev, "timeout occurred:\n");
+ dev_err(cdns_ctrl->dev, "\tstatus = 0x%x, mask = 0x%x\n",
+ irq_status->status, irq_mask->status);
+ dev_err(cdns_ctrl->dev,
+ "\ttrd_status = 0x%x, trd_status mask = 0x%x\n",
+ irq_status->trd_status, irq_mask->trd_status);
+ dev_err(cdns_ctrl->dev,
+ "\t trd_error = 0x%x, trd_error mask = 0x%x\n",
+ irq_status->trd_error, irq_mask->trd_error);
+ }
+}
+
+/* Execute generic command on NAND controller. */
+static int cadence_nand_generic_cmd_send(struct cdns_nand_ctrl *cdns_ctrl,
+ u8 chip_nr,
+ u64 mini_ctrl_cmd)
+{
+ u32 mini_ctrl_cmd_l, mini_ctrl_cmd_h, reg;
+
+ mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_CS, chip_nr);
+ mini_ctrl_cmd_l = mini_ctrl_cmd & 0xFFFFFFFF;
+ mini_ctrl_cmd_h = mini_ctrl_cmd >> 32;
+
+ if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
+ 1000000,
+ CTRL_STATUS_CTRL_BUSY, true))
+ return -ETIMEDOUT;
+
+ cadence_nand_reset_irq(cdns_ctrl);
+
+ writel_relaxed(mini_ctrl_cmd_l, cdns_ctrl->reg + CMD_REG2);
+ writel_relaxed(mini_ctrl_cmd_h, cdns_ctrl->reg + CMD_REG3);
+
+ /* Select generic command. */
+ reg = FIELD_PREP(CMD_REG0_CT, CMD_REG0_CT_GEN);
+ /* Thread number. */
+ reg |= FIELD_PREP(CMD_REG0_TN, 0);
+
+ /* Issue command. */
+ writel_relaxed(reg, cdns_ctrl->reg + CMD_REG0);
+
+ return 0;
+}
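+
+/*
+ * Sketch of how callers compose the 64-bit mini-controller command word
+ * from the GCMD_LAY_* fields before handing it to the function above.
+ * Issuing a raw NAND_CMD_RESET (0xFF) with a tWB wait could look like
+ * this (illustrative only):
+ *
+ *	u64 mini_ctrl_cmd = 0;
+ *
+ *	mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INSTR, GCMD_LAY_INSTR_CMD);
+ *	mini_ctrl_cmd |= GCMD_LAY_TWB;
+ *	mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INPUT_CMD, NAND_CMD_RESET);
+ *
+ *	cadence_nand_generic_cmd_send(cdns_ctrl, chip_nr, mini_ctrl_cmd);
+ */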
+
+/* Wait for data on slave DMA interface. */
+static int cadence_nand_wait_on_sdma(struct cdns_nand_ctrl *cdns_ctrl,
+ u8 *out_sdma_trd,
+ u32 *out_sdma_size)
+{
+ struct cadence_nand_irq_status irq_mask, irq_status;
+
+ irq_mask.trd_status = 0;
+ irq_mask.trd_error = 0;
+ irq_mask.status = INTR_STATUS_SDMA_TRIGG
+ | INTR_STATUS_SDMA_ERR
+ | INTR_STATUS_UNSUPP_CMD;
+
+ cadence_nand_set_irq_mask(cdns_ctrl, &irq_mask);
+ cadence_nand_wait_for_irq(cdns_ctrl, &irq_mask, &irq_status);
+ if (irq_status.status == 0) {
+ dev_err(cdns_ctrl->dev, "Timeout while waiting for SDMA\n");
+ return -ETIMEDOUT;
+ }
+
+ if (irq_status.status & INTR_STATUS_SDMA_TRIGG) {
+ *out_sdma_size = readl_relaxed(cdns_ctrl->reg + SDMA_SIZE);
+ *out_sdma_trd = readl_relaxed(cdns_ctrl->reg + SDMA_TRD_NUM);
+ *out_sdma_trd =
+ FIELD_GET(SDMA_TRD_NUM_SDMA_TRD, *out_sdma_trd);
+ } else {
+ dev_err(cdns_ctrl->dev, "SDMA error - irq_status %x\n",
+ irq_status.status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void cadence_nand_get_caps(struct cdns_nand_ctrl *cdns_ctrl)
+{
+ u32 reg;
+
+ reg = readl_relaxed(cdns_ctrl->reg + CTRL_FEATURES);
+
+ cdns_ctrl->caps2.max_banks = 1 << FIELD_GET(CTRL_FEATURES_N_BANKS, reg);
+
+ if (FIELD_GET(CTRL_FEATURES_DMA_DWITH64, reg))
+ cdns_ctrl->caps2.data_dma_width = 8;
+ else
+ cdns_ctrl->caps2.data_dma_width = 4;
+
+ if (reg & CTRL_FEATURES_CONTROL_DATA)
+ cdns_ctrl->caps2.data_control_supp = true;
+
+ if (reg & (CTRL_FEATURES_NVDDR_2_3
+ | CTRL_FEATURES_NVDDR))
+ cdns_ctrl->caps2.is_phy_type_dll = true;
+}
+
+/* Prepare CDMA descriptor. */
+static void
+cadence_nand_cdma_desc_prepare(struct cdns_nand_ctrl *cdns_ctrl,
+ char nf_mem, u32 flash_ptr, dma_addr_t mem_ptr,
+ dma_addr_t ctrl_data_ptr, u16 ctype)
+{
+ struct cadence_nand_cdma_desc *cdma_desc = cdns_ctrl->cdma_desc;
+
+ memset(cdma_desc, 0, sizeof(struct cadence_nand_cdma_desc));
+
+ /* Set fields for one descriptor. */
+ cdma_desc->flash_pointer = flash_ptr;
+ if (cdns_ctrl->ctrl_rev >= 13)
+ cdma_desc->bank = nf_mem;
+ else
+ cdma_desc->flash_pointer |= (nf_mem << CDMA_CFPTR_MEM_SHIFT);
+
+ cdma_desc->command_flags |= CDMA_CF_DMA_MASTER;
+ cdma_desc->command_flags |= CDMA_CF_INT;
+
+ cdma_desc->memory_pointer = mem_ptr;
+ cdma_desc->status = 0;
+ cdma_desc->sync_flag_pointer = 0;
+ cdma_desc->sync_arguments = 0;
+
+ cdma_desc->command_type = ctype;
+ cdma_desc->ctrl_data_ptr = ctrl_data_ptr;
+}
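+
+/*
+ * Worked example for the addressing above: on a pre-v13 controller
+ * (ctrl_rev < 13), bank 2 and row address 0x1234 are packed into a single
+ * field, flash_pointer = 0x1234 | (2 << CDMA_CFPTR_MEM_SHIFT) = 0x02001234.
+ * On v13+ controllers the bank number goes into the dedicated 'bank' field
+ * and flash_pointer carries only the row address.
+ */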
+
+static u8 cadence_nand_check_desc_error(struct cdns_nand_ctrl *cdns_ctrl,
+ u32 desc_status)
+{
+ if (desc_status & CDMA_CS_ERP)
+ return STAT_ERASED;
+
+ if (desc_status & CDMA_CS_UNCE)
+ return STAT_ECC_UNCORR;
+
+ if (desc_status & CDMA_CS_ERR) {
+ dev_err(cdns_ctrl->dev, ":CDMA desc error flag detected.\n");
+ return STAT_FAIL;
+ }
+
+ if (FIELD_GET(CDMA_CS_MAXERR, desc_status))
+ return STAT_ECC_CORR;
+
+ return STAT_FAIL;
+}
+
+static int cadence_nand_cdma_finish(struct cdns_nand_ctrl *cdns_ctrl)
+{
+ struct cadence_nand_cdma_desc *desc_ptr = cdns_ctrl->cdma_desc;
+ u8 status = STAT_BUSY;
+
+ if (desc_ptr->status & CDMA_CS_FAIL) {
+ status = cadence_nand_check_desc_error(cdns_ctrl,
+ desc_ptr->status);
+ dev_err(cdns_ctrl->dev, ":CDMA error %x\n", desc_ptr->status);
+ } else if (desc_ptr->status & CDMA_CS_COMP) {
+ /* Descriptor finished with no errors. */
+ if (desc_ptr->command_flags & CDMA_CF_CONT) {
+ dev_info(cdns_ctrl->dev, "DMA unsupported flag is set");
+ status = STAT_UNKNOWN;
+ } else {
+ /* Last descriptor. */
+ status = STAT_OK;
+ }
+ }
+
+ return status;
+}
+
+static int cadence_nand_cdma_send(struct cdns_nand_ctrl *cdns_ctrl,
+ u8 thread)
+{
+ u32 reg;
+ int status;
+
+ /* Wait for thread ready. */
+ status = cadence_nand_wait_for_value(cdns_ctrl, TRD_STATUS,
+ 1000000,
+ BIT(thread), true);
+ if (status)
+ return status;
+
+ cadence_nand_reset_irq(cdns_ctrl);
+ reinit_completion(&cdns_ctrl->complete);
+
+ writel_relaxed((u32)cdns_ctrl->dma_cdma_desc,
+ cdns_ctrl->reg + CMD_REG2);
+ writel_relaxed(0, cdns_ctrl->reg + CMD_REG3);
+
+ /* Select CDMA mode. */
+ reg = FIELD_PREP(CMD_REG0_CT, CMD_REG0_CT_CDMA);
+ /* Thread number. */
+ reg |= FIELD_PREP(CMD_REG0_TN, thread);
+ /* Issue command. */
+ writel_relaxed(reg, cdns_ctrl->reg + CMD_REG0);
+
+ return 0;
+}
+
+/* Send CDMA command and wait for it to finish. */
+static u32
+cadence_nand_cdma_send_and_wait(struct cdns_nand_ctrl *cdns_ctrl,
+ u8 thread)
+{
+ struct cadence_nand_irq_status irq_mask, irq_status = {0};
+ int status;
+
+ irq_mask.trd_status = BIT(thread);
+ irq_mask.trd_error = BIT(thread);
+ irq_mask.status = INTR_STATUS_CDMA_TERR;
+
+ cadence_nand_set_irq_mask(cdns_ctrl, &irq_mask);
+
+ status = cadence_nand_cdma_send(cdns_ctrl, thread);
+ if (status)
+ return status;
+
+ cadence_nand_wait_for_irq(cdns_ctrl, &irq_mask, &irq_status);
+
+ if (irq_status.status == 0 && irq_status.trd_status == 0 &&
+ irq_status.trd_error == 0) {
+ dev_err(cdns_ctrl->dev, "CDMA command timeout\n");
+ return -ETIMEDOUT;
+ }
+ if (irq_status.status & irq_mask.status) {
+ dev_err(cdns_ctrl->dev, "CDMA command failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/*
+ * ECC size depends on configured ECC strength and on maximum supported
+ * ECC step size.
+ */
+static int cadence_nand_calc_ecc_bytes(int max_step_size, int strength)
+{
+ int nbytes = DIV_ROUND_UP(fls(8 * max_step_size) * strength, 8);
+
+ return ALIGN(nbytes, 2);
+}
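+
+/*
+ * Worked examples for the formula above, with max_step_size = 1024:
+ * fls(8 * 1024) = fls(8192) = 14 bits per correction. For strength = 8,
+ * nbytes = DIV_ROUND_UP(14 * 8, 8) = 14, already even, so 14 ECC bytes.
+ * For strength = 5, nbytes = DIV_ROUND_UP(14 * 5, 8) = 9, which ALIGN()
+ * rounds up to 10 ECC bytes per step.
+ */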
+
+#define CADENCE_NAND_CALC_ECC_BYTES(max_step_size) \
+ static int \
+ cadence_nand_calc_ecc_bytes_##max_step_size(int step_size, \
+ int strength)\
+ {\
+ return cadence_nand_calc_ecc_bytes(max_step_size, strength);\
+ }
+
+CADENCE_NAND_CALC_ECC_BYTES(256)
+CADENCE_NAND_CALC_ECC_BYTES(512)
+CADENCE_NAND_CALC_ECC_BYTES(1024)
+CADENCE_NAND_CALC_ECC_BYTES(2048)
+CADENCE_NAND_CALC_ECC_BYTES(4096)
+
+/* Function reads BCH capabilities. */
+static int cadence_nand_read_bch_caps(struct cdns_nand_ctrl *cdns_ctrl)
+{
+ struct nand_ecc_caps *ecc_caps = &cdns_ctrl->ecc_caps;
+ int max_step_size = 0, nstrengths, i;
+ u32 reg;
+
+ reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_3);
+ cdns_ctrl->bch_metadata_size = FIELD_GET(BCH_CFG_3_METADATA_SIZE, reg);
+ if (cdns_ctrl->bch_metadata_size < 4) {
+ dev_err(cdns_ctrl->dev,
+ "Driver needs at least 4 bytes of BCH meta data\n");
+ return -EIO;
+ }
+
+ reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_0);
+ cdns_ctrl->ecc_strengths[0] = FIELD_GET(BCH_CFG_0_CORR_CAP_0, reg);
+ cdns_ctrl->ecc_strengths[1] = FIELD_GET(BCH_CFG_0_CORR_CAP_1, reg);
+ cdns_ctrl->ecc_strengths[2] = FIELD_GET(BCH_CFG_0_CORR_CAP_2, reg);
+ cdns_ctrl->ecc_strengths[3] = FIELD_GET(BCH_CFG_0_CORR_CAP_3, reg);
+
+ reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_1);
+ cdns_ctrl->ecc_strengths[4] = FIELD_GET(BCH_CFG_1_CORR_CAP_4, reg);
+ cdns_ctrl->ecc_strengths[5] = FIELD_GET(BCH_CFG_1_CORR_CAP_5, reg);
+ cdns_ctrl->ecc_strengths[6] = FIELD_GET(BCH_CFG_1_CORR_CAP_6, reg);
+ cdns_ctrl->ecc_strengths[7] = FIELD_GET(BCH_CFG_1_CORR_CAP_7, reg);
+
+ reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_2);
+ cdns_ctrl->ecc_stepinfos[0].stepsize =
+ FIELD_GET(BCH_CFG_2_SECT_0, reg);
+
+ cdns_ctrl->ecc_stepinfos[1].stepsize =
+ FIELD_GET(BCH_CFG_2_SECT_1, reg);
+
+ nstrengths = 0;
+ for (i = 0; i < BCH_MAX_NUM_CORR_CAPS; i++) {
+ if (cdns_ctrl->ecc_strengths[i] != 0)
+ nstrengths++;
+ }
+
+ ecc_caps->nstepinfos = 0;
+ for (i = 0; i < BCH_MAX_NUM_SECTOR_SIZES; i++) {
+ /* ECC strengths are common for all step infos. */
+ cdns_ctrl->ecc_stepinfos[i].nstrengths = nstrengths;
+ cdns_ctrl->ecc_stepinfos[i].strengths =
+ cdns_ctrl->ecc_strengths;
+
+ if (cdns_ctrl->ecc_stepinfos[i].stepsize != 0)
+ ecc_caps->nstepinfos++;
+
+ if (cdns_ctrl->ecc_stepinfos[i].stepsize > max_step_size)
+ max_step_size = cdns_ctrl->ecc_stepinfos[i].stepsize;
+ }
+ ecc_caps->stepinfos = &cdns_ctrl->ecc_stepinfos[0];
+
+ switch (max_step_size) {
+ case 256:
+ ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_256;
+ break;
+ case 512:
+ ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_512;
+ break;
+ case 1024:
+ ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_1024;
+ break;
+ case 2048:
+ ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_2048;
+ break;
+ case 4096:
+ ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_4096;
+ break;
+ default:
+ dev_err(cdns_ctrl->dev,
+ "Unsupported sector size(ecc step size) %d\n",
+ max_step_size);
+ return -EIO;
+ }
+
+ return 0;
+}
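+
+/*
+ * The nand_ecc_caps structure built above is later matched against the
+ * chip's ECC requirements by the raw NAND core (e.g. via
+ * nand_ecc_choose_conf()), which picks a (step size, strength) pair that
+ * the BCH engine supports.
+ */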
+
+/* Hardware initialization. */
+static int cadence_nand_hw_init(struct cdns_nand_ctrl *cdns_ctrl)
+{
+ int status;
+ u32 reg;
+
+ status = cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
+ 1000000,
+ CTRL_STATUS_INIT_COMP, false);
+ if (status)
+ return status;
+
+ reg = readl_relaxed(cdns_ctrl->reg + CTRL_VERSION);
+ cdns_ctrl->ctrl_rev = FIELD_GET(CTRL_VERSION_REV, reg);
+
+ dev_info(cdns_ctrl->dev,
+ "%s: cadence nand controller version reg %x\n",
+ __func__, reg);
+
+ /* Disable cache and multiplane. */
+ writel_relaxed(0, cdns_ctrl->reg + MULTIPLANE_CFG);
+ writel_relaxed(0, cdns_ctrl->reg + CACHE_CFG);
+
+ /* Clear all interrupts. */
+ writel_relaxed(0xFFFFFFFF, cdns_ctrl->reg + INTR_STATUS);
+
+ cadence_nand_get_caps(cdns_ctrl);
+ if (cadence_nand_read_bch_caps(cdns_ctrl))
+ return -EIO;
+
+ /*
+ * Set I/O width access to 8 bit. During SW device discovery,
+ * the access width is expected to be 8 bit.
+ */
+ status = cadence_nand_set_access_width16(cdns_ctrl, false);
+
+ return status;
+}
+
+#define TT_MAIN_OOB_AREAS 2
+#define TT_RAW_PAGE 3
+#define TT_BBM 4
+#define TT_MAIN_OOB_AREA_EXT 5
+
+/* Prepare size of data to transfer. */
+static void
+cadence_nand_prepare_data_size(struct nand_chip *chip,
+ int transfer_type)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u32 sec_size = 0, offset = 0, sec_cnt = 1;
+ u32 last_sec_size = cdns_chip->sector_size;
+ u32 data_ctrl_size = 0;
+ u32 reg = 0;
+
+ if (cdns_ctrl->curr_trans_type == transfer_type)
+ return;
+
+ switch (transfer_type) {
+ case TT_MAIN_OOB_AREA_EXT:
+ sec_cnt = cdns_chip->sector_count;
+ sec_size = cdns_chip->sector_size;
+ data_ctrl_size = cdns_chip->avail_oob_size;
+ break;
+ case TT_MAIN_OOB_AREAS:
+ sec_cnt = cdns_chip->sector_count;
+ last_sec_size = cdns_chip->sector_size
+ + cdns_chip->avail_oob_size;
+ sec_size = cdns_chip->sector_size;
+ break;
+ case TT_RAW_PAGE:
+ last_sec_size = mtd->writesize + mtd->oobsize;
+ break;
+ case TT_BBM:
+ offset = mtd->writesize + cdns_chip->bbm_offs;
+ last_sec_size = 8;
+ break;
+ }
+
+ reg = 0;
+ reg |= FIELD_PREP(TRAN_CFG_0_OFFSET, offset);
+ reg |= FIELD_PREP(TRAN_CFG_0_SEC_CNT, sec_cnt);
+ writel_relaxed(reg, cdns_ctrl->reg + TRAN_CFG_0);
+
+ reg = 0;
+ reg |= FIELD_PREP(TRAN_CFG_1_LAST_SEC_SIZE, last_sec_size);
+ reg |= FIELD_PREP(TRAN_CFG_1_SECTOR_SIZE, sec_size);
+ writel_relaxed(reg, cdns_ctrl->reg + TRAN_CFG_1);
+
+ if (cdns_ctrl->caps2.data_control_supp) {
+ reg = readl_relaxed(cdns_ctrl->reg + CONTROL_DATA_CTRL);
+ reg &= ~CONTROL_DATA_CTRL_SIZE;
+ reg |= FIELD_PREP(CONTROL_DATA_CTRL_SIZE, data_ctrl_size);
+ writel_relaxed(reg, cdns_ctrl->reg + CONTROL_DATA_CTRL);
+ }
+
+ cdns_ctrl->curr_trans_type = transfer_type;
+}
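+
+/*
+ * Worked example (illustrative geometry): for a 2048B page split into four
+ * 512B ECC sectors with 16 available OOB bytes, TT_MAIN_OOB_AREAS programs
+ * TRAN_CFG_0.SEC_CNT = 4, TRAN_CFG_1.SECTOR_SIZE = 512 and
+ * TRAN_CFG_1.LAST_SEC_SIZE = 512 + 16 = 528, so the user OOB data travels
+ * with the last sector of the page.
+ */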
+
+static int
+cadence_nand_cdma_transfer(struct cdns_nand_ctrl *cdns_ctrl, u8 chip_nr,
+ int page, void *buf, void *ctrl_dat, u32 buf_size,
+ u32 ctrl_dat_size, enum dma_data_direction dir,
+ bool with_ecc)
+{
+ dma_addr_t dma_buf, dma_ctrl_dat = 0;
+ u8 thread_nr = chip_nr;
+ int status;
+ u16 ctype;
+
+ if (dir == DMA_FROM_DEVICE)
+ ctype = CDMA_CT_RD;
+ else
+ ctype = CDMA_CT_WR;
+
+ cadence_nand_set_ecc_enable(cdns_ctrl, with_ecc);
+
+ dma_buf = dma_map_single(cdns_ctrl->dev, buf, buf_size, dir);
+ if (dma_mapping_error(cdns_ctrl->dev, dma_buf)) {
+ dev_err(cdns_ctrl->dev, "Failed to map DMA buffer\n");
+ return -EIO;
+ }
+
+ if (ctrl_dat && ctrl_dat_size) {
+ dma_ctrl_dat = dma_map_single(cdns_ctrl->dev, ctrl_dat,
+ ctrl_dat_size, dir);
+ if (dma_mapping_error(cdns_ctrl->dev, dma_ctrl_dat)) {
+ dma_unmap_single(cdns_ctrl->dev, dma_buf,
+ buf_size, dir);
+ dev_err(cdns_ctrl->dev, "Failed to map DMA buffer\n");
+ return -EIO;
+ }
+ }
+
+ cadence_nand_cdma_desc_prepare(cdns_ctrl, chip_nr, page,
+ dma_buf, dma_ctrl_dat, ctype);
+
+ status = cadence_nand_cdma_send_and_wait(cdns_ctrl, thread_nr);
+
+ dma_unmap_single(cdns_ctrl->dev, dma_buf,
+ buf_size, dir);
+
+ if (ctrl_dat && ctrl_dat_size)
+ dma_unmap_single(cdns_ctrl->dev, dma_ctrl_dat,
+ ctrl_dat_size, dir);
+ if (status)
+ return status;
+
+ return cadence_nand_cdma_finish(cdns_ctrl);
+}
+
+static void cadence_nand_set_timings(struct cdns_nand_ctrl *cdns_ctrl,
+ struct cadence_nand_timings *t)
+{
+ writel_relaxed(t->async_toggle_timings,
+ cdns_ctrl->reg + ASYNC_TOGGLE_TIMINGS);
+ writel_relaxed(t->timings0, cdns_ctrl->reg + TIMINGS0);
+ writel_relaxed(t->timings1, cdns_ctrl->reg + TIMINGS1);
+ writel_relaxed(t->timings2, cdns_ctrl->reg + TIMINGS2);
+
+ if (cdns_ctrl->caps2.is_phy_type_dll)
+ writel_relaxed(t->dll_phy_ctrl, cdns_ctrl->reg + DLL_PHY_CTRL);
+
+ writel_relaxed(t->phy_ctrl, cdns_ctrl->reg + PHY_CTRL);
+
+ if (cdns_ctrl->caps2.is_phy_type_dll) {
+ writel_relaxed(0, cdns_ctrl->reg + PHY_TSEL);
+ writel_relaxed(2, cdns_ctrl->reg + PHY_DQ_TIMING);
+ writel_relaxed(t->phy_dqs_timing,
+ cdns_ctrl->reg + PHY_DQS_TIMING);
+ writel_relaxed(t->phy_gate_lpbk_ctrl,
+ cdns_ctrl->reg + PHY_GATE_LPBK_CTRL);
+ writel_relaxed(PHY_DLL_MASTER_CTRL_BYPASS_MODE,
+ cdns_ctrl->reg + PHY_DLL_MASTER_CTRL);
+ writel_relaxed(0, cdns_ctrl->reg + PHY_DLL_SLAVE_CTRL);
+ }
+}
+
+static int cadence_nand_select_target(struct nand_chip *chip)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+
+ if (chip == cdns_ctrl->selected_chip)
+ return 0;
+
+ if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
+ 1000000,
+ CTRL_STATUS_CTRL_BUSY, true))
+ return -ETIMEDOUT;
+
+ cadence_nand_set_timings(cdns_ctrl, &cdns_chip->timings);
+
+ cadence_nand_set_ecc_strength(cdns_ctrl,
+ cdns_chip->corr_str_idx);
+
+ cadence_nand_set_erase_detection(cdns_ctrl, true,
+ chip->ecc.strength);
+
+ cdns_ctrl->curr_trans_type = -1;
+ cdns_ctrl->selected_chip = chip;
+
+ return 0;
+}
+
+static int cadence_nand_erase(struct nand_chip *chip, u32 page)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ int status;
+ u8 thread_nr = cdns_chip->cs[chip->cur_cs];
+
+ cadence_nand_cdma_desc_prepare(cdns_ctrl,
+ cdns_chip->cs[chip->cur_cs],
+ page, 0, 0,
+ CDMA_CT_ERASE);
+ status = cadence_nand_cdma_send_and_wait(cdns_ctrl, thread_nr);
+ if (status) {
+ dev_err(cdns_ctrl->dev, "erase operation failed\n");
+ return -EIO;
+ }
+
+ status = cadence_nand_cdma_finish(cdns_ctrl);
+ if (status)
+ return status;
+
+ return 0;
+}
+
+static int cadence_nand_read_bbm(struct nand_chip *chip, int page, u8 *buf)
+{
+ int status;
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ cadence_nand_prepare_data_size(chip, TT_BBM);
+
+ cadence_nand_set_skip_bytes_conf(cdns_ctrl, 0, 0, 0);
+
+	/*
+	 * Read only the bad block marker, from the offset
+	 * defined by the memory manufacturer.
+	 */
+ status = cadence_nand_cdma_transfer(cdns_ctrl,
+ cdns_chip->cs[chip->cur_cs],
+ page, cdns_ctrl->buf, NULL,
+ mtd->oobsize,
+ 0, DMA_FROM_DEVICE, false);
+ if (status) {
+ dev_err(cdns_ctrl->dev, "read BBM failed\n");
+ return -EIO;
+ }
+
+ memcpy(buf + cdns_chip->bbm_offs, cdns_ctrl->buf, cdns_chip->bbm_len);
+
+ return 0;
+}
+
+static int cadence_nand_write_page(struct nand_chip *chip,
+ const u8 *buf, int oob_required,
+ int page)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int status;
+ u16 marker_val = 0xFFFF;
+
+ status = cadence_nand_select_target(chip);
+ if (status)
+ return status;
+
+ cadence_nand_set_skip_bytes_conf(cdns_ctrl, cdns_chip->bbm_len,
+ mtd->writesize
+ + cdns_chip->bbm_offs,
+ 1);
+
+ if (oob_required) {
+ marker_val = *(u16 *)(chip->oob_poi
+ + cdns_chip->bbm_offs);
+ } else {
+ /* Set oob data to 0xFF. */
+ memset(cdns_ctrl->buf + mtd->writesize, 0xFF,
+ cdns_chip->avail_oob_size);
+ }
+
+ cadence_nand_set_skip_marker_val(cdns_ctrl, marker_val);
+
+ cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREA_EXT);
+
+ if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, mtd->writesize) &&
+ cdns_ctrl->caps2.data_control_supp) {
+ u8 *oob;
+
+ if (oob_required)
+ oob = chip->oob_poi;
+ else
+ oob = cdns_ctrl->buf + mtd->writesize;
+
+ status = cadence_nand_cdma_transfer(cdns_ctrl,
+ cdns_chip->cs[chip->cur_cs],
+ page, (void *)buf, oob,
+ mtd->writesize,
+ cdns_chip->avail_oob_size,
+ DMA_TO_DEVICE, true);
+ if (status) {
+ dev_err(cdns_ctrl->dev, "write page failed\n");
+ return -EIO;
+ }
+
+ return 0;
+ }
+
+ if (oob_required) {
+ /* Transfer the data to the oob area. */
+ memcpy(cdns_ctrl->buf + mtd->writesize, chip->oob_poi,
+ cdns_chip->avail_oob_size);
+ }
+
+ memcpy(cdns_ctrl->buf, buf, mtd->writesize);
+
+ cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREAS);
+
+ return cadence_nand_cdma_transfer(cdns_ctrl,
+ cdns_chip->cs[chip->cur_cs],
+ page, cdns_ctrl->buf, NULL,
+ mtd->writesize
+ + cdns_chip->avail_oob_size,
+ 0, DMA_TO_DEVICE, true);
+}
+
+static int cadence_nand_write_oob(struct nand_chip *chip, int page)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ memset(cdns_ctrl->buf, 0xFF, mtd->writesize);
+
+ return cadence_nand_write_page(chip, cdns_ctrl->buf, 1, page);
+}
+
+static int cadence_nand_write_page_raw(struct nand_chip *chip,
+ const u8 *buf, int oob_required,
+ int page)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int writesize = mtd->writesize;
+ int oobsize = mtd->oobsize;
+ int ecc_steps = chip->ecc.steps;
+ int ecc_size = chip->ecc.size;
+ int ecc_bytes = chip->ecc.bytes;
+ void *tmp_buf = cdns_ctrl->buf;
+ int oob_skip = cdns_chip->bbm_len;
+ size_t size = writesize + oobsize;
+ int i, pos, len;
+ int status = 0;
+
+ status = cadence_nand_select_target(chip);
+ if (status)
+ return status;
+
+	/*
+	 * Fill the buffer with 0xff first, except for a full page
+	 * transfer; this simplifies the logic.
+	 */
+ if (!buf || !oob_required)
+ memset(tmp_buf, 0xff, size);
+
+ cadence_nand_set_skip_bytes_conf(cdns_ctrl, 0, 0, 0);
+
+ /* Arrange the buffer for syndrome payload/ecc layout. */
+ if (buf) {
+ for (i = 0; i < ecc_steps; i++) {
+ pos = i * (ecc_size + ecc_bytes);
+ len = ecc_size;
+
+ if (pos >= writesize)
+ pos += oob_skip;
+ else if (pos + len > writesize)
+ len = writesize - pos;
+
+ memcpy(tmp_buf + pos, buf, len);
+ buf += len;
+ if (len < ecc_size) {
+ len = ecc_size - len;
+ memcpy(tmp_buf + writesize + oob_skip, buf,
+ len);
+ buf += len;
+ }
+ }
+ }
+
+ if (oob_required) {
+ const u8 *oob = chip->oob_poi;
+ u32 oob_data_offset = (cdns_chip->sector_count - 1) *
+ (cdns_chip->sector_size + chip->ecc.bytes)
+ + cdns_chip->sector_size + oob_skip;
+
+ /* BBM at the beginning of the OOB area. */
+ memcpy(tmp_buf + writesize, oob, oob_skip);
+
+ /* OOB free. */
+ memcpy(tmp_buf + oob_data_offset, oob,
+ cdns_chip->avail_oob_size);
+ oob += cdns_chip->avail_oob_size;
+
+ /* OOB ECC. */
+ for (i = 0; i < ecc_steps; i++) {
+ pos = ecc_size + i * (ecc_size + ecc_bytes);
+ if (i == (ecc_steps - 1))
+ pos += cdns_chip->avail_oob_size;
+
+ len = ecc_bytes;
+
+ if (pos >= writesize)
+ pos += oob_skip;
+ else if (pos + len > writesize)
+ len = writesize - pos;
+
+ memcpy(tmp_buf + pos, oob, len);
+ oob += len;
+ if (len < ecc_bytes) {
+ len = ecc_bytes - len;
+ memcpy(tmp_buf + writesize + oob_skip, oob,
+ len);
+ oob += len;
+ }
+ }
+ }
+
+ cadence_nand_prepare_data_size(chip, TT_RAW_PAGE);
+
+ return cadence_nand_cdma_transfer(cdns_ctrl,
+ cdns_chip->cs[chip->cur_cs],
+ page, cdns_ctrl->buf, NULL,
+ mtd->writesize +
+ mtd->oobsize,
+ 0, DMA_TO_DEVICE, false);
+}
+
+static int cadence_nand_write_oob_raw(struct nand_chip *chip,
+ int page)
+{
+ return cadence_nand_write_page_raw(chip, NULL, true, page);
+}
+
+static int cadence_nand_read_page(struct nand_chip *chip,
+ u8 *buf, int oob_required, int page)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int status = 0;
+ int ecc_err_count = 0;
+
+ status = cadence_nand_select_target(chip);
+ if (status)
+ return status;
+
+ cadence_nand_set_skip_bytes_conf(cdns_ctrl, cdns_chip->bbm_len,
+ mtd->writesize
+ + cdns_chip->bbm_offs, 1);
+
+	/*
+	 * If the data buffer can be accessed by DMA and the data_control
+	 * feature is supported, transfer the data and OOB directly.
+	 */
+ if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, mtd->writesize) &&
+ cdns_ctrl->caps2.data_control_supp) {
+ u8 *oob;
+
+ if (oob_required)
+ oob = chip->oob_poi;
+ else
+ oob = cdns_ctrl->buf + mtd->writesize;
+
+ cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREA_EXT);
+ status = cadence_nand_cdma_transfer(cdns_ctrl,
+ cdns_chip->cs[chip->cur_cs],
+ page, buf, oob,
+ mtd->writesize,
+ cdns_chip->avail_oob_size,
+ DMA_FROM_DEVICE, true);
+ /* Otherwise use bounce buffer. */
+ } else {
+ cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREAS);
+ status = cadence_nand_cdma_transfer(cdns_ctrl,
+ cdns_chip->cs[chip->cur_cs],
+ page, cdns_ctrl->buf,
+ NULL, mtd->writesize
+ + cdns_chip->avail_oob_size,
+ 0, DMA_FROM_DEVICE, true);
+
+ memcpy(buf, cdns_ctrl->buf, mtd->writesize);
+ if (oob_required)
+ memcpy(chip->oob_poi,
+ cdns_ctrl->buf + mtd->writesize,
+ mtd->oobsize);
+ }
+
+ switch (status) {
+ case STAT_ECC_UNCORR:
+ mtd->ecc_stats.failed++;
+ ecc_err_count++;
+ break;
+ case STAT_ECC_CORR:
+ ecc_err_count = FIELD_GET(CDMA_CS_MAXERR,
+ cdns_ctrl->cdma_desc->status);
+ mtd->ecc_stats.corrected += ecc_err_count;
+ break;
+ case STAT_ERASED:
+ case STAT_OK:
+ break;
+ default:
+ dev_err(cdns_ctrl->dev, "read page failed\n");
+ return -EIO;
+ }
+
+ if (oob_required)
+ if (cadence_nand_read_bbm(chip, page, chip->oob_poi))
+ return -EIO;
+
+ return ecc_err_count;
+}
+
+/* Reads OOB data from the device. */
+static int cadence_nand_read_oob(struct nand_chip *chip, int page)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+
+ return cadence_nand_read_page(chip, cdns_ctrl->buf, 1, page);
+}
+
+static int cadence_nand_read_page_raw(struct nand_chip *chip,
+ u8 *buf, int oob_required, int page)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int oob_skip = cdns_chip->bbm_len;
+ int writesize = mtd->writesize;
+ int ecc_steps = chip->ecc.steps;
+ int ecc_size = chip->ecc.size;
+ int ecc_bytes = chip->ecc.bytes;
+ void *tmp_buf = cdns_ctrl->buf;
+ int i, pos, len;
+ int status = 0;
+
+ status = cadence_nand_select_target(chip);
+ if (status)
+ return status;
+
+ cadence_nand_set_skip_bytes_conf(cdns_ctrl, 0, 0, 0);
+
+ cadence_nand_prepare_data_size(chip, TT_RAW_PAGE);
+ status = cadence_nand_cdma_transfer(cdns_ctrl,
+ cdns_chip->cs[chip->cur_cs],
+ page, cdns_ctrl->buf, NULL,
+ mtd->writesize
+ + mtd->oobsize,
+ 0, DMA_FROM_DEVICE, false);
+
+ switch (status) {
+ case STAT_ERASED:
+ case STAT_OK:
+ break;
+ default:
+ dev_err(cdns_ctrl->dev, "read raw page failed\n");
+ return -EIO;
+ }
+
+ /* Arrange the buffer for syndrome payload/ecc layout. */
+ if (buf) {
+ for (i = 0; i < ecc_steps; i++) {
+ pos = i * (ecc_size + ecc_bytes);
+ len = ecc_size;
+
+ if (pos >= writesize)
+ pos += oob_skip;
+ else if (pos + len > writesize)
+ len = writesize - pos;
+
+ memcpy(buf, tmp_buf + pos, len);
+ buf += len;
+ if (len < ecc_size) {
+ len = ecc_size - len;
+ memcpy(buf, tmp_buf + writesize + oob_skip,
+ len);
+ buf += len;
+ }
+ }
+ }
+
+ if (oob_required) {
+ u8 *oob = chip->oob_poi;
+ u32 oob_data_offset = (cdns_chip->sector_count - 1) *
+ (cdns_chip->sector_size + chip->ecc.bytes)
+ + cdns_chip->sector_size + oob_skip;
+
+ /* OOB free. */
+ memcpy(oob, tmp_buf + oob_data_offset,
+ cdns_chip->avail_oob_size);
+
+ /* BBM at the beginning of the OOB area. */
+ memcpy(oob, tmp_buf + writesize, oob_skip);
+
+ oob += cdns_chip->avail_oob_size;
+
+ /* OOB ECC */
+ for (i = 0; i < ecc_steps; i++) {
+ pos = ecc_size + i * (ecc_size + ecc_bytes);
+ len = ecc_bytes;
+
+ if (i == (ecc_steps - 1))
+ pos += cdns_chip->avail_oob_size;
+
+ if (pos >= writesize)
+ pos += oob_skip;
+ else if (pos + len > writesize)
+ len = writesize - pos;
+
+ memcpy(oob, tmp_buf + pos, len);
+ oob += len;
+ if (len < ecc_bytes) {
+ len = ecc_bytes - len;
+ memcpy(oob, tmp_buf + writesize + oob_skip,
+ len);
+ oob += len;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int cadence_nand_read_oob_raw(struct nand_chip *chip,
+ int page)
+{
+ return cadence_nand_read_page_raw(chip, NULL, true, page);
+}
+
+static void cadence_nand_slave_dma_transfer_finished(void *data)
+{
+ struct completion *finished = data;
+
+ complete(finished);
+}
+
+static int cadence_nand_slave_dma_transfer(struct cdns_nand_ctrl *cdns_ctrl,
+ void *buf,
+ dma_addr_t dev_dma, size_t len,
+ enum dma_data_direction dir)
+{
+ DECLARE_COMPLETION_ONSTACK(finished);
+ struct dma_chan *chan;
+ struct dma_device *dma_dev;
+ dma_addr_t src_dma, dst_dma, buf_dma;
+ struct dma_async_tx_descriptor *tx;
+ dma_cookie_t cookie;
+
+ chan = cdns_ctrl->dmac;
+ dma_dev = chan->device;
+
+ buf_dma = dma_map_single(dma_dev->dev, buf, len, dir);
+ if (dma_mapping_error(dma_dev->dev, buf_dma)) {
+ dev_err(cdns_ctrl->dev, "Failed to map DMA buffer\n");
+ goto err;
+ }
+
+ if (dir == DMA_FROM_DEVICE) {
+ src_dma = cdns_ctrl->io.dma;
+ dst_dma = buf_dma;
+ } else {
+ src_dma = buf_dma;
+ dst_dma = cdns_ctrl->io.dma;
+ }
+
+ tx = dmaengine_prep_dma_memcpy(cdns_ctrl->dmac, dst_dma, src_dma, len,
+ DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+ if (!tx) {
+ dev_err(cdns_ctrl->dev, "Failed to prepare DMA memcpy\n");
+ goto err_unmap;
+ }
+
+ tx->callback = cadence_nand_slave_dma_transfer_finished;
+ tx->callback_param = &finished;
+
+ cookie = dmaengine_submit(tx);
+ if (dma_submit_error(cookie)) {
+ dev_err(cdns_ctrl->dev, "Failed to do DMA tx_submit\n");
+ goto err_unmap;
+ }
+
+ dma_async_issue_pending(cdns_ctrl->dmac);
+ wait_for_completion(&finished);
+
+	dma_unmap_single(dma_dev->dev, buf_dma, len, dir);
+
+ return 0;
+
+err_unmap:
+	dma_unmap_single(dma_dev->dev, buf_dma, len, dir);
+
+err:
+ dev_dbg(cdns_ctrl->dev, "Fall back to CPU I/O\n");
+
+ return -EIO;
+}
+
+static int cadence_nand_read_buf(struct cdns_nand_ctrl *cdns_ctrl,
+ u8 *buf, int len)
+{
+ u8 thread_nr = 0;
+ u32 sdma_size;
+ int status;
+
+	/* Wait until the slave DMA interface is ready for data transfer. */
+ status = cadence_nand_wait_on_sdma(cdns_ctrl, &thread_nr, &sdma_size);
+ if (status)
+ return status;
+
+ if (!cdns_ctrl->caps1->has_dma) {
+ int len_in_words = len >> 2;
+
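+		/*
+		 * PIO path, illustrative: for len = 10, the two whole words
+		 * (8 bytes) are read straight into buf below; the rest of
+		 * what the controller exposes (up to sdma_size) goes through
+		 * the bounce buffer and only the 2 outstanding bytes are
+		 * copied out.
+		 */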
+		/* Read the word-aligned part of the data. */
+ ioread32_rep(cdns_ctrl->io.virt, buf, len_in_words);
+ if (sdma_size > len) {
+			/* Read any remaining data from the slave DMA interface. */
+ ioread32_rep(cdns_ctrl->io.virt, cdns_ctrl->buf,
+ sdma_size / 4 - len_in_words);
+			/* Copy the unaligned tail. */
+ memcpy(buf + (len_in_words << 2), cdns_ctrl->buf,
+ len - (len_in_words << 2));
+ }
+ return 0;
+ }
+
+ if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, len)) {
+ status = cadence_nand_slave_dma_transfer(cdns_ctrl, buf,
+ cdns_ctrl->io.dma,
+ len, DMA_FROM_DEVICE);
+ if (status == 0)
+ return 0;
+
+ dev_warn(cdns_ctrl->dev,
+ "Slave DMA transfer failed. Try again using bounce buffer.");
+ }
+
+	/* If the DMA transfer is not possible or it failed, use the bounce buffer. */
+ status = cadence_nand_slave_dma_transfer(cdns_ctrl, cdns_ctrl->buf,
+ cdns_ctrl->io.dma,
+ sdma_size, DMA_FROM_DEVICE);
+
+ if (status) {
+ dev_err(cdns_ctrl->dev, "Slave DMA transfer failed");
+ return status;
+ }
+
+ memcpy(buf, cdns_ctrl->buf, len);
+
+ return 0;
+}
+
+static int cadence_nand_write_buf(struct cdns_nand_ctrl *cdns_ctrl,
+ const u8 *buf, int len)
+{
+ u8 thread_nr = 0;
+ u32 sdma_size;
+ int status;
+
+	/* Wait until the slave DMA interface is ready for data transfer. */
+ status = cadence_nand_wait_on_sdma(cdns_ctrl, &thread_nr, &sdma_size);
+ if (status)
+ return status;
+
+ if (!cdns_ctrl->caps1->has_dma) {
+ int len_in_words = len >> 2;
+
+ iowrite32_rep(cdns_ctrl->io.virt, buf, len_in_words);
+ if (sdma_size > len) {
+			/* Copy the unaligned tail into the bounce buffer. */
+ memcpy(cdns_ctrl->buf, buf + (len_in_words << 2),
+ len - (len_in_words << 2));
+			/* Write all the data the NAND controller expects. */
+ iowrite32_rep(cdns_ctrl->io.virt, cdns_ctrl->buf,
+ sdma_size / 4 - len_in_words);
+ }
+
+ return 0;
+ }
+
+ if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, len)) {
+ status = cadence_nand_slave_dma_transfer(cdns_ctrl, (void *)buf,
+ cdns_ctrl->io.dma,
+ len, DMA_TO_DEVICE);
+ if (status == 0)
+ return 0;
+
+ dev_warn(cdns_ctrl->dev,
+ "Slave DMA transfer failed. Try again using bounce buffer.");
+ }
+
+	/* If the DMA transfer is not possible or it failed, use the bounce buffer. */
+ memcpy(cdns_ctrl->buf, buf, len);
+
+ status = cadence_nand_slave_dma_transfer(cdns_ctrl, cdns_ctrl->buf,
+ cdns_ctrl->io.dma,
+ sdma_size, DMA_TO_DEVICE);
+
+ if (status)
+ dev_err(cdns_ctrl->dev, "Slave DMA transfer failed");
+
+ return status;
+}
+
+static int cadence_nand_force_byte_access(struct nand_chip *chip,
+ bool force_8bit)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ int status;
+
+	/*
+	 * Callers of this function do not verify if the NAND is using a 16-bit
+	 * or an 8-bit bus for normal operations, so we need to take care of
+	 * that here by leaving the configuration unchanged if the NAND does
+	 * not have the NAND_BUSWIDTH_16 flag set.
+	 */
+ if (!(chip->options & NAND_BUSWIDTH_16))
+ return 0;
+
+ status = cadence_nand_set_access_width16(cdns_ctrl, !force_8bit);
+
+ return status;
+}
+
+static int cadence_nand_cmd_opcode(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ const struct nand_op_instr *instr;
+ unsigned int op_id = 0;
+ u64 mini_ctrl_cmd = 0;
+ int ret;
+
+ instr = &subop->instrs[op_id];
+
+ if (instr->delay_ns > 0)
+ mini_ctrl_cmd |= GCMD_LAY_TWB;
+
+ mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INSTR,
+ GCMD_LAY_INSTR_CMD);
+ mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INPUT_CMD,
+ instr->ctx.cmd.opcode);
+
+ ret = cadence_nand_generic_cmd_send(cdns_ctrl,
+ cdns_chip->cs[chip->cur_cs],
+ mini_ctrl_cmd);
+ if (ret)
+ dev_err(cdns_ctrl->dev, "send cmd %x failed\n",
+ instr->ctx.cmd.opcode);
+
+ return ret;
+}
+
+static int cadence_nand_cmd_address(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ const struct nand_op_instr *instr;
+ unsigned int op_id = 0;
+ u64 mini_ctrl_cmd = 0;
+ unsigned int offset, naddrs;
+ u64 address = 0;
+ const u8 *addrs;
+ int ret;
+ int i;
+
+ instr = &subop->instrs[op_id];
+
+ if (instr->delay_ns > 0)
+ mini_ctrl_cmd |= GCMD_LAY_TWB;
+
+ mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INSTR,
+ GCMD_LAY_INSTR_ADDR);
+
+ offset = nand_subop_get_addr_start_off(subop, op_id);
+ naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
+ addrs = &instr->ctx.addr.addrs[offset];
+
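+	/*
+	 * The address cycles below are packed little-endian, lowest cycle
+	 * first: e.g. cycles {0x05, 0x30, 0x02} (illustrative values) yield
+	 * address = 0x023005.
+	 */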
+ for (i = 0; i < naddrs; i++)
+ address |= (u64)addrs[i] << (8 * i);
+
+ mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INPUT_ADDR,
+ address);
+ mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INPUT_ADDR_SIZE,
+ naddrs - 1);
+
+ ret = cadence_nand_generic_cmd_send(cdns_ctrl,
+ cdns_chip->cs[chip->cur_cs],
+ mini_ctrl_cmd);
+ if (ret)
+ dev_err(cdns_ctrl->dev, "send address %llx failed\n", address);
+
+ return ret;
+}
+
+static int cadence_nand_cmd_erase(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ unsigned int op_id;
+
+ if (subop->instrs[0].ctx.cmd.opcode == NAND_CMD_ERASE1) {
+ int i;
+ const struct nand_op_instr *instr = NULL;
+ unsigned int offset, naddrs;
+ const u8 *addrs;
+ u32 page = 0;
+
+ instr = &subop->instrs[1];
+ offset = nand_subop_get_addr_start_off(subop, 1);
+ naddrs = nand_subop_get_num_addr_cyc(subop, 1);
+ addrs = &instr->ctx.addr.addrs[offset];
+
+ for (i = 0; i < naddrs; i++)
+ page |= (u32)addrs[i] << (8 * i);
+
+ return cadence_nand_erase(chip, page);
+ }
+
+	/*
+	 * If it is not an erase operation, handle the operation by
+	 * calling the exec_op function.
+	 */
+ for (op_id = 0; op_id < subop->ninstrs; op_id++) {
+ int ret;
+ const struct nand_operation nand_op = {
+ .cs = chip->cur_cs,
+ .instrs = &subop->instrs[op_id],
+ .ninstrs = 1};
+ ret = chip->controller->ops->exec_op(chip, &nand_op, false);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int cadence_nand_cmd_data(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ const struct nand_op_instr *instr;
+ unsigned int offset, op_id = 0;
+ u64 mini_ctrl_cmd = 0;
+ int len = 0;
+ int ret;
+
+ instr = &subop->instrs[op_id];
+
+ if (instr->delay_ns > 0)
+ mini_ctrl_cmd |= GCMD_LAY_TWB;
+
+ mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INSTR,
+ GCMD_LAY_INSTR_DATA);
+
+ if (instr->type == NAND_OP_DATA_OUT_INSTR)
+ mini_ctrl_cmd |= FIELD_PREP(GCMD_DIR,
+ GCMD_DIR_WRITE);
+
+ len = nand_subop_get_data_len(subop, op_id);
+ offset = nand_subop_get_data_start_off(subop, op_id);
+ mini_ctrl_cmd |= FIELD_PREP(GCMD_SECT_CNT, 1);
+ mini_ctrl_cmd |= FIELD_PREP(GCMD_LAST_SIZE, len);
+ if (instr->ctx.data.force_8bit) {
+ ret = cadence_nand_force_byte_access(chip, true);
+ if (ret) {
+ dev_err(cdns_ctrl->dev,
+ "cannot change byte access generic data cmd failed\n");
+ return ret;
+ }
+ }
+
+ ret = cadence_nand_generic_cmd_send(cdns_ctrl,
+ cdns_chip->cs[chip->cur_cs],
+ mini_ctrl_cmd);
+ if (ret) {
+ dev_err(cdns_ctrl->dev, "send generic data cmd failed\n");
+ return ret;
+ }
+
+ if (instr->type == NAND_OP_DATA_IN_INSTR) {
+ void *buf = instr->ctx.data.buf.in + offset;
+
+ ret = cadence_nand_read_buf(cdns_ctrl, buf, len);
+ } else {
+ const void *buf = instr->ctx.data.buf.out + offset;
+
+ ret = cadence_nand_write_buf(cdns_ctrl, buf, len);
+ }
+
+ if (ret) {
+ dev_err(cdns_ctrl->dev, "data transfer failed for generic command\n");
+ return ret;
+ }
+
+ if (instr->ctx.data.force_8bit) {
+ ret = cadence_nand_force_byte_access(chip, false);
+ if (ret) {
+ dev_err(cdns_ctrl->dev,
+ "cannot change byte access generic data cmd failed\n");
+ }
+ }
+
+ return ret;
+}
+
+static int cadence_nand_cmd_waitrdy(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ int status;
+ unsigned int op_id = 0;
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ const struct nand_op_instr *instr = &subop->instrs[op_id];
+ u32 timeout_us = instr->ctx.waitrdy.timeout_ms * 1000;
+
+ status = cadence_nand_wait_for_value(cdns_ctrl, RBN_SETINGS,
+ timeout_us,
+ BIT(cdns_chip->cs[chip->cur_cs]),
+ false);
+ return status;
+}
+
+static const struct nand_op_parser cadence_nand_op_parser = NAND_OP_PARSER(
+ NAND_OP_PARSER_PATTERN(
+ cadence_nand_cmd_erase,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ERASE_ADDRESS_CYC),
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ cadence_nand_cmd_opcode,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ cadence_nand_cmd_address,
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC)),
+ NAND_OP_PARSER_PATTERN(
+ cadence_nand_cmd_data,
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, MAX_DATA_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ cadence_nand_cmd_data,
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, MAX_DATA_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ cadence_nand_cmd_waitrdy,
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false))
+ );
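+
+/*
+ * Illustrative dispatch: an erase built as CMD(NAND_CMD_ERASE1) - ADDR(row) -
+ * CMD(NAND_CMD_ERASE2) - WAITRDY matches the first pattern above and is turned
+ * into a single CDMA erase by cadence_nand_cmd_erase(); any other sequence of
+ * that shape is replayed instruction by instruction through exec_op() in the
+ * fallback loop of the same helper.
+ */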
+
+static int cadence_nand_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ if (!check_only) {
+ int status = cadence_nand_select_target(chip);
+
+ if (status)
+ return status;
+ }
+
+ return nand_op_parser_exec_op(chip, &cadence_nand_op_parser, op,
+ check_only);
+}
+
+static int cadence_nand_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = cdns_chip->bbm_len;
+ oobregion->length = cdns_chip->avail_oob_size
+ - cdns_chip->bbm_len;
+
+ return 0;
+}
+
+static int cadence_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = cdns_chip->avail_oob_size;
+ oobregion->length = chip->ecc.total;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops cadence_nand_ooblayout_ops = {
+ .free = cadence_nand_ooblayout_free,
+ .ecc = cadence_nand_ooblayout_ecc,
+};
+
+static int calc_cycl(u32 timing, u32 clock)
+{
+ if (timing == 0 || clock == 0)
+ return 0;
+
+ if ((timing % clock) > 0)
+ return timing / clock;
+ else
+ return timing / clock - 1;
+}
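+
+/*
+ * calc_cycl() returns the cycle count minus one, matching the (N + 1) cycle
+ * semantics of the timing fields: e.g. timing = 25000 ps at clock = 20000 ps
+ * returns 1 (two cycles, 40 ns >= 25 ns), and an exact multiple such as
+ * 40000 ps returns 40000 / 20000 - 1 = 1 as well (illustrative values).
+ */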
+
+/* Calculate max data valid window. */
+static inline u32 calc_tdvw_max(u32 trp_cnt, u32 clk_period, u32 trhoh_min,
+ u32 board_delay_skew_min, u32 ext_mode)
+{
+ if (ext_mode == 0)
+ clk_period /= 2;
+
+ return (trp_cnt + 1) * clk_period + trhoh_min +
+ board_delay_skew_min;
+}
+
+/* Calculate data valid window. */
+static inline u32 calc_tdvw(u32 trp_cnt, u32 clk_period, u32 trhoh_min,
+ u32 trea_max, u32 ext_mode)
+{
+ if (ext_mode == 0)
+ clk_period /= 2;
+
+ return (trp_cnt + 1) * clk_period + trhoh_min - trea_max;
+}
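+
+/*
+ * Worked example with illustrative numbers (all in picoseconds): for
+ * trp_cnt = 0, clk_period = 20000, tRHOH_min = 15000, tREA_max = 16000 and
+ * ext_mode = 1, calc_tdvw() gives 20000 + 15000 - 16000 = 19000, and with
+ * board_delay_skew_min = 4000 calc_tdvw_max() gives 20000 + 15000 + 4000 =
+ * 39000; with ext_mode = 0 both windows shrink by half a clock period.
+ */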
+
+static int
+cadence_nand_setup_interface(struct nand_chip *chip, int chipnr,
+ const struct nand_interface_config *conf)
+{
+ const struct nand_sdr_timings *sdr;
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ struct cadence_nand_timings *t = &cdns_chip->timings;
+ u32 reg;
+ u32 board_delay = cdns_ctrl->board_delay;
+ u32 clk_period = DIV_ROUND_DOWN_ULL(1000000000000ULL,
+ cdns_ctrl->nf_clk_rate);
+ u32 tceh_cnt, tcs_cnt, tadl_cnt, tccs_cnt;
+ u32 tfeat_cnt, trhz_cnt, tvdly_cnt;
+ u32 trhw_cnt, twb_cnt, twh_cnt = 0, twhr_cnt;
+ u32 twp_cnt = 0, trp_cnt = 0, trh_cnt = 0;
+ u32 if_skew = cdns_ctrl->caps1->if_skew;
+ u32 board_delay_skew_min = board_delay - if_skew;
+ u32 board_delay_skew_max = board_delay + if_skew;
+ u32 dqs_sampl_res, phony_dqs_mod;
+ u32 tdvw, tdvw_min, tdvw_max;
+ u32 ext_rd_mode, ext_wr_mode;
+ u32 dll_phy_dqs_timing = 0, phony_dqs_timing = 0, rd_del_sel = 0;
+ u32 sampling_point;
+
+ sdr = nand_get_sdr_timings(conf);
+ if (IS_ERR(sdr))
+ return PTR_ERR(sdr);
+
+ memset(t, 0, sizeof(*t));
+ /* Sampling point calculation. */
+
+ if (cdns_ctrl->caps2.is_phy_type_dll)
+ phony_dqs_mod = 2;
+ else
+ phony_dqs_mod = 1;
+
+ dqs_sampl_res = clk_period / phony_dqs_mod;
+
+ tdvw_min = sdr->tREA_max + board_delay_skew_max;
+	/*
+	 * The idea of these calculations is to get the optimum values
+	 * for the tRP and tRH timings. If it is NOT possible to sample data
+	 * with optimal tRP/tRH settings, the parameters will be extended.
+	 * If clk_period is 50ns (the lowest value) this condition is met
+	 * for asynchronous timing modes 1, 2, 3, 4 and 5.
+	 * If clk_period is 20ns the condition is met only
+	 * for asynchronous timing mode 5.
+	 */
+ if (sdr->tRC_min <= clk_period &&
+ sdr->tRP_min <= (clk_period / 2) &&
+ sdr->tREH_min <= (clk_period / 2)) {
+ /* Performance mode. */
+ ext_rd_mode = 0;
+ tdvw = calc_tdvw(trp_cnt, clk_period, sdr->tRHOH_min,
+ sdr->tREA_max, ext_rd_mode);
+ tdvw_max = calc_tdvw_max(trp_cnt, clk_period, sdr->tRHOH_min,
+ board_delay_skew_min,
+ ext_rd_mode);
+		/*
+		 * Check if the data valid window and sampling point can be
+		 * found, and that the point is not on the edge (i.e. we have
+		 * hold margin). If not, extend the tRP timing.
+		 */
+ if (tdvw > 0) {
+ if (tdvw_max <= tdvw_min ||
+ (tdvw_max % dqs_sampl_res) == 0) {
+				/*
+				 * No valid sampling point, so the RE pulse
+				 * needs to be widened by half a clock cycle.
+				 */
+ ext_rd_mode = 1;
+ }
+ } else {
+			/*
+			 * There is no valid window in which to sample the
+			 * data, so tRP needs to be widened. Very conservative
+			 * calculations are performed here.
+			 */
+ trp_cnt = (sdr->tREA_max + board_delay_skew_max
+ + dqs_sampl_res) / clk_period;
+ ext_rd_mode = 1;
+ }
+
+ } else {
+ /* Extended read mode. */
+ u32 trh;
+
+ ext_rd_mode = 1;
+ trp_cnt = calc_cycl(sdr->tRP_min, clk_period);
+ trh = sdr->tRC_min - ((trp_cnt + 1) * clk_period);
+ if (sdr->tREH_min >= trh)
+ trh_cnt = calc_cycl(sdr->tREH_min, clk_period);
+ else
+ trh_cnt = calc_cycl(trh, clk_period);
+
+ tdvw = calc_tdvw(trp_cnt, clk_period, sdr->tRHOH_min,
+ sdr->tREA_max, ext_rd_mode);
+		/*
+		 * Check if the data valid window and sampling point can be
+		 * found, or, if the point is at the edge, whether the previous
+		 * one is valid; if not, extend the tRP timing.
+		 */
+ if (tdvw > 0) {
+ tdvw_max = calc_tdvw_max(trp_cnt, clk_period,
+ sdr->tRHOH_min,
+ board_delay_skew_min,
+ ext_rd_mode);
+
+ if ((((tdvw_max / dqs_sampl_res)
+ * dqs_sampl_res) <= tdvw_min) ||
+ (((tdvw_max % dqs_sampl_res) == 0) &&
+ (((tdvw_max / dqs_sampl_res - 1)
+ * dqs_sampl_res) <= tdvw_min))) {
+				/*
+				 * The data valid window is narrower than the
+				 * sampling resolution and does not hit any
+				 * sampling point. To make sure a sampling
+				 * point will be found, the RE low pulse width
+				 * is extended by one clock cycle.
+				 */
+ trp_cnt = trp_cnt + 1;
+ }
+ } else {
+			/*
+			 * There is no valid window in which to sample the
+			 * data, so tRP needs to be widened. Very conservative
+			 * calculations are performed here.
+			 */
+ trp_cnt = (sdr->tREA_max + board_delay_skew_max
+ + dqs_sampl_res) / clk_period;
+ }
+ }
+
+ tdvw_max = calc_tdvw_max(trp_cnt, clk_period,
+ sdr->tRHOH_min,
+ board_delay_skew_min, ext_rd_mode);
+
+ if (sdr->tWC_min <= clk_period &&
+ (sdr->tWP_min + if_skew) <= (clk_period / 2) &&
+ (sdr->tWH_min + if_skew) <= (clk_period / 2)) {
+ ext_wr_mode = 0;
+ } else {
+ u32 twh;
+
+ ext_wr_mode = 1;
+ twp_cnt = calc_cycl(sdr->tWP_min + if_skew, clk_period);
+ if ((twp_cnt + 1) * clk_period < (sdr->tALS_min + if_skew))
+ twp_cnt = calc_cycl(sdr->tALS_min + if_skew,
+ clk_period);
+
+ twh = (sdr->tWC_min - (twp_cnt + 1) * clk_period);
+ if (sdr->tWH_min >= twh)
+ twh = sdr->tWH_min;
+
+ twh_cnt = calc_cycl(twh + if_skew, clk_period);
+ }
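+
+	/*
+	 * Illustrative extended-write case for the branch above: with
+	 * if_skew = 0, tWP_min = 25000 ps at clk_period = 20000 ps gives
+	 * twp_cnt = 1 (two cycles, 40 ns), and for tWC_min = 50000 ps the
+	 * remaining high time is 50000 - 40000 = 10000 ps, raised to
+	 * tWH_min when the latter is larger.
+	 */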
+
+ reg = FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TRH, trh_cnt);
+ reg |= FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TRP, trp_cnt);
+ reg |= FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TWH, twh_cnt);
+ reg |= FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TWP, twp_cnt);
+ t->async_toggle_timings = reg;
+ dev_dbg(cdns_ctrl->dev, "ASYNC_TOGGLE_TIMINGS_SDR\t%x\n", reg);
+
+ tadl_cnt = calc_cycl((sdr->tADL_min + if_skew), clk_period);
+ tccs_cnt = calc_cycl((sdr->tCCS_min + if_skew), clk_period);
+ twhr_cnt = calc_cycl((sdr->tWHR_min + if_skew), clk_period);
+ trhw_cnt = calc_cycl((sdr->tRHW_min + if_skew), clk_period);
+ reg = FIELD_PREP(TIMINGS0_TADL, tadl_cnt);
+
+	/*
+	 * If a timing exceeds the delay field in the timing register,
+	 * use the maximum value.
+	 */
+ if (FIELD_FIT(TIMINGS0_TCCS, tccs_cnt))
+ reg |= FIELD_PREP(TIMINGS0_TCCS, tccs_cnt);
+ else
+ reg |= TIMINGS0_TCCS;
+
+ reg |= FIELD_PREP(TIMINGS0_TWHR, twhr_cnt);
+ reg |= FIELD_PREP(TIMINGS0_TRHW, trhw_cnt);
+ t->timings0 = reg;
+ dev_dbg(cdns_ctrl->dev, "TIMINGS0_SDR\t%x\n", reg);
+
+	/* The following is related to a single signal, so no skew is needed. */
+ trhz_cnt = calc_cycl(sdr->tRHZ_max, clk_period);
+ trhz_cnt = trhz_cnt + 1;
+ twb_cnt = calc_cycl((sdr->tWB_max + board_delay), clk_period);
+	/*
+	 * Because of the two-stage syncflop the value must be increased:
+	 * the first term (3) is related to the synchronization, the
+	 * second (5) to the output interface delay.
+	 */
+ twb_cnt = twb_cnt + 3 + 5;
+	/*
+	 * The following is related to the WE edge of the random data input
+	 * sequence, so no skew is needed.
+	 */
+ tvdly_cnt = calc_cycl(500000 + if_skew, clk_period);
+ reg = FIELD_PREP(TIMINGS1_TRHZ, trhz_cnt);
+ reg |= FIELD_PREP(TIMINGS1_TWB, twb_cnt);
+ reg |= FIELD_PREP(TIMINGS1_TVDLY, tvdly_cnt);
+ t->timings1 = reg;
+ dev_dbg(cdns_ctrl->dev, "TIMINGS1_SDR\t%x\n", reg);
+
+ tfeat_cnt = calc_cycl(sdr->tFEAT_max, clk_period);
+ if (tfeat_cnt < twb_cnt)
+ tfeat_cnt = twb_cnt;
+
+ tceh_cnt = calc_cycl(sdr->tCEH_min, clk_period);
+ tcs_cnt = calc_cycl((sdr->tCS_min + if_skew), clk_period);
+
+ reg = FIELD_PREP(TIMINGS2_TFEAT, tfeat_cnt);
+ reg |= FIELD_PREP(TIMINGS2_CS_HOLD_TIME, tceh_cnt);
+ reg |= FIELD_PREP(TIMINGS2_CS_SETUP_TIME, tcs_cnt);
+ t->timings2 = reg;
+ dev_dbg(cdns_ctrl->dev, "TIMINGS2_SDR\t%x\n", reg);
+
+ if (cdns_ctrl->caps2.is_phy_type_dll) {
+ reg = DLL_PHY_CTRL_DLL_RST_N;
+ if (ext_wr_mode)
+ reg |= DLL_PHY_CTRL_EXTENDED_WR_MODE;
+ if (ext_rd_mode)
+ reg |= DLL_PHY_CTRL_EXTENDED_RD_MODE;
+
+ reg |= FIELD_PREP(DLL_PHY_CTRL_RS_HIGH_WAIT_CNT, 7);
+ reg |= FIELD_PREP(DLL_PHY_CTRL_RS_IDLE_CNT, 7);
+ t->dll_phy_ctrl = reg;
+ dev_dbg(cdns_ctrl->dev, "DLL_PHY_CTRL_SDR\t%x\n", reg);
+ }
+
+ /* Sampling point calculation. */
+ if ((tdvw_max % dqs_sampl_res) > 0)
+ sampling_point = tdvw_max / dqs_sampl_res;
+ else
+ sampling_point = (tdvw_max / dqs_sampl_res - 1);
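+
+	/*
+	 * Illustrative numbers: tdvw_max = 39000 ps with dqs_sampl_res =
+	 * 20000 ps gives sampling_point = 1; the check below then requires
+	 * 1 * 20000 > tdvw_min, i.e. that the sampled edge falls inside the
+	 * data valid window with some hold margin.
+	 */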
+
+ if (sampling_point * dqs_sampl_res > tdvw_min) {
+ dll_phy_dqs_timing =
+ FIELD_PREP(PHY_DQS_TIMING_DQS_SEL_OE_END, 4);
+ dll_phy_dqs_timing |= PHY_DQS_TIMING_USE_PHONY_DQS;
+ phony_dqs_timing = sampling_point / phony_dqs_mod;
+
+ if ((sampling_point % 2) > 0) {
+ dll_phy_dqs_timing |= PHY_DQS_TIMING_PHONY_DQS_SEL;
+ if ((tdvw_max % dqs_sampl_res) == 0)
+				/*
+				 * Calculation for a sampling point at the edge
+				 * of the data window with an odd index.
+				 */
+ phony_dqs_timing = (tdvw_max / dqs_sampl_res)
+ / phony_dqs_mod - 1;
+
+ if (!cdns_ctrl->caps2.is_phy_type_dll)
+ phony_dqs_timing--;
+
+ } else {
+ phony_dqs_timing--;
+ }
+ rd_del_sel = phony_dqs_timing + 3;
+ } else {
+ dev_warn(cdns_ctrl->dev,
+ "ERROR : cannot find valid sampling point\n");
+ }
+
+ reg = FIELD_PREP(PHY_CTRL_PHONY_DQS, phony_dqs_timing);
+ if (cdns_ctrl->caps2.is_phy_type_dll)
+ reg |= PHY_CTRL_SDR_DQS;
+ t->phy_ctrl = reg;
+ dev_dbg(cdns_ctrl->dev, "PHY_CTRL_REG_SDR\t%x\n", reg);
+
+ if (cdns_ctrl->caps2.is_phy_type_dll) {
+ dev_dbg(cdns_ctrl->dev, "PHY_TSEL_REG_SDR\t%x\n", 0);
+ dev_dbg(cdns_ctrl->dev, "PHY_DQ_TIMING_REG_SDR\t%x\n", 2);
+ dev_dbg(cdns_ctrl->dev, "PHY_DQS_TIMING_REG_SDR\t%x\n",
+ dll_phy_dqs_timing);
+ t->phy_dqs_timing = dll_phy_dqs_timing;
+
+ reg = FIELD_PREP(PHY_GATE_LPBK_CTRL_RDS, rd_del_sel);
+ dev_dbg(cdns_ctrl->dev, "PHY_GATE_LPBK_CTRL_REG_SDR\t%x\n",
+ reg);
+ t->phy_gate_lpbk_ctrl = reg;
+
+ dev_dbg(cdns_ctrl->dev, "PHY_DLL_MASTER_CTRL_REG_SDR\t%lx\n",
+ PHY_DLL_MASTER_CTRL_BYPASS_MODE);
+ dev_dbg(cdns_ctrl->dev, "PHY_DLL_SLAVE_CTRL_REG_SDR\t%x\n", 0);
+ }
+
+ return 0;
+}
+
+static int cadence_nand_attach_chip(struct nand_chip *chip)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ u32 ecc_size;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ if (chip->options & NAND_BUSWIDTH_16) {
+ ret = cadence_nand_set_access_width16(cdns_ctrl, true);
+ if (ret)
+ return ret;
+ }
+
+ chip->bbt_options |= NAND_BBT_USE_FLASH;
+ chip->bbt_options |= NAND_BBT_NO_OOB;
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+
+ chip->options |= NAND_NO_SUBPAGE_WRITE;
+
+ cdns_chip->bbm_offs = chip->badblockpos;
+ cdns_chip->bbm_offs &= ~0x01;
+	/* This value should be an even number. */
+ cdns_chip->bbm_len = 2;
+
+ ret = nand_ecc_choose_conf(chip,
+ &cdns_ctrl->ecc_caps,
+ mtd->oobsize - cdns_chip->bbm_len);
+ if (ret) {
+ dev_err(cdns_ctrl->dev, "ECC configuration failed\n");
+ return ret;
+ }
+
+ dev_dbg(cdns_ctrl->dev,
+ "chosen ECC settings: step=%d, strength=%d, bytes=%d\n",
+ chip->ecc.size, chip->ecc.strength, chip->ecc.bytes);
+
+ /* Error correction configuration. */
+ cdns_chip->sector_size = chip->ecc.size;
+ cdns_chip->sector_count = mtd->writesize / cdns_chip->sector_size;
+ ecc_size = cdns_chip->sector_count * chip->ecc.bytes;
+
+ cdns_chip->avail_oob_size = mtd->oobsize - ecc_size;
+
+ if (cdns_chip->avail_oob_size > cdns_ctrl->bch_metadata_size)
+ cdns_chip->avail_oob_size = cdns_ctrl->bch_metadata_size;
+
+ if ((cdns_chip->avail_oob_size + cdns_chip->bbm_len + ecc_size)
+ > mtd->oobsize)
+ cdns_chip->avail_oob_size -= 4;
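+
+	/*
+	 * Illustrative accounting of the assignments above (hypothetical
+	 * 2048+128 byte page): chip->ecc.size = 512 and chip->ecc.bytes = 16
+	 * give sector_count = 4, ecc_size = 64 and avail_oob_size = 64,
+	 * which is then capped to bch_metadata_size when the controller
+	 * reports less.
+	 */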
+
+ ret = cadence_nand_get_ecc_strength_idx(cdns_ctrl, chip->ecc.strength);
+ if (ret < 0)
+ return -EINVAL;
+
+ cdns_chip->corr_str_idx = (u8)ret;
+
+ if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
+ 1000000,
+ CTRL_STATUS_CTRL_BUSY, true))
+ return -ETIMEDOUT;
+
+ cadence_nand_set_ecc_strength(cdns_ctrl,
+ cdns_chip->corr_str_idx);
+
+ cadence_nand_set_erase_detection(cdns_ctrl, true,
+ chip->ecc.strength);
+
+ /* Override the default read operations. */
+ chip->ecc.read_page = cadence_nand_read_page;
+ chip->ecc.read_page_raw = cadence_nand_read_page_raw;
+ chip->ecc.write_page = cadence_nand_write_page;
+ chip->ecc.write_page_raw = cadence_nand_write_page_raw;
+ chip->ecc.read_oob = cadence_nand_read_oob;
+ chip->ecc.write_oob = cadence_nand_write_oob;
+ chip->ecc.read_oob_raw = cadence_nand_read_oob_raw;
+ chip->ecc.write_oob_raw = cadence_nand_write_oob_raw;
+
+ if ((mtd->writesize + mtd->oobsize) > cdns_ctrl->buf_size)
+ cdns_ctrl->buf_size = mtd->writesize + mtd->oobsize;
+
+ /* Is 32-bit DMA supported? */
+ ret = dma_set_mask(cdns_ctrl->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(cdns_ctrl->dev, "no usable DMA configuration\n");
+ return ret;
+ }
+
+ mtd_set_ooblayout(mtd, &cadence_nand_ooblayout_ops);
+
+ return 0;
+}
+
+static const struct nand_controller_ops cadence_nand_controller_ops = {
+ .attach_chip = cadence_nand_attach_chip,
+ .exec_op = cadence_nand_exec_op,
+ .setup_interface = cadence_nand_setup_interface,
+};
+
+static int cadence_nand_chip_init(struct cdns_nand_ctrl *cdns_ctrl,
+ struct device_node *np)
+{
+ struct cdns_nand_chip *cdns_chip;
+ struct mtd_info *mtd;
+ struct nand_chip *chip;
+ int nsels, ret, i;
+ u32 cs;
+
+ nsels = of_property_count_elems_of_size(np, "reg", sizeof(u32));
+ if (nsels <= 0) {
+ dev_err(cdns_ctrl->dev, "missing/invalid reg property\n");
+ return -EINVAL;
+ }
+
+ /* Allocate the nand chip structure. */
+ cdns_chip = devm_kzalloc(cdns_ctrl->dev, sizeof(*cdns_chip) +
+ (nsels * sizeof(u8)),
+ GFP_KERNEL);
+ if (!cdns_chip) {
+ dev_err(cdns_ctrl->dev, "could not allocate chip structure\n");
+ return -ENOMEM;
+ }
+
+ cdns_chip->nsels = nsels;
+
+ for (i = 0; i < nsels; i++) {
+ /* Retrieve CS id. */
+ ret = of_property_read_u32_index(np, "reg", i, &cs);
+ if (ret) {
+ dev_err(cdns_ctrl->dev,
+ "could not retrieve reg property: %d\n",
+ ret);
+ return ret;
+ }
+
+ if (cs >= cdns_ctrl->caps2.max_banks) {
+ dev_err(cdns_ctrl->dev,
+ "invalid reg value: %u (max CS = %d)\n",
+ cs, cdns_ctrl->caps2.max_banks);
+ return -EINVAL;
+ }
+
+ if (test_and_set_bit(cs, &cdns_ctrl->assigned_cs)) {
+ dev_err(cdns_ctrl->dev,
+ "CS %d already assigned\n", cs);
+ return -EINVAL;
+ }
+
+ cdns_chip->cs[i] = cs;
+ }
+
+ chip = &cdns_chip->chip;
+ chip->controller = &cdns_ctrl->controller;
+ nand_set_flash_node(chip, np);
+
+ mtd = nand_to_mtd(chip);
+ mtd->dev.parent = cdns_ctrl->dev;
+
+ /*
+ * Default to HW ECC engine mode. If the nand-ecc-mode property is given
+ * in the DT node, this entry will be overwritten in nand_scan_ident().
+ */
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+
+ ret = nand_scan(chip, cdns_chip->nsels);
+ if (ret) {
+ dev_err(cdns_ctrl->dev, "could not scan the nand chip\n");
+ return ret;
+ }
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret) {
+ dev_err(cdns_ctrl->dev,
+ "failed to register mtd device: %d\n", ret);
+ nand_cleanup(chip);
+ return ret;
+ }
+
+ list_add_tail(&cdns_chip->node, &cdns_ctrl->chips);
+
+ return 0;
+}
+
+static void cadence_nand_chips_cleanup(struct cdns_nand_ctrl *cdns_ctrl)
+{
+ struct cdns_nand_chip *entry, *temp;
+ struct nand_chip *chip;
+ int ret;
+
+ list_for_each_entry_safe(entry, temp, &cdns_ctrl->chips, node) {
+ chip = &entry->chip;
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ list_del(&entry->node);
+ }
+}
+
+static int cadence_nand_chips_init(struct cdns_nand_ctrl *cdns_ctrl)
+{
+ struct device_node *np = cdns_ctrl->dev->of_node;
+ struct device_node *nand_np;
+ int max_cs = cdns_ctrl->caps2.max_banks;
+ int nchips, ret;
+
+ nchips = of_get_child_count(np);
+
+ if (nchips > max_cs) {
+ dev_err(cdns_ctrl->dev,
+ "too many NAND chips: %d (max = %d CS)\n",
+ nchips, max_cs);
+ return -EINVAL;
+ }
+
+ for_each_child_of_node(np, nand_np) {
+ ret = cadence_nand_chip_init(cdns_ctrl, nand_np);
+ if (ret) {
+ of_node_put(nand_np);
+ cadence_nand_chips_cleanup(cdns_ctrl);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void
+cadence_nand_irq_cleanup(int irqnum, struct cdns_nand_ctrl *cdns_ctrl)
+{
+ /* Disable interrupts. */
+ writel_relaxed(INTR_ENABLE_INTR_EN, cdns_ctrl->reg + INTR_ENABLE);
+}
+
+static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl)
+{
+ dma_cap_mask_t mask;
+ int ret;
+
+ cdns_ctrl->cdma_desc = dma_alloc_coherent(cdns_ctrl->dev,
+ sizeof(*cdns_ctrl->cdma_desc),
+ &cdns_ctrl->dma_cdma_desc,
+ GFP_KERNEL);
+	if (!cdns_ctrl->cdma_desc)
+ return -ENOMEM;
+
+ cdns_ctrl->buf_size = SZ_16K;
+ cdns_ctrl->buf = kmalloc(cdns_ctrl->buf_size, GFP_KERNEL);
+ if (!cdns_ctrl->buf) {
+ ret = -ENOMEM;
+ goto free_buf_desc;
+ }
+
+ if (devm_request_irq(cdns_ctrl->dev, cdns_ctrl->irq, cadence_nand_isr,
+ IRQF_SHARED, "cadence-nand-controller",
+ cdns_ctrl)) {
+ dev_err(cdns_ctrl->dev, "Unable to allocate IRQ\n");
+ ret = -ENODEV;
+ goto free_buf;
+ }
+
+ spin_lock_init(&cdns_ctrl->irq_lock);
+ init_completion(&cdns_ctrl->complete);
+
+ ret = cadence_nand_hw_init(cdns_ctrl);
+ if (ret)
+ goto disable_irq;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_MEMCPY, mask);
+
+ if (cdns_ctrl->caps1->has_dma) {
+ cdns_ctrl->dmac = dma_request_channel(mask, NULL, NULL);
+ if (!cdns_ctrl->dmac) {
+ dev_err(cdns_ctrl->dev,
+ "Unable to get a DMA channel\n");
+ ret = -EBUSY;
+ goto disable_irq;
+ }
+ }
+
+ nand_controller_init(&cdns_ctrl->controller);
+ INIT_LIST_HEAD(&cdns_ctrl->chips);
+
+ cdns_ctrl->controller.ops = &cadence_nand_controller_ops;
+ cdns_ctrl->curr_corr_str_idx = 0xFF;
+
+ ret = cadence_nand_chips_init(cdns_ctrl);
+ if (ret) {
+ dev_err(cdns_ctrl->dev, "Failed to register MTD: %d\n",
+ ret);
+ goto dma_release_chnl;
+ }
+
+ kfree(cdns_ctrl->buf);
+ cdns_ctrl->buf = kzalloc(cdns_ctrl->buf_size, GFP_KERNEL);
+ if (!cdns_ctrl->buf) {
+ ret = -ENOMEM;
+ goto dma_release_chnl;
+ }
+
+ return 0;
+
+dma_release_chnl:
+ if (cdns_ctrl->dmac)
+ dma_release_channel(cdns_ctrl->dmac);
+
+disable_irq:
+ cadence_nand_irq_cleanup(cdns_ctrl->irq, cdns_ctrl);
+
+free_buf:
+ kfree(cdns_ctrl->buf);
+
+free_buf_desc:
+ dma_free_coherent(cdns_ctrl->dev, sizeof(struct cadence_nand_cdma_desc),
+ cdns_ctrl->cdma_desc, cdns_ctrl->dma_cdma_desc);
+
+ return ret;
+}
+
+/* Driver exit point. */
+static void cadence_nand_remove(struct cdns_nand_ctrl *cdns_ctrl)
+{
+ cadence_nand_chips_cleanup(cdns_ctrl);
+ cadence_nand_irq_cleanup(cdns_ctrl->irq, cdns_ctrl);
+ kfree(cdns_ctrl->buf);
+ dma_free_coherent(cdns_ctrl->dev, sizeof(struct cadence_nand_cdma_desc),
+ cdns_ctrl->cdma_desc, cdns_ctrl->dma_cdma_desc);
+
+ if (cdns_ctrl->dmac)
+ dma_release_channel(cdns_ctrl->dmac);
+}
+
+struct cadence_nand_dt {
+ struct cdns_nand_ctrl cdns_ctrl;
+ struct clk *clk;
+};
+
+static const struct cadence_nand_dt_devdata cadence_nand_default = {
+ .if_skew = 0,
+ .has_dma = 1,
+};
+
+static const struct of_device_id cadence_nand_dt_ids[] = {
+ {
+ .compatible = "cdns,hp-nfc",
+ .data = &cadence_nand_default
+ }, {}
+};
+
+MODULE_DEVICE_TABLE(of, cadence_nand_dt_ids);
+
+static int cadence_nand_dt_probe(struct platform_device *ofdev)
+{
+ struct resource *res;
+ struct cadence_nand_dt *dt;
+ struct cdns_nand_ctrl *cdns_ctrl;
+ int ret;
+ const struct of_device_id *of_id;
+ const struct cadence_nand_dt_devdata *devdata;
+ u32 val;
+
+ of_id = of_match_device(cadence_nand_dt_ids, &ofdev->dev);
+ if (of_id) {
+ ofdev->id_entry = of_id->data;
+ devdata = of_id->data;
+ } else {
+ pr_err("Failed to find the right device id.\n");
+		return -ENODEV;
+ }
+
+ dt = devm_kzalloc(&ofdev->dev, sizeof(*dt), GFP_KERNEL);
+ if (!dt)
+ return -ENOMEM;
+
+ cdns_ctrl = &dt->cdns_ctrl;
+ cdns_ctrl->caps1 = devdata;
+
+ cdns_ctrl->dev = &ofdev->dev;
+ cdns_ctrl->irq = platform_get_irq(ofdev, 0);
+ if (cdns_ctrl->irq < 0)
+ return cdns_ctrl->irq;
+
+ dev_info(cdns_ctrl->dev, "IRQ: nr %d\n", cdns_ctrl->irq);
+
+ cdns_ctrl->reg = devm_platform_ioremap_resource(ofdev, 0);
+ if (IS_ERR(cdns_ctrl->reg))
+ return PTR_ERR(cdns_ctrl->reg);
+
+ cdns_ctrl->io.virt = devm_platform_get_and_ioremap_resource(ofdev, 1, &res);
+ if (IS_ERR(cdns_ctrl->io.virt))
+ return PTR_ERR(cdns_ctrl->io.virt);
+ cdns_ctrl->io.dma = res->start;
+
+ dt->clk = devm_clk_get(cdns_ctrl->dev, "nf_clk");
+ if (IS_ERR(dt->clk))
+ return PTR_ERR(dt->clk);
+
+ cdns_ctrl->nf_clk_rate = clk_get_rate(dt->clk);
+
+ ret = of_property_read_u32(ofdev->dev.of_node,
+ "cdns,board-delay-ps", &val);
+ if (ret) {
+ val = 4830;
+ dev_info(cdns_ctrl->dev,
+ "missing cdns,board-delay-ps property, %d was set\n",
+ val);
+ }
+ cdns_ctrl->board_delay = val;
+
+ ret = cadence_nand_init(cdns_ctrl);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(ofdev, dt);
+ return 0;
+}
+
+static int cadence_nand_dt_remove(struct platform_device *ofdev)
+{
+ struct cadence_nand_dt *dt = platform_get_drvdata(ofdev);
+
+ cadence_nand_remove(&dt->cdns_ctrl);
+
+ return 0;
+}
+
+static struct platform_driver cadence_nand_dt_driver = {
+ .probe = cadence_nand_dt_probe,
+ .remove = cadence_nand_dt_remove,
+ .driver = {
+ .name = "cadence-nand-controller",
+ .of_match_table = cadence_nand_dt_ids,
+ },
+};
+
+module_platform_driver(cadence_nand_dt_driver);
+
+MODULE_AUTHOR("Piotr Sroka <piotrs@cadence.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Driver for Cadence NAND flash controller");
+
diff --git a/drivers/mtd/nand/raw/cafe_nand.c b/drivers/mtd/nand/raw/cafe_nand.c
new file mode 100644
index 000000000..04502d22e
--- /dev/null
+++ b/drivers/mtd/nand/raw/cafe_nand.c
@@ -0,0 +1,887 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Driver for One Laptop Per Child ‘CAFÉ’ controller, aka Marvell 88ALP01
+ *
+ * The data sheet for this device can be found at:
+ * http://wiki.laptop.org/go/Datasheets
+ *
+ * Copyright © 2006 Red Hat, Inc.
+ * Copyright © 2006 David Woodhouse <dwmw2@infradead.org>
+ */
+
+#define DEBUG
+
+#include <linux/device.h>
+#undef DEBUG
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/rslib.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <asm/io.h>
+
+#define CAFE_NAND_CTRL1 0x00
+#define CAFE_NAND_CTRL2 0x04
+#define CAFE_NAND_CTRL3 0x08
+#define CAFE_NAND_STATUS 0x0c
+#define CAFE_NAND_IRQ 0x10
+#define CAFE_NAND_IRQ_MASK 0x14
+#define CAFE_NAND_DATA_LEN 0x18
+#define CAFE_NAND_ADDR1 0x1c
+#define CAFE_NAND_ADDR2 0x20
+#define CAFE_NAND_TIMING1 0x24
+#define CAFE_NAND_TIMING2 0x28
+#define CAFE_NAND_TIMING3 0x2c
+#define CAFE_NAND_NONMEM 0x30
+#define CAFE_NAND_ECC_RESULT 0x3C
+#define CAFE_NAND_DMA_CTRL 0x40
+#define CAFE_NAND_DMA_ADDR0 0x44
+#define CAFE_NAND_DMA_ADDR1 0x48
+#define CAFE_NAND_ECC_SYN01 0x50
+#define CAFE_NAND_ECC_SYN23 0x54
+#define CAFE_NAND_ECC_SYN45 0x58
+#define CAFE_NAND_ECC_SYN67 0x5c
+#define CAFE_NAND_READ_DATA 0x1000
+#define CAFE_NAND_WRITE_DATA 0x2000
+
+#define CAFE_GLOBAL_CTRL 0x3004
+#define CAFE_GLOBAL_IRQ 0x3008
+#define CAFE_GLOBAL_IRQ_MASK 0x300c
+#define CAFE_NAND_RESET 0x3034
+
+/* Missing from the datasheet: bit 19 of CTRL1 sets CE0 vs. CE1 */
+#define CTRL1_CHIPSELECT (1<<19)
+
+struct cafe_priv {
+ struct nand_chip nand;
+ struct pci_dev *pdev;
+ void __iomem *mmio;
+ struct rs_control *rs;
+ uint32_t ctl1;
+ uint32_t ctl2;
+ int datalen;
+ int nr_data;
+ int data_pos;
+ int page_addr;
+ bool usedma;
+ dma_addr_t dmaaddr;
+ unsigned char *dmabuf;
+};
+
+static int usedma = 1;
+module_param(usedma, int, 0644);
+
+static int skipbbt = 0;
+module_param(skipbbt, int, 0644);
+
+static int debug = 0;
+module_param(debug, int, 0644);
+
+static int regdebug = 0;
+module_param(regdebug, int, 0644);
+
+static int checkecc = 1;
+module_param(checkecc, int, 0644);
+
+static unsigned int numtimings;
+static int timing[3];
+module_param_array(timing, int, &numtimings, 0644);
+
+static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL };
+
+/* Hrm. Why isn't this already conditional on something in the struct device? */
+#define cafe_dev_dbg(dev, args...) do { if (debug) dev_dbg(dev, ##args); } while(0)
+
+/* Make it easier to switch to PIO if we need to */
+#define cafe_readl(cafe, addr) readl((cafe)->mmio + CAFE_##addr)
+#define cafe_writel(cafe, datum, addr) writel(datum, (cafe)->mmio + CAFE_##addr)
+
+static int cafe_device_ready(struct nand_chip *chip)
+{
+ struct cafe_priv *cafe = nand_get_controller_data(chip);
+ int result = !!(cafe_readl(cafe, NAND_STATUS) & 0x40000000);
+ uint32_t irqs = cafe_readl(cafe, NAND_IRQ);
+
+ cafe_writel(cafe, irqs, NAND_IRQ);
+
+ cafe_dev_dbg(&cafe->pdev->dev, "NAND device is%s ready, IRQ %x (%x) (%x,%x)\n",
+ result?"":" not", irqs, cafe_readl(cafe, NAND_IRQ),
+ cafe_readl(cafe, GLOBAL_IRQ), cafe_readl(cafe, GLOBAL_IRQ_MASK));
+
+ return result;
+}
+
+
+static void cafe_write_buf(struct nand_chip *chip, const uint8_t *buf, int len)
+{
+ struct cafe_priv *cafe = nand_get_controller_data(chip);
+
+ if (cafe->usedma)
+ memcpy(cafe->dmabuf + cafe->datalen, buf, len);
+ else
+ memcpy_toio(cafe->mmio + CAFE_NAND_WRITE_DATA + cafe->datalen, buf, len);
+
+ cafe->datalen += len;
+
+ cafe_dev_dbg(&cafe->pdev->dev, "Copy 0x%x bytes to write buffer. datalen 0x%x\n",
+ len, cafe->datalen);
+}
+
+static void cafe_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
+{
+ struct cafe_priv *cafe = nand_get_controller_data(chip);
+
+ if (cafe->usedma)
+ memcpy(buf, cafe->dmabuf + cafe->datalen, len);
+ else
+ memcpy_fromio(buf, cafe->mmio + CAFE_NAND_READ_DATA + cafe->datalen, len);
+
+ cafe_dev_dbg(&cafe->pdev->dev, "Copy 0x%x bytes from position 0x%x in read buffer.\n",
+ len, cafe->datalen);
+ cafe->datalen += len;
+}
+
+static uint8_t cafe_read_byte(struct nand_chip *chip)
+{
+ struct cafe_priv *cafe = nand_get_controller_data(chip);
+ uint8_t d;
+
+ cafe_read_buf(chip, &d, 1);
+ cafe_dev_dbg(&cafe->pdev->dev, "Read %02x\n", d);
+
+ return d;
+}
+
+static void cafe_nand_cmdfunc(struct nand_chip *chip, unsigned command,
+ int column, int page_addr)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct cafe_priv *cafe = nand_get_controller_data(chip);
+ int adrbytes = 0;
+ uint32_t ctl1;
+ uint32_t doneint = 0x80000000;
+
+ cafe_dev_dbg(&cafe->pdev->dev, "cmdfunc %02x, 0x%x, 0x%x\n",
+ command, column, page_addr);
+
+ if (command == NAND_CMD_ERASE2 || command == NAND_CMD_PAGEPROG) {
+ /* Second half of a command we already calculated */
+ cafe_writel(cafe, cafe->ctl2 | 0x100 | command, NAND_CTRL2);
+ ctl1 = cafe->ctl1;
+ cafe->ctl2 &= ~(1<<30);
+ cafe_dev_dbg(&cafe->pdev->dev, "Continue command, ctl1 %08x, #data %d\n",
+ cafe->ctl1, cafe->nr_data);
+ goto do_command;
+ }
+ /* Reset ECC engine */
+ cafe_writel(cafe, 0, NAND_CTRL2);
+
+ /* Emulate NAND_CMD_READOOB on large-page chips */
+ if (mtd->writesize > 512 &&
+ command == NAND_CMD_READOOB) {
+ column += mtd->writesize;
+ command = NAND_CMD_READ0;
+ }
+
+ /* FIXME: Do we need to send read command before sending data
+ for small-page chips, to position the buffer correctly? */
+
+ if (column != -1) {
+ cafe_writel(cafe, column, NAND_ADDR1);
+ adrbytes = 2;
+ if (page_addr != -1)
+ goto write_adr2;
+ } else if (page_addr != -1) {
+ cafe_writel(cafe, page_addr & 0xffff, NAND_ADDR1);
+ page_addr >>= 16;
+ write_adr2:
+ cafe_writel(cafe, page_addr, NAND_ADDR2);
+ adrbytes += 2;
+ if (mtd->size > mtd->writesize << 16)
+ adrbytes++;
+ }
+
+ cafe->data_pos = cafe->datalen = 0;
+
+ /* Set command valid bit, mask in the chip select bit */
+ ctl1 = 0x80000000 | command | (cafe->ctl1 & CTRL1_CHIPSELECT);
+
+ /* Set RD or WR bits as appropriate */
+ if (command == NAND_CMD_READID || command == NAND_CMD_STATUS) {
+ ctl1 |= (1<<26); /* rd */
+ /* Always 5 bytes, for now */
+ cafe->datalen = 4;
+ /* And one address cycle -- even for STATUS, since the controller doesn't work without */
+ adrbytes = 1;
+ } else if (command == NAND_CMD_READ0 || command == NAND_CMD_READ1 ||
+ command == NAND_CMD_READOOB || command == NAND_CMD_RNDOUT) {
+ ctl1 |= 1<<26; /* rd */
+ /* For now, assume just read to end of page */
+ cafe->datalen = mtd->writesize + mtd->oobsize - column;
+ } else if (command == NAND_CMD_SEQIN)
+ ctl1 |= 1<<25; /* wr */
+
+ /* Set number of address bytes */
+ if (adrbytes)
+ ctl1 |= ((adrbytes-1)|8) << 27;
+
+ if (command == NAND_CMD_SEQIN || command == NAND_CMD_ERASE1) {
+ /* Ignore the first command of a pair; the hardware
+ deals with them both at once, later */
+ cafe->ctl1 = ctl1;
+ cafe_dev_dbg(&cafe->pdev->dev, "Setup for delayed command, ctl1 %08x, dlen %x\n",
+ cafe->ctl1, cafe->datalen);
+ return;
+ }
+ /* RNDOUT and READ0 commands need a following byte */
+ if (command == NAND_CMD_RNDOUT)
+ cafe_writel(cafe, cafe->ctl2 | 0x100 | NAND_CMD_RNDOUTSTART, NAND_CTRL2);
+ else if (command == NAND_CMD_READ0 && mtd->writesize > 512)
+ cafe_writel(cafe, cafe->ctl2 | 0x100 | NAND_CMD_READSTART, NAND_CTRL2);
+
+ do_command:
+ cafe_dev_dbg(&cafe->pdev->dev, "dlen %x, ctl1 %x, ctl2 %x\n",
+ cafe->datalen, ctl1, cafe_readl(cafe, NAND_CTRL2));
+
+ /* NB: The datasheet lies -- we really should be subtracting 1 here */
+ cafe_writel(cafe, cafe->datalen, NAND_DATA_LEN);
+ cafe_writel(cafe, 0x90000000, NAND_IRQ);
+ if (cafe->usedma && (ctl1 & (3<<25))) {
+ uint32_t dmactl = 0xc0000000 + cafe->datalen;
+ /* If WR or RD bits set, set up DMA */
+ if (ctl1 & (1<<26)) {
+ /* It's a read */
+ dmactl |= (1<<29);
+ /* ... so it's done when the DMA is done, not just
+ the command. */
+ doneint = 0x10000000;
+ }
+ cafe_writel(cafe, dmactl, NAND_DMA_CTRL);
+ }
+ cafe->datalen = 0;
+
+ if (unlikely(regdebug)) {
+ int i;
+ printk("About to write command %08x to register 0\n", ctl1);
+ for (i=4; i< 0x5c; i+=4)
+ printk("Register %x: %08x\n", i, readl(cafe->mmio + i));
+ }
+
+ cafe_writel(cafe, ctl1, NAND_CTRL1);
+ /* Apply this short delay always to ensure that we do wait tWB in
+ * any case on any machine. */
+ ndelay(100);
+
+ if (1) {
+ int c;
+ uint32_t irqs;
+
+ for (c = 500000; c != 0; c--) {
+ irqs = cafe_readl(cafe, NAND_IRQ);
+ if (irqs & doneint)
+ break;
+ udelay(1);
+ if (!(c % 100000))
+ cafe_dev_dbg(&cafe->pdev->dev, "Wait for ready, IRQ %x\n", irqs);
+ cpu_relax();
+ }
+ cafe_writel(cafe, doneint, NAND_IRQ);
+ cafe_dev_dbg(&cafe->pdev->dev, "Command %x completed after %d usec, irqs %x (%x)\n",
+ command, 500000-c, irqs, cafe_readl(cafe, NAND_IRQ));
+ }
+
+ WARN_ON(cafe->ctl2 & (1<<30));
+
+ switch (command) {
+
+ case NAND_CMD_CACHEDPROG:
+ case NAND_CMD_PAGEPROG:
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ case NAND_CMD_SEQIN:
+ case NAND_CMD_RNDIN:
+ case NAND_CMD_STATUS:
+ case NAND_CMD_RNDOUT:
+ cafe_writel(cafe, cafe->ctl2, NAND_CTRL2);
+ return;
+ }
+ nand_wait_ready(chip);
+ cafe_writel(cafe, cafe->ctl2, NAND_CTRL2);
+}
+
+static void cafe_select_chip(struct nand_chip *chip, int chipnr)
+{
+ struct cafe_priv *cafe = nand_get_controller_data(chip);
+
+ cafe_dev_dbg(&cafe->pdev->dev, "select_chip %d\n", chipnr);
+
+ /* Mask the appropriate bit into the stored value of ctl1
+ which will be used by cafe_nand_cmdfunc() */
+ if (chipnr)
+ cafe->ctl1 |= CTRL1_CHIPSELECT;
+ else
+ cafe->ctl1 &= ~CTRL1_CHIPSELECT;
+}
+
+static irqreturn_t cafe_nand_interrupt(int irq, void *id)
+{
+ struct mtd_info *mtd = id;
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct cafe_priv *cafe = nand_get_controller_data(chip);
+ uint32_t irqs = cafe_readl(cafe, NAND_IRQ);
+ cafe_writel(cafe, irqs & ~0x90000000, NAND_IRQ);
+ if (!irqs)
+ return IRQ_NONE;
+
+ cafe_dev_dbg(&cafe->pdev->dev, "irq, bits %x (%x)\n", irqs, cafe_readl(cafe, NAND_IRQ));
+ return IRQ_HANDLED;
+}
+
+static int cafe_nand_write_oob(struct nand_chip *chip, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
+ mtd->oobsize);
+}
+
+/* Don't use -- use nand_read_oob_std for now */
+static int cafe_nand_read_oob(struct nand_chip *chip, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
+}
+/**
+ * cafe_nand_read_page - [REPLACEABLE] hardware ecc syndrome based page read
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller expects OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * The hw generator calculates the error syndrome automatically. Therefore
+ * we need a special oob layout and handling.
+ */
+static int cafe_nand_read_page(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct cafe_priv *cafe = nand_get_controller_data(chip);
+ unsigned int max_bitflips = 0;
+
+ cafe_dev_dbg(&cafe->pdev->dev, "ECC result %08x SYN1,2 %08x\n",
+ cafe_readl(cafe, NAND_ECC_RESULT),
+ cafe_readl(cafe, NAND_ECC_SYN01));
+
+ nand_read_page_op(chip, page, 0, buf, mtd->writesize);
+ chip->legacy.read_buf(chip, chip->oob_poi, mtd->oobsize);
+
+ if (checkecc && cafe_readl(cafe, NAND_ECC_RESULT) & (1<<18)) {
+ unsigned short syn[8], pat[4];
+ int pos[4];
+ u8 *oob = chip->oob_poi;
+ int i, n;
+
+		for (i = 0; i < 8; i += 2) {
+ uint32_t tmp = cafe_readl(cafe, NAND_ECC_SYN01 + (i*2));
+
+ syn[i] = cafe->rs->codec->index_of[tmp & 0xfff];
+ syn[i+1] = cafe->rs->codec->index_of[(tmp >> 16) & 0xfff];
+ }
+
+ n = decode_rs16(cafe->rs, NULL, NULL, 1367, syn, 0, pos, 0,
+ pat);
+
+ for (i = 0; i < n; i++) {
+ int p = pos[i];
+
+ /* The 12-bit symbols are mapped to bytes here */
+
+ if (p > 1374) {
+ /* out of range */
+ n = -1374;
+ } else if (p == 0) {
+ /* high four bits do not correspond to data */
+ if (pat[i] > 0xff)
+ n = -2048;
+ else
+ buf[0] ^= pat[i];
+ } else if (p == 1365) {
+ buf[2047] ^= pat[i] >> 4;
+ oob[0] ^= pat[i] << 4;
+ } else if (p > 1365) {
+ if ((p & 1) == 1) {
+ oob[3*p/2 - 2048] ^= pat[i] >> 4;
+ oob[3*p/2 - 2047] ^= pat[i] << 4;
+ } else {
+ oob[3*p/2 - 2049] ^= pat[i] >> 8;
+ oob[3*p/2 - 2048] ^= pat[i];
+ }
+ } else if ((p & 1) == 1) {
+ buf[3*p/2] ^= pat[i] >> 4;
+ buf[3*p/2 + 1] ^= pat[i] << 4;
+ } else {
+ buf[3*p/2 - 1] ^= pat[i] >> 8;
+ buf[3*p/2] ^= pat[i];
+ }
+ }
+
+ if (n < 0) {
+ dev_dbg(&cafe->pdev->dev, "Failed to correct ECC at %08x\n",
+ cafe_readl(cafe, NAND_ADDR2) * 2048);
+ for (i = 0; i < 0x5c; i += 4)
+ printk("Register %x: %08x\n", i, readl(cafe->mmio + i));
+ mtd->ecc_stats.failed++;
+ } else {
+ dev_dbg(&cafe->pdev->dev, "Corrected %d symbol errors\n", n);
+ mtd->ecc_stats.corrected += n;
+ max_bitflips = max_t(unsigned int, max_bitflips, n);
+ }
+ }
+
+ return max_bitflips;
+}
+
+static int cafe_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = 0;
+ oobregion->length = chip->ecc.total;
+
+ return 0;
+}
+
+static int cafe_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = chip->ecc.total;
+ oobregion->length = mtd->oobsize - chip->ecc.total;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops cafe_ooblayout_ops = {
+ .ecc = cafe_ooblayout_ecc,
+ .free = cafe_ooblayout_free,
+};
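+
+/*
+ * Illustration only (nothing consumes this comment): with the ECC config
+ * set up in cafe_nand_attach_chip(), ecc.size spans the whole page, so
+ * there is a single ECC step and ecc.total is 14 bytes. On a 2KiB page
+ * with 64 bytes of OOB the RS ECC thus occupies OOB bytes 0-13 and the
+ * free area is bytes 14-63 -- which is why the BBT descriptors below put
+ * their pattern at offset 14 and the version byte at offset 18 (the
+ * 512-byte case works out the same way for the _512 descriptors).
+ */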
+
+/* Ick. The BBT code really ought to be able to work this bit out
+ for itself from the above, at least for the 2KiB case */
+static uint8_t cafe_bbt_pattern_2048[] = { 'B', 'b', 't', '0' };
+static uint8_t cafe_mirror_pattern_2048[] = { '1', 't', 'b', 'B' };
+
+static uint8_t cafe_bbt_pattern_512[] = { 0xBB };
+static uint8_t cafe_mirror_pattern_512[] = { 0xBC };
+
+
+static struct nand_bbt_descr cafe_bbt_main_descr_2048 = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 14,
+ .len = 4,
+ .veroffs = 18,
+ .maxblocks = 4,
+ .pattern = cafe_bbt_pattern_2048
+};
+
+static struct nand_bbt_descr cafe_bbt_mirror_descr_2048 = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 14,
+ .len = 4,
+ .veroffs = 18,
+ .maxblocks = 4,
+ .pattern = cafe_mirror_pattern_2048
+};
+
+static struct nand_bbt_descr cafe_bbt_main_descr_512 = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 14,
+ .len = 1,
+ .veroffs = 15,
+ .maxblocks = 4,
+ .pattern = cafe_bbt_pattern_512
+};
+
+static struct nand_bbt_descr cafe_bbt_mirror_descr_512 = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 14,
+ .len = 1,
+ .veroffs = 15,
+ .maxblocks = 4,
+ .pattern = cafe_mirror_pattern_512
+};
+
+
+static int cafe_nand_write_page_lowlevel(struct nand_chip *chip,
+ const uint8_t *buf, int oob_required,
+ int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct cafe_priv *cafe = nand_get_controller_data(chip);
+
+ nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
+ chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);
+
+ /* Set up ECC autogeneration */
+ cafe->ctl2 |= (1<<30);
+
+ return nand_prog_page_end_op(chip);
+}
+
+/* F_2[X]/(X**6+X+1) */
+static unsigned short gf64_mul(u8 a, u8 b)
+{
+ u8 c;
+ unsigned int i;
+
+ c = 0;
+ for (i = 0; i < 6; i++) {
+ if (a & 1)
+ c ^= b;
+ a >>= 1;
+ b <<= 1;
+ if ((b & 0x40) != 0)
+ b ^= 0x43;
+ }
+
+ return c;
+}
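+
+/*
+ * Worked example: gf64_mul() multiplies modulo x^6 + x + 1, so
+ * x * x^5 = x^6 = x + 1 in this field, i.e. gf64_mul(0x02, 0x20) == 0x03.
+ */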
+
+/* F_64[X]/(X**2+X+A**-1) with A the generator of F_64[X] */
+static u16 gf4096_mul(u16 a, u16 b)
+{
+ u8 ah, al, bh, bl, ch, cl;
+
+ ah = a >> 6;
+ al = a & 0x3f;
+ bh = b >> 6;
+ bl = b & 0x3f;
+
+ ch = gf64_mul(ah ^ al, bh ^ bl) ^ gf64_mul(al, bl);
+ cl = gf64_mul(gf64_mul(ah, bh), 0x21) ^ gf64_mul(al, bl);
+
+ return (ch << 6) ^ cl;
+}
+
+static int cafe_mul(int x)
+{
+ if (x == 0)
+ return 1;
+ return gf4096_mul(x, 0xe01);
+}
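+
+/*
+ * A sketch of how these fit together, as far as this driver shows it:
+ * the GF(4096) helpers above exist to seed the Reed-Solomon codec that
+ * cafe_nand_probe() creates with init_rs_non_canonical(12, &cafe_mul,
+ * 0, 1, 8); the actual decode happens in cafe_nand_read_page() via
+ * decode_rs16().
+ */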
+
+static int cafe_nand_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct cafe_priv *cafe = nand_get_controller_data(chip);
+ int err = 0;
+
+ cafe->dmabuf = dma_alloc_coherent(&cafe->pdev->dev, 2112,
+ &cafe->dmaaddr, GFP_KERNEL);
+ if (!cafe->dmabuf)
+ return -ENOMEM;
+
+ /* Set up DMA address */
+ cafe_writel(cafe, lower_32_bits(cafe->dmaaddr), NAND_DMA_ADDR0);
+ cafe_writel(cafe, upper_32_bits(cafe->dmaaddr), NAND_DMA_ADDR1);
+
+ cafe_dev_dbg(&cafe->pdev->dev, "Set DMA address to %x (virt %p)\n",
+ cafe_readl(cafe, NAND_DMA_ADDR0), cafe->dmabuf);
+
+ /* Restore the DMA flag */
+ cafe->usedma = usedma;
+
+ cafe->ctl2 = BIT(27); /* Reed-Solomon ECC */
+ if (mtd->writesize == 2048)
+ cafe->ctl2 |= BIT(29); /* 2KiB page size */
+
+ /* Set up ECC according to the type of chip we found */
+ mtd_set_ooblayout(mtd, &cafe_ooblayout_ops);
+ if (mtd->writesize == 2048) {
+ cafe->nand.bbt_td = &cafe_bbt_main_descr_2048;
+ cafe->nand.bbt_md = &cafe_bbt_mirror_descr_2048;
+ } else if (mtd->writesize == 512) {
+ cafe->nand.bbt_td = &cafe_bbt_main_descr_512;
+ cafe->nand.bbt_md = &cafe_bbt_mirror_descr_512;
+ } else {
+ dev_warn(&cafe->pdev->dev,
+ "Unexpected NAND flash writesize %d. Aborting\n",
+ mtd->writesize);
+ err = -ENOTSUPP;
+ goto out_free_dma;
+ }
+
+ cafe->nand.ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+ cafe->nand.ecc.placement = NAND_ECC_PLACEMENT_INTERLEAVED;
+ cafe->nand.ecc.size = mtd->writesize;
+ cafe->nand.ecc.bytes = 14;
+ cafe->nand.ecc.strength = 4;
+ cafe->nand.ecc.write_page = cafe_nand_write_page_lowlevel;
+ cafe->nand.ecc.write_oob = cafe_nand_write_oob;
+ cafe->nand.ecc.read_page = cafe_nand_read_page;
+ cafe->nand.ecc.read_oob = cafe_nand_read_oob;
+
+ return 0;
+
+ out_free_dma:
+ dma_free_coherent(&cafe->pdev->dev, 2112, cafe->dmabuf, cafe->dmaaddr);
+
+ return err;
+}
+
+static void cafe_nand_detach_chip(struct nand_chip *chip)
+{
+ struct cafe_priv *cafe = nand_get_controller_data(chip);
+
+ dma_free_coherent(&cafe->pdev->dev, 2112, cafe->dmabuf, cafe->dmaaddr);
+}
+
+static const struct nand_controller_ops cafe_nand_controller_ops = {
+ .attach_chip = cafe_nand_attach_chip,
+ .detach_chip = cafe_nand_detach_chip,
+};
+
+static int cafe_nand_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct mtd_info *mtd;
+ struct cafe_priv *cafe;
+ uint32_t ctrl;
+ int err = 0;
+
+ /* Very old versions shared the same PCI ident for all three
+ functions on the chip. Verify the class too... */
+ if ((pdev->class >> 8) != PCI_CLASS_MEMORY_FLASH)
+ return -ENODEV;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+
+ pci_set_master(pdev);
+
+ cafe = kzalloc(sizeof(*cafe), GFP_KERNEL);
+ if (!cafe)
+ return -ENOMEM;
+
+ mtd = nand_to_mtd(&cafe->nand);
+ mtd->dev.parent = &pdev->dev;
+ nand_set_controller_data(&cafe->nand, cafe);
+
+ cafe->pdev = pdev;
+ cafe->mmio = pci_iomap(pdev, 0, 0);
+ if (!cafe->mmio) {
+ dev_warn(&pdev->dev, "failed to iomap\n");
+ err = -ENOMEM;
+ goto out_free_mtd;
+ }
+
+ cafe->rs = init_rs_non_canonical(12, &cafe_mul, 0, 1, 8);
+ if (!cafe->rs) {
+ err = -ENOMEM;
+ goto out_ior;
+ }
+
+ cafe->nand.legacy.cmdfunc = cafe_nand_cmdfunc;
+ cafe->nand.legacy.dev_ready = cafe_device_ready;
+ cafe->nand.legacy.read_byte = cafe_read_byte;
+ cafe->nand.legacy.read_buf = cafe_read_buf;
+ cafe->nand.legacy.write_buf = cafe_write_buf;
+ cafe->nand.legacy.select_chip = cafe_select_chip;
+ cafe->nand.legacy.set_features = nand_get_set_features_notsupp;
+ cafe->nand.legacy.get_features = nand_get_set_features_notsupp;
+
+ cafe->nand.legacy.chip_delay = 0;
+
+ /* Enable the following for a flash based bad block table */
+ cafe->nand.bbt_options = NAND_BBT_USE_FLASH;
+
+ if (skipbbt)
+ cafe->nand.options |= NAND_SKIP_BBTSCAN | NAND_NO_BBM_QUIRK;
+
+ if (numtimings && numtimings != 3) {
+ dev_warn(&cafe->pdev->dev, "%d timing register values ignored; precisely three are required\n", numtimings);
+ }
+
+ if (numtimings == 3) {
+ cafe_dev_dbg(&cafe->pdev->dev, "Using provided timings (%08x %08x %08x)\n",
+ timing[0], timing[1], timing[2]);
+ } else {
+ timing[0] = cafe_readl(cafe, NAND_TIMING1);
+ timing[1] = cafe_readl(cafe, NAND_TIMING2);
+ timing[2] = cafe_readl(cafe, NAND_TIMING3);
+
+ if (timing[0] | timing[1] | timing[2]) {
+ cafe_dev_dbg(&cafe->pdev->dev, "Timing registers already set (%08x %08x %08x)\n",
+ timing[0], timing[1], timing[2]);
+ } else {
+ dev_warn(&cafe->pdev->dev, "Timing registers unset; using most conservative defaults\n");
+ timing[0] = timing[1] = timing[2] = 0xffffffff;
+ }
+ }
+
+ /* Start off by resetting the NAND controller completely */
+ cafe_writel(cafe, 1, NAND_RESET);
+ cafe_writel(cafe, 0, NAND_RESET);
+
+ cafe_writel(cafe, timing[0], NAND_TIMING1);
+ cafe_writel(cafe, timing[1], NAND_TIMING2);
+ cafe_writel(cafe, timing[2], NAND_TIMING3);
+
+ cafe_writel(cafe, 0xffffffff, NAND_IRQ_MASK);
+ err = request_irq(pdev->irq, &cafe_nand_interrupt, IRQF_SHARED,
+ "CAFE NAND", mtd);
+ if (err) {
+ dev_warn(&pdev->dev, "Could not register IRQ %d\n", pdev->irq);
+ goto out_free_rs;
+ }
+
+ /* Disable master reset, enable NAND clock */
+ ctrl = cafe_readl(cafe, GLOBAL_CTRL);
+ ctrl &= 0xffffeff0;
+ ctrl |= 0x00007000;
+ cafe_writel(cafe, ctrl | 0x05, GLOBAL_CTRL);
+ cafe_writel(cafe, ctrl | 0x0a, GLOBAL_CTRL);
+ cafe_writel(cafe, 0, NAND_DMA_CTRL);
+
+ cafe_writel(cafe, 0x7006, GLOBAL_CTRL);
+ cafe_writel(cafe, 0x700a, GLOBAL_CTRL);
+
+ /* Enable NAND IRQ in global IRQ mask register */
+ cafe_writel(cafe, 0x80000007, GLOBAL_IRQ_MASK);
+ cafe_dev_dbg(&cafe->pdev->dev, "Control %x, IRQ mask %x\n",
+ cafe_readl(cafe, GLOBAL_CTRL),
+ cafe_readl(cafe, GLOBAL_IRQ_MASK));
+
+ /* Do not use the DMA during the NAND identification */
+ cafe->usedma = 0;
+
+ /* Scan to find existence of the device */
+ cafe->nand.legacy.dummy_controller.ops = &cafe_nand_controller_ops;
+ err = nand_scan(&cafe->nand, 2);
+ if (err)
+ goto out_irq;
+
+ pci_set_drvdata(pdev, mtd);
+
+ mtd->name = "cafe_nand";
+ err = mtd_device_parse_register(mtd, part_probes, NULL, NULL, 0);
+ if (err)
+ goto out_cleanup_nand;
+
+ goto out;
+
+ out_cleanup_nand:
+ nand_cleanup(&cafe->nand);
+ out_irq:
+ /* Disable NAND IRQ in global IRQ mask register */
+ cafe_writel(cafe, ~1 & cafe_readl(cafe, GLOBAL_IRQ_MASK), GLOBAL_IRQ_MASK);
+ free_irq(pdev->irq, mtd);
+ out_free_rs:
+ free_rs(cafe->rs);
+ out_ior:
+ pci_iounmap(pdev, cafe->mmio);
+ out_free_mtd:
+ kfree(cafe);
+ out:
+ return err;
+}
+
+static void cafe_nand_remove(struct pci_dev *pdev)
+{
+ struct mtd_info *mtd = pci_get_drvdata(pdev);
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct cafe_priv *cafe = nand_get_controller_data(chip);
+ int ret;
+
+ /* Disable NAND IRQ in global IRQ mask register */
+ cafe_writel(cafe, ~1 & cafe_readl(cafe, GLOBAL_IRQ_MASK), GLOBAL_IRQ_MASK);
+ free_irq(pdev->irq, mtd);
+ ret = mtd_device_unregister(mtd);
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ free_rs(cafe->rs);
+ pci_iounmap(pdev, cafe->mmio);
+ dma_free_coherent(&cafe->pdev->dev, 2112, cafe->dmabuf, cafe->dmaaddr);
+ kfree(cafe);
+}
+
+static const struct pci_device_id cafe_nand_tbl[] = {
+ { PCI_VENDOR_ID_MARVELL, PCI_DEVICE_ID_MARVELL_88ALP01_NAND,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { }
+};
+
+MODULE_DEVICE_TABLE(pci, cafe_nand_tbl);
+
+static int cafe_nand_resume(struct pci_dev *pdev)
+{
+ uint32_t ctrl;
+ struct mtd_info *mtd = pci_get_drvdata(pdev);
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct cafe_priv *cafe = nand_get_controller_data(chip);
+
+ /* Start off by resetting the NAND controller completely */
+ cafe_writel(cafe, 1, NAND_RESET);
+ cafe_writel(cafe, 0, NAND_RESET);
+ cafe_writel(cafe, 0xffffffff, NAND_IRQ_MASK);
+
+ /* Restore timing configuration */
+ cafe_writel(cafe, timing[0], NAND_TIMING1);
+ cafe_writel(cafe, timing[1], NAND_TIMING2);
+ cafe_writel(cafe, timing[2], NAND_TIMING3);
+
+ /* Disable master reset, enable NAND clock */
+ ctrl = cafe_readl(cafe, GLOBAL_CTRL);
+ ctrl &= 0xffffeff0;
+ ctrl |= 0x00007000;
+ cafe_writel(cafe, ctrl | 0x05, GLOBAL_CTRL);
+ cafe_writel(cafe, ctrl | 0x0a, GLOBAL_CTRL);
+ cafe_writel(cafe, 0, NAND_DMA_CTRL);
+ cafe_writel(cafe, 0x7006, GLOBAL_CTRL);
+ cafe_writel(cafe, 0x700a, GLOBAL_CTRL);
+
+	/* Set up DMA address, as in cafe_nand_attach_chip() */
+	cafe_writel(cafe, lower_32_bits(cafe->dmaaddr), NAND_DMA_ADDR0);
+	cafe_writel(cafe, upper_32_bits(cafe->dmaaddr), NAND_DMA_ADDR1);
+
+ /* Enable NAND IRQ in global IRQ mask register */
+ cafe_writel(cafe, 0x80000007, GLOBAL_IRQ_MASK);
+ return 0;
+}
+
+static struct pci_driver cafe_nand_pci_driver = {
+ .name = "CAFÉ NAND",
+ .id_table = cafe_nand_tbl,
+ .probe = cafe_nand_probe,
+ .remove = cafe_nand_remove,
+ .resume = cafe_nand_resume,
+};
+
+module_pci_driver(cafe_nand_pci_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+MODULE_DESCRIPTION("NAND flash driver for OLPC CAFÉ chip");
diff --git a/drivers/mtd/nand/raw/cs553x_nand.c b/drivers/mtd/nand/raw/cs553x_nand.c
new file mode 100644
index 000000000..282203deb
--- /dev/null
+++ b/drivers/mtd/nand/raw/cs553x_nand.c
@@ -0,0 +1,427 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * (C) 2005, 2006 Red Hat Inc.
+ *
+ * Author: David Woodhouse <dwmw2@infradead.org>
+ * Tom Sylla <tom.sylla@amd.com>
+ *
+ * Overview:
+ * This is a device driver for the NAND flash controller found on
+ * the AMD CS5535/CS5536 companion chipsets for the Geode processor.
+ * mtd-id for command line partitioning is cs553x_nand_cs[0-3]
+ * where 0-3 reflects the chip select for NAND.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/partitions.h>
+#include <linux/iopoll.h>
+
+#include <asm/msr.h>
+
+#define NR_CS553X_CONTROLLERS 4
+
+#define MSR_DIVIL_GLD_CAP	0x51400000	/* DIVIL capabilities */
+#define CAP_CS5535 0x2df000ULL
+#define CAP_CS5536 0x5df500ULL
+
+/* NAND Timing MSRs */
+#define MSR_NANDF_DATA 0x5140001b /* NAND Flash Data Timing MSR */
+#define MSR_NANDF_CTL 0x5140001c /* NAND Flash Control Timing */
+#define MSR_NANDF_RSVD 0x5140001d /* Reserved */
+
+/* NAND BAR MSRs */
+#define MSR_DIVIL_LBAR_FLSH0 0x51400010 /* Flash Chip Select 0 */
+#define MSR_DIVIL_LBAR_FLSH1 0x51400011 /* Flash Chip Select 1 */
+#define MSR_DIVIL_LBAR_FLSH2 0x51400012 /* Flash Chip Select 2 */
+#define MSR_DIVIL_LBAR_FLSH3 0x51400013 /* Flash Chip Select 3 */
+ /* Each made up of... */
+#define FLSH_LBAR_EN (1ULL<<32)
+#define FLSH_NOR_NAND (1ULL<<33) /* 1 for NAND */
+#define FLSH_MEM_IO (1ULL<<34) /* 1 for MMIO */
+ /* I/O BARs have BASE_ADDR in bits 15:4, IO_MASK in 47:36 */
+ /* MMIO BARs have BASE_ADDR in bits 31:12, MEM_MASK in 63:44 */
+
+/* Pin function selection MSR (IDE vs. flash on the IDE pins) */
+#define MSR_DIVIL_BALL_OPTS 0x51400015
+#define PIN_OPT_IDE (1<<0) /* 0 for flash, 1 for IDE */
+
+/* Registers within the NAND flash controller BAR -- memory mapped */
+#define MM_NAND_DATA 0x00 /* 0 to 0x7ff, in fact */
+#define MM_NAND_CTL 0x800 /* Any even address 0x800-0x80e */
+#define MM_NAND_IO 0x801 /* Any odd address 0x801-0x80f */
+#define MM_NAND_STS 0x810
+#define MM_NAND_ECC_LSB 0x811
+#define MM_NAND_ECC_MSB 0x812
+#define MM_NAND_ECC_COL 0x813
+#define MM_NAND_LAC 0x814
+#define MM_NAND_ECC_CTL 0x815
+
+/* Registers within the NAND flash controller BAR -- I/O mapped */
+#define IO_NAND_DATA 0x00 /* 0 to 3, in fact */
+#define IO_NAND_CTL 0x04
+#define IO_NAND_IO 0x05
+#define IO_NAND_STS 0x06
+#define IO_NAND_ECC_CTL 0x08
+#define IO_NAND_ECC_LSB 0x09
+#define IO_NAND_ECC_MSB 0x0a
+#define IO_NAND_ECC_COL 0x0b
+#define IO_NAND_LAC 0x0c
+
+#define CS_NAND_CTL_DIST_EN (1<<4) /* Enable NAND Distract interrupt */
+#define CS_NAND_CTL_RDY_INT_MASK (1<<3) /* Enable RDY/BUSY# interrupt */
+#define CS_NAND_CTL_ALE (1<<2)
+#define CS_NAND_CTL_CLE (1<<1)
+#define CS_NAND_CTL_CE (1<<0) /* Keep low; 1 to reset */
+
+#define CS_NAND_STS_FLASH_RDY (1<<3)
+#define CS_NAND_CTLR_BUSY (1<<2)
+#define CS_NAND_CMD_COMP (1<<1)
+#define CS_NAND_DIST_ST (1<<0)
+
+#define CS_NAND_ECC_PARITY (1<<2)
+#define CS_NAND_ECC_CLRECC (1<<1)
+#define CS_NAND_ECC_ENECC (1<<0)
+
+struct cs553x_nand_controller {
+ struct nand_controller base;
+ struct nand_chip chip;
+ void __iomem *mmio;
+};
+
+static struct cs553x_nand_controller *
+to_cs553x(struct nand_controller *controller)
+{
+ return container_of(controller, struct cs553x_nand_controller, base);
+}
+
+static int cs553x_write_ctrl_byte(struct cs553x_nand_controller *cs553x,
+ u32 ctl, u8 data)
+{
+ u8 status;
+ int ret;
+
+ writeb(ctl, cs553x->mmio + MM_NAND_CTL);
+ writeb(data, cs553x->mmio + MM_NAND_IO);
+ ret = readb_poll_timeout_atomic(cs553x->mmio + MM_NAND_STS, status,
+ !(status & CS_NAND_CTLR_BUSY), 1,
+ 100000);
+ if (ret)
+ return ret;
+
+ return 0;
+}
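+
+/*
+ * Usage sketch: cs553x_exec_instr() below issues command cycles as
+ * cs553x_write_ctrl_byte(cs553x, CS_NAND_CTL_CLE, opcode) and address
+ * cycles as cs553x_write_ctrl_byte(cs553x, CS_NAND_CTL_ALE, addr_byte);
+ * the CTL write selects which latch the subsequent IO write lands in.
+ */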
+
+static void cs553x_data_in(struct cs553x_nand_controller *cs553x, void *buf,
+ unsigned int len)
+{
+ writeb(0, cs553x->mmio + MM_NAND_CTL);
+ while (unlikely(len > 0x800)) {
+ memcpy_fromio(buf, cs553x->mmio, 0x800);
+ buf += 0x800;
+ len -= 0x800;
+ }
+ memcpy_fromio(buf, cs553x->mmio, len);
+}
+
+static void cs553x_data_out(struct cs553x_nand_controller *cs553x,
+ const void *buf, unsigned int len)
+{
+ writeb(0, cs553x->mmio + MM_NAND_CTL);
+ while (unlikely(len > 0x800)) {
+ memcpy_toio(cs553x->mmio, buf, 0x800);
+ buf += 0x800;
+ len -= 0x800;
+ }
+ memcpy_toio(cs553x->mmio, buf, len);
+}
+
+static int cs553x_wait_ready(struct cs553x_nand_controller *cs553x,
+ unsigned int timeout_ms)
+{
+ u8 mask = CS_NAND_CTLR_BUSY | CS_NAND_STS_FLASH_RDY;
+ u8 status;
+
+ return readb_poll_timeout(cs553x->mmio + MM_NAND_STS, status,
+ (status & mask) == CS_NAND_STS_FLASH_RDY, 100,
+ timeout_ms * 1000);
+}
+
+static int cs553x_exec_instr(struct cs553x_nand_controller *cs553x,
+ const struct nand_op_instr *instr)
+{
+ unsigned int i;
+ int ret = 0;
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ ret = cs553x_write_ctrl_byte(cs553x, CS_NAND_CTL_CLE,
+ instr->ctx.cmd.opcode);
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ for (i = 0; i < instr->ctx.addr.naddrs; i++) {
+ ret = cs553x_write_ctrl_byte(cs553x, CS_NAND_CTL_ALE,
+ instr->ctx.addr.addrs[i]);
+ if (ret)
+ break;
+ }
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ cs553x_data_in(cs553x, instr->ctx.data.buf.in,
+ instr->ctx.data.len);
+ break;
+
+ case NAND_OP_DATA_OUT_INSTR:
+ cs553x_data_out(cs553x, instr->ctx.data.buf.out,
+ instr->ctx.data.len);
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ ret = cs553x_wait_ready(cs553x, instr->ctx.waitrdy.timeout_ms);
+ break;
+ }
+
+ if (instr->delay_ns)
+ ndelay(instr->delay_ns);
+
+ return ret;
+}
+
+static int cs553x_exec_op(struct nand_chip *this,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ struct cs553x_nand_controller *cs553x = to_cs553x(this->controller);
+ unsigned int i;
+ int ret;
+
+ if (check_only)
+ return true;
+
+ /* De-assert the CE pin */
+ writeb(0, cs553x->mmio + MM_NAND_CTL);
+ for (i = 0; i < op->ninstrs; i++) {
+ ret = cs553x_exec_instr(cs553x, &op->instrs[i]);
+ if (ret)
+ break;
+ }
+
+ /* Re-assert the CE pin. */
+ writeb(CS_NAND_CTL_CE, cs553x->mmio + MM_NAND_CTL);
+
+ return ret;
+}
+
+static void cs_enable_hwecc(struct nand_chip *this, int mode)
+{
+ struct cs553x_nand_controller *cs553x = to_cs553x(this->controller);
+
+ writeb(0x07, cs553x->mmio + MM_NAND_ECC_CTL);
+}
+
+static int cs_calculate_ecc(struct nand_chip *this, const u_char *dat,
+ u_char *ecc_code)
+{
+ struct cs553x_nand_controller *cs553x = to_cs553x(this->controller);
+ uint32_t ecc;
+
+ ecc = readl(cs553x->mmio + MM_NAND_STS);
+
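+	/*
+	 * The three Hamming ECC bytes arrive in bits 31:8 of this 32-bit read
+	 * (the ECC_LSB/ECC_MSB/ECC_COL registers behind MM_NAND_STS); reorder
+	 * them into the byte order that nand_correct_data() expects.
+	 */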
+ ecc_code[1] = ecc >> 8;
+ ecc_code[0] = ecc >> 16;
+ ecc_code[2] = ecc >> 24;
+ return 0;
+}
+
+static struct cs553x_nand_controller *controllers[4];
+
+static int cs553x_attach_chip(struct nand_chip *chip)
+{
+ if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
+ return 0;
+
+ chip->ecc.size = 256;
+ chip->ecc.bytes = 3;
+ chip->ecc.hwctl = cs_enable_hwecc;
+ chip->ecc.calculate = cs_calculate_ecc;
+ chip->ecc.correct = nand_correct_data;
+ chip->ecc.strength = 1;
+
+ return 0;
+}
+
+static const struct nand_controller_ops cs553x_nand_controller_ops = {
+ .exec_op = cs553x_exec_op,
+ .attach_chip = cs553x_attach_chip,
+};
+
+static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
+{
+ struct cs553x_nand_controller *controller;
+ int err = 0;
+ struct nand_chip *this;
+ struct mtd_info *new_mtd;
+
+ pr_notice("Probing CS553x NAND controller CS#%d at %sIO 0x%08lx\n",
+ cs, mmio ? "MM" : "P", adr);
+
+ if (!mmio) {
+ pr_notice("PIO mode not yet implemented for CS553X NAND controller\n");
+ return -ENXIO;
+ }
+
+ /* Allocate memory for MTD device structure and private data */
+ controller = kzalloc(sizeof(*controller), GFP_KERNEL);
+ if (!controller) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ this = &controller->chip;
+ nand_controller_init(&controller->base);
+ controller->base.ops = &cs553x_nand_controller_ops;
+ this->controller = &controller->base;
+ new_mtd = nand_to_mtd(this);
+
+ /* Link the private data with the MTD structure */
+ new_mtd->owner = THIS_MODULE;
+
+ /* map physical address */
+ controller->mmio = ioremap(adr, 4096);
+ if (!controller->mmio) {
+ pr_warn("ioremap cs553x NAND @0x%08lx failed\n", adr);
+ err = -EIO;
+ goto out_mtd;
+ }
+
+ /* Enable the following for a flash based bad block table */
+ this->bbt_options = NAND_BBT_USE_FLASH;
+
+ new_mtd->name = kasprintf(GFP_KERNEL, "cs553x_nand_cs%d", cs);
+ if (!new_mtd->name) {
+ err = -ENOMEM;
+ goto out_ior;
+ }
+
+ /* Scan to find existence of the device */
+ err = nand_scan(this, 1);
+ if (err)
+ goto out_free;
+
+ controllers[cs] = controller;
+ goto out;
+
+out_free:
+ kfree(new_mtd->name);
+out_ior:
+ iounmap(controller->mmio);
+out_mtd:
+ kfree(controller);
+out:
+ return err;
+}
+
+static int is_geode(void)
+{
+ /* These are the CPUs which will have a CS553[56] companion chip */
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+ boot_cpu_data.x86 == 5 &&
+ boot_cpu_data.x86_model == 10)
+ return 1; /* Geode LX */
+
+ if ((boot_cpu_data.x86_vendor == X86_VENDOR_NSC ||
+ boot_cpu_data.x86_vendor == X86_VENDOR_CYRIX) &&
+ boot_cpu_data.x86 == 5 &&
+ boot_cpu_data.x86_model == 5)
+ return 1; /* Geode GX (née GX2) */
+
+ return 0;
+}
+
+static int __init cs553x_init(void)
+{
+ int err = -ENXIO;
+ int i;
+ uint64_t val;
+
+ /* If the CPU isn't a Geode GX or LX, abort */
+ if (!is_geode())
+ return -ENXIO;
+
+ /* If it doesn't have the CS553[56], abort */
+ rdmsrl(MSR_DIVIL_GLD_CAP, val);
+ val &= ~0xFFULL;
+ if (val != CAP_CS5535 && val != CAP_CS5536)
+ return -ENXIO;
+
+ /* If it doesn't have the NAND controller enabled, abort */
+ rdmsrl(MSR_DIVIL_BALL_OPTS, val);
+ if (val & PIN_OPT_IDE) {
+ pr_info("CS553x NAND controller: Flash I/O not enabled in MSR_DIVIL_BALL_OPTS.\n");
+ return -ENXIO;
+ }
+
+ for (i = 0; i < NR_CS553X_CONTROLLERS; i++) {
+ rdmsrl(MSR_DIVIL_LBAR_FLSH0 + i, val);
+
+ if ((val & (FLSH_LBAR_EN|FLSH_NOR_NAND)) == (FLSH_LBAR_EN|FLSH_NOR_NAND))
+ err = cs553x_init_one(i, !!(val & FLSH_MEM_IO), val & 0xFFFFFFFF);
+ }
+
+ /* Register all devices together here. This means we can easily hack it to
+ do mtdconcat etc. if we want to. */
+ for (i = 0; i < NR_CS553X_CONTROLLERS; i++) {
+ if (controllers[i]) {
+ /* If any devices registered, return success. Else the last error. */
+ mtd_device_register(nand_to_mtd(&controllers[i]->chip),
+ NULL, 0);
+ err = 0;
+ }
+ }
+
+ return err;
+}
+
+module_init(cs553x_init);
+
+static void __exit cs553x_cleanup(void)
+{
+ int i;
+
+ for (i = 0; i < NR_CS553X_CONTROLLERS; i++) {
+		struct cs553x_nand_controller *controller = controllers[i];
+		struct nand_chip *this;
+		struct mtd_info *mtd;
+		int ret;
+
+		/* Skip chip selects that never probed successfully */
+		if (!controller)
+			continue;
+
+		this = &controller->chip;
+		mtd = nand_to_mtd(this);
+
+ /* Release resources, unregister device */
+ ret = mtd_device_unregister(mtd);
+ WARN_ON(ret);
+ nand_cleanup(this);
+ kfree(mtd->name);
+ controllers[i] = NULL;
+
+ /* unmap physical address */
+ iounmap(controller->mmio);
+
+ /* Free the MTD device structure */
+ kfree(controller);
+ }
+}
+
+module_exit(cs553x_cleanup);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+MODULE_DESCRIPTION("NAND controller driver for AMD CS5535/CS5536 companion chip");
diff --git a/drivers/mtd/nand/raw/davinci_nand.c b/drivers/mtd/nand/raw/davinci_nand.c
new file mode 100644
index 000000000..bfd3f440a
--- /dev/null
+++ b/drivers/mtd/nand/raw/davinci_nand.c
@@ -0,0 +1,924 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * davinci_nand.c - NAND Flash Driver for DaVinci family chips
+ *
+ * Copyright © 2006 Texas Instruments.
+ *
+ * Port to 2.6.23 Copyright © 2008 by:
+ * Sander Huijsen <Shuijsen@optelecom-nkf.com>
+ * Troy Kisky <troy.kisky@boundarydevices.com>
+ * Dirk Behme <Dirk.Behme@gmail.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/iopoll.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/slab.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+
+#include <linux/platform_data/mtd-davinci.h>
+#include <linux/platform_data/mtd-davinci-aemif.h>
+
+/*
+ * This is a device driver for the NAND flash controller found on the
+ * various DaVinci family chips. It handles up to four SoC chipselects,
+ * and some flavors of secondary chipselect (e.g. based on A12) as used
+ * with multichip packages.
+ *
+ * The 1-bit ECC hardware is supported, as well as the newer 4-bit ECC
+ * available on chips like the DM355 and OMAP-L137 and needed with the
+ * more error-prone MLC NAND chips.
+ *
+ * This driver assumes EM_WAIT connects all the NAND devices' RDY/nBUSY
+ * outputs in a "wire-AND" configuration, with no per-chip signals.
+ */
+struct davinci_nand_info {
+ struct nand_controller controller;
+ struct nand_chip chip;
+
+ struct platform_device *pdev;
+
+ bool is_readmode;
+
+ void __iomem *base;
+ void __iomem *vaddr;
+
+ void __iomem *current_cs;
+
+ uint32_t mask_chipsel;
+ uint32_t mask_ale;
+ uint32_t mask_cle;
+
+ uint32_t core_chipsel;
+
+ struct davinci_aemif_timing *timing;
+};
+
+static DEFINE_SPINLOCK(davinci_nand_lock);
+static bool ecc4_busy;
+
+static inline struct davinci_nand_info *to_davinci_nand(struct mtd_info *mtd)
+{
+ return container_of(mtd_to_nand(mtd), struct davinci_nand_info, chip);
+}
+
+static inline unsigned int davinci_nand_readl(struct davinci_nand_info *info,
+ int offset)
+{
+ return __raw_readl(info->base + offset);
+}
+
+static inline void davinci_nand_writel(struct davinci_nand_info *info,
+ int offset, unsigned long value)
+{
+ __raw_writel(value, info->base + offset);
+}
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * 1-bit hardware ECC ... context maintained for each core chipselect
+ */
+
+static inline uint32_t nand_davinci_readecc_1bit(struct mtd_info *mtd)
+{
+ struct davinci_nand_info *info = to_davinci_nand(mtd);
+
+ return davinci_nand_readl(info, NANDF1ECC_OFFSET
+ + 4 * info->core_chipsel);
+}
+
+static void nand_davinci_hwctl_1bit(struct nand_chip *chip, int mode)
+{
+ struct davinci_nand_info *info;
+ uint32_t nandcfr;
+ unsigned long flags;
+
+ info = to_davinci_nand(nand_to_mtd(chip));
+
+ /* Reset ECC hardware */
+ nand_davinci_readecc_1bit(nand_to_mtd(chip));
+
+ spin_lock_irqsave(&davinci_nand_lock, flags);
+
+ /* Restart ECC hardware */
+ nandcfr = davinci_nand_readl(info, NANDFCR_OFFSET);
+ nandcfr |= BIT(8 + info->core_chipsel);
+ davinci_nand_writel(info, NANDFCR_OFFSET, nandcfr);
+
+ spin_unlock_irqrestore(&davinci_nand_lock, flags);
+}
+
+/*
+ * Read hardware ECC value and pack into three bytes
+ */
+static int nand_davinci_calculate_1bit(struct nand_chip *chip,
+ const u_char *dat, u_char *ecc_code)
+{
+ unsigned int ecc_val = nand_davinci_readecc_1bit(nand_to_mtd(chip));
+ unsigned int ecc24 = (ecc_val & 0x0fff) | ((ecc_val & 0x0fff0000) >> 4);
+
+ /* invert so that erased block ecc is correct */
+ ecc24 = ~ecc24;
+ ecc_code[0] = (u_char)(ecc24);
+ ecc_code[1] = (u_char)(ecc24 >> 8);
+ ecc_code[2] = (u_char)(ecc24 >> 16);
+
+ return 0;
+}
+
+static int nand_davinci_correct_1bit(struct nand_chip *chip, u_char *dat,
+ u_char *read_ecc, u_char *calc_ecc)
+{
+ uint32_t eccNand = read_ecc[0] | (read_ecc[1] << 8) |
+ (read_ecc[2] << 16);
+ uint32_t eccCalc = calc_ecc[0] | (calc_ecc[1] << 8) |
+ (calc_ecc[2] << 16);
+ uint32_t diff = eccCalc ^ eccNand;
+
+ if (diff) {
+ if ((((diff >> 12) ^ diff) & 0xfff) == 0xfff) {
+ /* Correctable error */
+ if ((diff >> (12 + 3)) < chip->ecc.size) {
+ dat[diff >> (12 + 3)] ^= BIT((diff >> 12) & 7);
+ return 1;
+ } else {
+ return -EBADMSG;
+ }
+ } else if (!(diff & (diff - 1))) {
+ /* Single bit ECC error in the ECC itself,
+ * nothing to fix */
+ return 1;
+ } else {
+ /* Uncorrectable error */
+ return -EBADMSG;
+ }
+ }
+ return 0;
+}
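+
+/*
+ * Descriptive note on the decode above: for a correctable single-bit data
+ * error the two 12-bit halves of `diff' are bitwise complements; bits
+ * 14:12 then give the bit within the byte and the remaining upper bits
+ * the byte offset, hence dat[diff >> (12 + 3)] ^= BIT((diff >> 12) & 7).
+ */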
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * 4-bit hardware ECC ... context maintained over entire AEMIF
+ *
+ * This is a syndrome engine, but we avoid NAND_ECC_PLACEMENT_INTERLEAVED
+ * since that forces use of a problematic "infix OOB" layout.
+ * Among other things, it trashes manufacturer bad block markers.
+ * Also, and specific to this hardware, it ECC-protects the "prepad"
+ * in the OOB ... while having ECC protection for parts of OOB would
+ * seem useful, the current MTD stack sometimes wants to update the
+ * OOB without recomputing ECC.
+ */
+
+static void nand_davinci_hwctl_4bit(struct nand_chip *chip, int mode)
+{
+ struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(chip));
+ unsigned long flags;
+ u32 val;
+
+ /* Reset ECC hardware */
+ davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET);
+
+ spin_lock_irqsave(&davinci_nand_lock, flags);
+
+ /* Start 4-bit ECC calculation for read/write */
+ val = davinci_nand_readl(info, NANDFCR_OFFSET);
+ val &= ~(0x03 << 4);
+ val |= (info->core_chipsel << 4) | BIT(12);
+ davinci_nand_writel(info, NANDFCR_OFFSET, val);
+
+ info->is_readmode = (mode == NAND_ECC_READ);
+
+ spin_unlock_irqrestore(&davinci_nand_lock, flags);
+}
+
+/* Read raw ECC code after writing to NAND. */
+static void
+nand_davinci_readecc_4bit(struct davinci_nand_info *info, u32 code[4])
+{
+ const u32 mask = 0x03ff03ff;
+
+ code[0] = davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET) & mask;
+ code[1] = davinci_nand_readl(info, NAND_4BIT_ECC2_OFFSET) & mask;
+ code[2] = davinci_nand_readl(info, NAND_4BIT_ECC3_OFFSET) & mask;
+ code[3] = davinci_nand_readl(info, NAND_4BIT_ECC4_OFFSET) & mask;
+}
+
+/* Terminate read ECC; or return ECC (as bytes) of data written to NAND. */
+static int nand_davinci_calculate_4bit(struct nand_chip *chip,
+ const u_char *dat, u_char *ecc_code)
+{
+ struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(chip));
+ u32 raw_ecc[4], *p;
+ unsigned i;
+
+ /* After a read, terminate ECC calculation by a dummy read
+ * of some 4-bit ECC register. ECC covers everything that
+ * was read; correct() just uses the hardware state, so
+ * ecc_code is not needed.
+ */
+ if (info->is_readmode) {
+ davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET);
+ return 0;
+ }
+
+ /* Pack eight raw 10-bit ecc values into ten bytes, making
+ * two passes which each convert four values (in upper and
+ * lower halves of two 32-bit words) into five bytes. The
+ * ROM boot loader uses this same packing scheme.
+ */
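+	/*
+	 * Concretely: each register pair holds four 10-bit values (two per
+	 * 32-bit word, at bits 9:0 and 25:16 after masking), and each pass
+	 * packs those 40 bits LSB-first into five output bytes.
+	 */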
+ nand_davinci_readecc_4bit(info, raw_ecc);
+ for (i = 0, p = raw_ecc; i < 2; i++, p += 2) {
+ *ecc_code++ = p[0] & 0xff;
+ *ecc_code++ = ((p[0] >> 8) & 0x03) | ((p[0] >> 14) & 0xfc);
+ *ecc_code++ = ((p[0] >> 22) & 0x0f) | ((p[1] << 4) & 0xf0);
+ *ecc_code++ = ((p[1] >> 4) & 0x3f) | ((p[1] >> 10) & 0xc0);
+ *ecc_code++ = (p[1] >> 18) & 0xff;
+ }
+
+ return 0;
+}
+
+/* Correct up to 4 bits in data we just read, using state left in the
+ * hardware plus the ecc_code computed when it was first written.
+ */
+static int nand_davinci_correct_4bit(struct nand_chip *chip, u_char *data,
+ u_char *ecc_code, u_char *null)
+{
+ int i;
+ struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(chip));
+ unsigned short ecc10[8];
+ unsigned short *ecc16;
+ u32 syndrome[4];
+ u32 ecc_state;
+ unsigned num_errors, corrected;
+ unsigned long timeo;
+
+ /* Unpack ten bytes into eight 10 bit values. We know we're
+ * little-endian, and use type punning for less shifting/masking.
+ */
+ if (WARN_ON(0x01 & (uintptr_t)ecc_code))
+ return -EINVAL;
+ ecc16 = (unsigned short *)ecc_code;
+
+ ecc10[0] = (ecc16[0] >> 0) & 0x3ff;
+ ecc10[1] = ((ecc16[0] >> 10) & 0x3f) | ((ecc16[1] << 6) & 0x3c0);
+ ecc10[2] = (ecc16[1] >> 4) & 0x3ff;
+ ecc10[3] = ((ecc16[1] >> 14) & 0x3) | ((ecc16[2] << 2) & 0x3fc);
+ ecc10[4] = (ecc16[2] >> 8) | ((ecc16[3] << 8) & 0x300);
+ ecc10[5] = (ecc16[3] >> 2) & 0x3ff;
+ ecc10[6] = ((ecc16[3] >> 12) & 0xf) | ((ecc16[4] << 4) & 0x3f0);
+ ecc10[7] = (ecc16[4] >> 6) & 0x3ff;
+
+ /* Tell ECC controller about the expected ECC codes. */
+ for (i = 7; i >= 0; i--)
+ davinci_nand_writel(info, NAND_4BIT_ECC_LOAD_OFFSET, ecc10[i]);
+
+ /* Allow time for syndrome calculation ... then read it.
+	 * A syndrome of all zeroes means no detected errors.
+ */
+ davinci_nand_readl(info, NANDFSR_OFFSET);
+ nand_davinci_readecc_4bit(info, syndrome);
+ if (!(syndrome[0] | syndrome[1] | syndrome[2] | syndrome[3]))
+ return 0;
+
+ /*
+ * Clear any previous address calculation by doing a dummy read of an
+ * error address register.
+ */
+ davinci_nand_readl(info, NAND_ERR_ADD1_OFFSET);
+
+ /* Start address calculation, and wait for it to complete.
+ * We _could_ start reading more data while this is working,
+ * to speed up the overall page read.
+ */
+ davinci_nand_writel(info, NANDFCR_OFFSET,
+ davinci_nand_readl(info, NANDFCR_OFFSET) | BIT(13));
+
+ /*
+ * ECC_STATE field reads 0x3 (Error correction complete) immediately
+ * after setting the 4BITECC_ADD_CALC_START bit. So if you immediately
+ * begin trying to poll for the state, you may fall right out of your
+ * loop without any of the correction calculations having taken place.
+ * The recommendation from the hardware team is to initially delay as
+ * long as ECC_STATE reads less than 4. After that, ECC HW has entered
+ * correction state.
+ */
+ timeo = jiffies + usecs_to_jiffies(100);
+ do {
+ ecc_state = (davinci_nand_readl(info,
+ NANDFSR_OFFSET) >> 8) & 0x0f;
+ cpu_relax();
+ } while ((ecc_state < 4) && time_before(jiffies, timeo));
+
+ for (;;) {
+ u32 fsr = davinci_nand_readl(info, NANDFSR_OFFSET);
+
+ switch ((fsr >> 8) & 0x0f) {
+ case 0: /* no error, should not happen */
+ davinci_nand_readl(info, NAND_ERR_ERRVAL1_OFFSET);
+ return 0;
+ case 1: /* five or more errors detected */
+ davinci_nand_readl(info, NAND_ERR_ERRVAL1_OFFSET);
+ return -EBADMSG;
+ case 2: /* error addresses computed */
+ case 3:
+ num_errors = 1 + ((fsr >> 16) & 0x03);
+ goto correct;
+ default: /* still working on it */
+ cpu_relax();
+ continue;
+ }
+ }
+
+correct:
+ /* correct each error */
+ for (i = 0, corrected = 0; i < num_errors; i++) {
+ int error_address, error_value;
+
+ if (i > 1) {
+ error_address = davinci_nand_readl(info,
+ NAND_ERR_ADD2_OFFSET);
+ error_value = davinci_nand_readl(info,
+ NAND_ERR_ERRVAL2_OFFSET);
+ } else {
+ error_address = davinci_nand_readl(info,
+ NAND_ERR_ADD1_OFFSET);
+ error_value = davinci_nand_readl(info,
+ NAND_ERR_ERRVAL1_OFFSET);
+ }
+
+ if (i & 1) {
+ error_address >>= 16;
+ error_value >>= 16;
+ }
+ error_address &= 0x3ff;
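+		/*
+		 * The hardware reports positions relative to the end of the
+		 * protected chunk, hence the (512 + 7) - error_address flip
+		 * below. Results >= 512 index the ECC bytes themselves and
+		 * are deliberately left uncorrected by the check that follows.
+		 */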
+ error_address = (512 + 7) - error_address;
+
+ if (error_address < 512) {
+ data[error_address] ^= error_value;
+ corrected++;
+ }
+ }
+
+ return corrected;
+}
+
+/**
+ * nand_davinci_read_page_hwecc_oob_first - Hardware ECC page read with ECC
+ * data read from OOB area
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * Hardware ECC for large page chips, which requires the ECC data to be
+ * extracted from the OOB before the actual data is read.
+ */
+static int nand_davinci_read_page_hwecc_oob_first(struct nand_chip *chip,
+ uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int i, eccsize = chip->ecc.size, ret;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ uint8_t *p = buf;
+ uint8_t *ecc_code = chip->ecc.code_buf;
+ unsigned int max_bitflips = 0;
+
+ /* Read the OOB area first */
+ ret = nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
+ if (ret)
+ return ret;
+
+ /* Move read cursor to start of page */
+ ret = nand_change_read_column_op(chip, 0, NULL, 0, false);
+ if (ret)
+ return ret;
+
+ ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
+ chip->ecc.total);
+ if (ret)
+ return ret;
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ int stat;
+
+ chip->ecc.hwctl(chip, NAND_ECC_READ);
+
+ ret = nand_read_data_op(chip, p, eccsize, false, false);
+ if (ret)
+ return ret;
+
+ stat = chip->ecc.correct(chip, p, &ecc_code[i], NULL);
+ if (stat == -EBADMSG &&
+ (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
+ /* check for empty pages with bitflips */
+ stat = nand_check_erased_ecc_chunk(p, eccsize,
+ &ecc_code[i],
+ eccbytes, NULL, 0,
+ chip->ecc.strength);
+ }
+
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+ }
+ return max_bitflips;
+}
+
+/*----------------------------------------------------------------------*/
+
+/* An ECC layout for using 4-bit ECC with small-page flash, storing
+ * ten ECC bytes plus the manufacturer's bad block marker byte, and
+ * not overlapping the default BBT markers.
+ */
+static int hwecc4_ooblayout_small_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section > 2)
+ return -ERANGE;
+
+ if (!section) {
+ oobregion->offset = 0;
+ oobregion->length = 5;
+ } else if (section == 1) {
+ oobregion->offset = 6;
+ oobregion->length = 2;
+ } else {
+ oobregion->offset = 13;
+ oobregion->length = 3;
+ }
+
+ return 0;
+}
+
+static int hwecc4_ooblayout_small_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section > 1)
+ return -ERANGE;
+
+ if (!section) {
+ oobregion->offset = 8;
+ oobregion->length = 5;
+ } else {
+ oobregion->offset = 16;
+ oobregion->length = mtd->oobsize - 16;
+ }
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops hwecc4_small_ooblayout_ops = {
+ .ecc = hwecc4_ooblayout_small_ecc,
+ .free = hwecc4_ooblayout_small_free,
+};
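+
+/*
+ * The resulting 16-byte small-page OOB map, for reference: bytes 0-4,
+ * 6-7 and 13-15 hold the ten ECC bytes; byte 5 is left alone for the
+ * manufacturer's bad block marker; bytes 8-12 (plus anything past byte
+ * 16 on larger OOBs) stay free for BBT markers.
+ */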
+
+#if defined(CONFIG_OF)
+static const struct of_device_id davinci_nand_of_match[] = {
+ {.compatible = "ti,davinci-nand", },
+ {.compatible = "ti,keystone-nand", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, davinci_nand_of_match);
+
+static struct davinci_nand_pdata
+ *nand_davinci_get_pdata(struct platform_device *pdev)
+{
+ if (!dev_get_platdata(&pdev->dev) && pdev->dev.of_node) {
+ struct davinci_nand_pdata *pdata;
+ const char *mode;
+ u32 prop;
+
+ pdata = devm_kzalloc(&pdev->dev,
+ sizeof(struct davinci_nand_pdata),
+ GFP_KERNEL);
+ pdev->dev.platform_data = pdata;
+ if (!pdata)
+ return ERR_PTR(-ENOMEM);
+ if (!of_property_read_u32(pdev->dev.of_node,
+ "ti,davinci-chipselect", &prop))
+ pdata->core_chipsel = prop;
+ else
+ return ERR_PTR(-EINVAL);
+
+ if (!of_property_read_u32(pdev->dev.of_node,
+ "ti,davinci-mask-ale", &prop))
+ pdata->mask_ale = prop;
+ if (!of_property_read_u32(pdev->dev.of_node,
+ "ti,davinci-mask-cle", &prop))
+ pdata->mask_cle = prop;
+ if (!of_property_read_u32(pdev->dev.of_node,
+ "ti,davinci-mask-chipsel", &prop))
+ pdata->mask_chipsel = prop;
+ if (!of_property_read_string(pdev->dev.of_node,
+ "ti,davinci-ecc-mode", &mode)) {
+ if (!strncmp("none", mode, 4))
+ pdata->engine_type = NAND_ECC_ENGINE_TYPE_NONE;
+ if (!strncmp("soft", mode, 4))
+ pdata->engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+ if (!strncmp("hw", mode, 2))
+ pdata->engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+ }
+ if (!of_property_read_u32(pdev->dev.of_node,
+ "ti,davinci-ecc-bits", &prop))
+ pdata->ecc_bits = prop;
+
+ if (!of_property_read_u32(pdev->dev.of_node,
+ "ti,davinci-nand-buswidth", &prop) && prop == 16)
+ pdata->options |= NAND_BUSWIDTH_16;
+
+ if (of_property_read_bool(pdev->dev.of_node,
+ "ti,davinci-nand-use-bbt"))
+ pdata->bbt_options = NAND_BBT_USE_FLASH;
+
+ /*
+ * Since kernel v4.8, this driver has been fixed to enable
+ * use of 4-bit hardware ECC with subpages and verified on
+ * TI's keystone EVMs (K2L, K2HK and K2E).
+ * However, in the interest of not breaking systems using
+ * existing UBI partitions, sub-page writes are not being
+ * (re)enabled. If you want to use subpage writes on Keystone
+ * platforms (i.e. do not have any existing UBI partitions),
+ * then use "ti,davinci-nand" as the compatible in your
+ * device-tree file.
+ */
+ if (of_device_is_compatible(pdev->dev.of_node,
+ "ti,keystone-nand")) {
+ pdata->options |= NAND_NO_SUBPAGE_WRITE;
+ }
+ }
+
+ return dev_get_platdata(&pdev->dev);
+}
+#else
+static struct davinci_nand_pdata
+ *nand_davinci_get_pdata(struct platform_device *pdev)
+{
+ return dev_get_platdata(&pdev->dev);
+}
+#endif
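+
+/*
+ * Illustrative device-tree node matching the properties parsed above
+ * (values are placeholders; see the binding document for the
+ * authoritative description):
+ *
+ *	nand@0 {
+ *		compatible = "ti,davinci-nand";
+ *		ti,davinci-chipselect = <1>;
+ *		ti,davinci-mask-ale = <0>;
+ *		ti,davinci-mask-cle = <0>;
+ *		ti,davinci-mask-chipsel = <0>;
+ *		ti,davinci-ecc-mode = "hw";
+ *		ti,davinci-ecc-bits = <4>;
+ *		ti,davinci-nand-use-bbt;
+ *	};
+ */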
+
+static int davinci_nand_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct davinci_nand_info *info = to_davinci_nand(mtd);
+ struct davinci_nand_pdata *pdata = nand_davinci_get_pdata(info->pdev);
+ int ret = 0;
+
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+
+ /* Use board-specific ECC config */
+ info->chip.ecc.engine_type = pdata->engine_type;
+ info->chip.ecc.placement = pdata->ecc_placement;
+
+ switch (info->chip.ecc.engine_type) {
+ case NAND_ECC_ENGINE_TYPE_NONE:
+ pdata->ecc_bits = 0;
+ break;
+ case NAND_ECC_ENGINE_TYPE_SOFT:
+ pdata->ecc_bits = 0;
+ /*
+ * This driver expects Hamming based ECC when engine_type is set
+ * to NAND_ECC_ENGINE_TYPE_SOFT. Force ecc.algo to
+ * NAND_ECC_ALGO_HAMMING to avoid adding an extra ->ecc_algo
+ * field to davinci_nand_pdata.
+ */
+ info->chip.ecc.algo = NAND_ECC_ALGO_HAMMING;
+ break;
+ case NAND_ECC_ENGINE_TYPE_ON_HOST:
+ if (pdata->ecc_bits == 4) {
+ int chunks = mtd->writesize / 512;
+
+ if (!chunks || mtd->oobsize < 16) {
+ dev_dbg(&info->pdev->dev, "too small\n");
+ return -EINVAL;
+ }
+
+ /*
+ * No sanity checks: CPUs must support this,
+ * and the chips may not use NAND_BUSWIDTH_16.
+ */
+
+ /* No sharing 4-bit hardware between chipselects yet */
+ spin_lock_irq(&davinci_nand_lock);
+ if (ecc4_busy)
+ ret = -EBUSY;
+ else
+ ecc4_busy = true;
+ spin_unlock_irq(&davinci_nand_lock);
+
+ if (ret == -EBUSY)
+ return ret;
+
+ info->chip.ecc.calculate = nand_davinci_calculate_4bit;
+ info->chip.ecc.correct = nand_davinci_correct_4bit;
+ info->chip.ecc.hwctl = nand_davinci_hwctl_4bit;
+ info->chip.ecc.bytes = 10;
+ info->chip.ecc.options = NAND_ECC_GENERIC_ERASED_CHECK;
+ info->chip.ecc.algo = NAND_ECC_ALGO_BCH;
+
+ /*
+ * Update ECC layout if needed ... for 1-bit HW ECC, the
+ * default is OK, but it allocates 6 bytes when only 3
+ * are needed (for each 512 bytes). For 4-bit HW ECC,
+ * the default is not usable: 10 bytes needed, not 6.
+ *
+ * For small page chips, preserve the manufacturer's
+ * badblock marking data ... and make sure a flash BBT
+ * table marker fits in the free bytes.
+ */
+ if (chunks == 1) {
+ mtd_set_ooblayout(mtd,
+ &hwecc4_small_ooblayout_ops);
+ } else if (chunks == 4 || chunks == 8) {
+ mtd_set_ooblayout(mtd,
+ nand_get_large_page_ooblayout());
+ info->chip.ecc.read_page = nand_davinci_read_page_hwecc_oob_first;
+ } else {
+ return -EIO;
+ }
+ } else {
+ /* 1bit ecc hamming */
+ info->chip.ecc.calculate = nand_davinci_calculate_1bit;
+ info->chip.ecc.correct = nand_davinci_correct_1bit;
+ info->chip.ecc.hwctl = nand_davinci_hwctl_1bit;
+ info->chip.ecc.bytes = 3;
+ info->chip.ecc.algo = NAND_ECC_ALGO_HAMMING;
+ }
+ info->chip.ecc.size = 512;
+ info->chip.ecc.strength = pdata->ecc_bits;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static void nand_davinci_data_in(struct davinci_nand_info *info, void *buf,
+ unsigned int len, bool force_8bit)
+{
+ u32 alignment = ((uintptr_t)buf | len) & 3;
+
+ if (force_8bit || (alignment & 1))
+ ioread8_rep(info->current_cs, buf, len);
+ else if (alignment & 3)
+ ioread16_rep(info->current_cs, buf, len >> 1);
+ else
+ ioread32_rep(info->current_cs, buf, len >> 2);
+}
+
+static void nand_davinci_data_out(struct davinci_nand_info *info,
+ const void *buf, unsigned int len,
+ bool force_8bit)
+{
+ u32 alignment = ((uintptr_t)buf | len) & 3;
+
+ if (force_8bit || (alignment & 1))
+ iowrite8_rep(info->current_cs, buf, len);
+ else if (alignment & 3)
+ iowrite16_rep(info->current_cs, buf, len >> 1);
+ else
+ iowrite32_rep(info->current_cs, buf, len >> 2);
+}
+
+static int davinci_nand_exec_instr(struct davinci_nand_info *info,
+ const struct nand_op_instr *instr)
+{
+ unsigned int i, timeout_us;
+ u32 status;
+ int ret;
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ iowrite8(instr->ctx.cmd.opcode,
+ info->current_cs + info->mask_cle);
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ for (i = 0; i < instr->ctx.addr.naddrs; i++) {
+ iowrite8(instr->ctx.addr.addrs[i],
+ info->current_cs + info->mask_ale);
+ }
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ nand_davinci_data_in(info, instr->ctx.data.buf.in,
+ instr->ctx.data.len,
+ instr->ctx.data.force_8bit);
+ break;
+
+ case NAND_OP_DATA_OUT_INSTR:
+ nand_davinci_data_out(info, instr->ctx.data.buf.out,
+ instr->ctx.data.len,
+ instr->ctx.data.force_8bit);
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ timeout_us = instr->ctx.waitrdy.timeout_ms * 1000;
+ ret = readl_relaxed_poll_timeout(info->base + NANDFSR_OFFSET,
+ status, status & BIT(0), 100,
+ timeout_us);
+ if (ret)
+ return ret;
+
+ break;
+ }
+
+ if (instr->delay_ns)
+ ndelay(instr->delay_ns);
+
+ return 0;
+}
+
+static int davinci_nand_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(chip));
+ unsigned int i;
+
+ if (check_only)
+ return 0;
+
+ info->current_cs = info->vaddr + (op->cs * info->mask_chipsel);
+
+ for (i = 0; i < op->ninstrs; i++) {
+ int ret;
+
+ ret = davinci_nand_exec_instr(info, &op->instrs[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct nand_controller_ops davinci_nand_controller_ops = {
+ .attach_chip = davinci_nand_attach_chip,
+ .exec_op = davinci_nand_exec_op,
+};
+
+static int nand_davinci_probe(struct platform_device *pdev)
+{
+ struct davinci_nand_pdata *pdata;
+ struct davinci_nand_info *info;
+ struct resource *res1;
+ struct resource *res2;
+ void __iomem *vaddr;
+ void __iomem *base;
+ int ret;
+ uint32_t val;
+ struct mtd_info *mtd;
+
+ pdata = nand_davinci_get_pdata(pdev);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+
+ /* insist on board-specific configuration */
+ if (!pdata)
+ return -ENODEV;
+
+ /* which external chipselect will we be managing? */
+ if (pdata->core_chipsel < 0 || pdata->core_chipsel > 3)
+ return -ENODEV;
+
+ info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, info);
+
+ res1 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ res2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res1 || !res2) {
+ dev_err(&pdev->dev, "resource missing\n");
+ return -EINVAL;
+ }
+
+ vaddr = devm_ioremap_resource(&pdev->dev, res1);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
+
+	/*
+	 * This register range is used to set up the NAND controller. When the
+	 * TI AEMIF driver is in use, the same memory address range has already
+	 * been requested by AEMIF, so we cannot request it twice; just ioremap
+	 * it. The AEMIF and NAND drivers do not use the same registers within
+	 * this range.
+	 */
+ base = devm_ioremap(&pdev->dev, res2->start, resource_size(res2));
+ if (!base) {
+ dev_err(&pdev->dev, "ioremap failed for resource %pR\n", res2);
+ return -EADDRNOTAVAIL;
+ }
+
+ info->pdev = pdev;
+ info->base = base;
+ info->vaddr = vaddr;
+
+ mtd = nand_to_mtd(&info->chip);
+ mtd->dev.parent = &pdev->dev;
+ nand_set_flash_node(&info->chip, pdev->dev.of_node);
+
+ /* options such as NAND_BBT_USE_FLASH */
+ info->chip.bbt_options = pdata->bbt_options;
+ /* options such as 16-bit widths */
+ info->chip.options = pdata->options;
+ info->chip.bbt_td = pdata->bbt_td;
+ info->chip.bbt_md = pdata->bbt_md;
+ info->timing = pdata->timing;
+
+ info->current_cs = info->vaddr;
+ info->core_chipsel = pdata->core_chipsel;
+ info->mask_chipsel = pdata->mask_chipsel;
+
+ /* use nandboot-capable ALE/CLE masks by default */
+ info->mask_ale = pdata->mask_ale ? : MASK_ALE;
+ info->mask_cle = pdata->mask_cle ? : MASK_CLE;
+
+ spin_lock_irq(&davinci_nand_lock);
+
+ /* put CSxNAND into NAND mode */
+ val = davinci_nand_readl(info, NANDFCR_OFFSET);
+ val |= BIT(info->core_chipsel);
+ davinci_nand_writel(info, NANDFCR_OFFSET, val);
+
+ spin_unlock_irq(&davinci_nand_lock);
+
+ /* Scan to find existence of the device(s) */
+ nand_controller_init(&info->controller);
+ info->controller.ops = &davinci_nand_controller_ops;
+ info->chip.controller = &info->controller;
+ ret = nand_scan(&info->chip, pdata->mask_chipsel ? 2 : 1);
+ if (ret < 0) {
+ dev_dbg(&pdev->dev, "no NAND chip(s) found\n");
+ return ret;
+ }
+
+ if (pdata->parts)
+ ret = mtd_device_register(mtd, pdata->parts, pdata->nr_parts);
+ else
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret < 0)
+ goto err_cleanup_nand;
+
+ val = davinci_nand_readl(info, NRCSR_OFFSET);
+ dev_info(&pdev->dev, "controller rev. %d.%d\n",
+ (val >> 8) & 0xff, val & 0xff);
+
+ return 0;
+
+err_cleanup_nand:
+ nand_cleanup(&info->chip);
+
+ return ret;
+}
+
+static int nand_davinci_remove(struct platform_device *pdev)
+{
+ struct davinci_nand_info *info = platform_get_drvdata(pdev);
+ struct nand_chip *chip = &info->chip;
+ int ret;
+
+ spin_lock_irq(&davinci_nand_lock);
+ if (info->chip.ecc.placement == NAND_ECC_PLACEMENT_INTERLEAVED)
+ ecc4_busy = false;
+ spin_unlock_irq(&davinci_nand_lock);
+
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+
+ return 0;
+}
+
+static struct platform_driver nand_davinci_driver = {
+ .probe = nand_davinci_probe,
+ .remove = nand_davinci_remove,
+ .driver = {
+ .name = "davinci_nand",
+ .of_match_table = of_match_ptr(davinci_nand_of_match),
+ },
+};
+MODULE_ALIAS("platform:davinci_nand");
+
+module_platform_driver(nand_davinci_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Texas Instruments");
+MODULE_DESCRIPTION("Davinci NAND flash driver");
diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c
new file mode 100644
index 000000000..fa2439cb4
--- /dev/null
+++ b/drivers/mtd/nand/raw/denali.c
@@ -0,0 +1,1381 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NAND Flash Controller Device Driver
+ * Copyright © 2009-2010, Intel Corporation and its suppliers.
+ *
+ * Copyright (c) 2017-2019 Socionext Inc.
+ * Reworked by Masahiro Yamada <yamada.masahiro@socionext.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/completion.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "denali.h"
+
+#define DENALI_NAND_NAME "denali-nand"
+
+/* for Indexed Addressing */
+#define DENALI_INDEXED_CTRL 0x00
+#define DENALI_INDEXED_DATA 0x10
+
+#define DENALI_MAP00 (0 << 26) /* direct access to buffer */
+#define DENALI_MAP01 (1 << 26) /* read/write pages in PIO */
+#define DENALI_MAP10 (2 << 26) /* high-level control plane */
+#define DENALI_MAP11 (3 << 26) /* direct controller access */
+
+/* MAP11 access cycle type */
+#define DENALI_MAP11_CMD ((DENALI_MAP11) | 0) /* command cycle */
+#define DENALI_MAP11_ADDR ((DENALI_MAP11) | 1) /* address cycle */
+#define DENALI_MAP11_DATA ((DENALI_MAP11) | 2) /* data cycle */
+
+#define DENALI_BANK(denali) ((denali)->active_bank << 24)
+
+#define DENALI_INVALID_BANK -1
+
+static struct denali_chip *to_denali_chip(struct nand_chip *chip)
+{
+ return container_of(chip, struct denali_chip, chip);
+}
+
+static struct denali_controller *to_denali_controller(struct nand_chip *chip)
+{
+ return container_of(chip->controller, struct denali_controller,
+ controller);
+}
+
+/*
+ * Direct Addressing - the slave address forms the control information (command
+ * type, bank, block, and page address). The slave data is the actual data to
+ * be transferred. This mode requires a 28-bit address region to be allocated.
+ */
+static u32 denali_direct_read(struct denali_controller *denali, u32 addr)
+{
+ return ioread32(denali->host + addr);
+}
+
+static void denali_direct_write(struct denali_controller *denali, u32 addr,
+ u32 data)
+{
+ iowrite32(data, denali->host + addr);
+}
+
+/*
+ * Indexed Addressing - an address translation module intervenes in passing
+ * the control information. This mode reduces the required address range. The
+ * control information and transferred data are latched by the registers in
+ * the translation module.
+ */
+static u32 denali_indexed_read(struct denali_controller *denali, u32 addr)
+{
+ iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
+ return ioread32(denali->host + DENALI_INDEXED_DATA);
+}
+
+static void denali_indexed_write(struct denali_controller *denali, u32 addr,
+ u32 data)
+{
+ iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
+ iowrite32(data, denali->host + DENALI_INDEXED_DATA);
+}
+
+static void denali_enable_irq(struct denali_controller *denali)
+{
+ int i;
+
+ for (i = 0; i < denali->nbanks; i++)
+ iowrite32(U32_MAX, denali->reg + INTR_EN(i));
+ iowrite32(GLOBAL_INT_EN_FLAG, denali->reg + GLOBAL_INT_ENABLE);
+}
+
+static void denali_disable_irq(struct denali_controller *denali)
+{
+ int i;
+
+ for (i = 0; i < denali->nbanks; i++)
+ iowrite32(0, denali->reg + INTR_EN(i));
+ iowrite32(0, denali->reg + GLOBAL_INT_ENABLE);
+}
+
+static void denali_clear_irq(struct denali_controller *denali,
+ int bank, u32 irq_status)
+{
+ /* write one to clear bits */
+ iowrite32(irq_status, denali->reg + INTR_STATUS(bank));
+}
+
+static void denali_clear_irq_all(struct denali_controller *denali)
+{
+ int i;
+
+ for (i = 0; i < denali->nbanks; i++)
+ denali_clear_irq(denali, i, U32_MAX);
+}
+
+static irqreturn_t denali_isr(int irq, void *dev_id)
+{
+ struct denali_controller *denali = dev_id;
+ irqreturn_t ret = IRQ_NONE;
+ u32 irq_status;
+ int i;
+
+ spin_lock(&denali->irq_lock);
+
+ for (i = 0; i < denali->nbanks; i++) {
+ irq_status = ioread32(denali->reg + INTR_STATUS(i));
+ if (irq_status)
+ ret = IRQ_HANDLED;
+
+ denali_clear_irq(denali, i, irq_status);
+
+ if (i != denali->active_bank)
+ continue;
+
+ denali->irq_status |= irq_status;
+
+ if (denali->irq_status & denali->irq_mask)
+ complete(&denali->complete);
+ }
+
+ spin_unlock(&denali->irq_lock);
+
+ return ret;
+}
+
+static void denali_reset_irq(struct denali_controller *denali)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&denali->irq_lock, flags);
+ denali->irq_status = 0;
+ denali->irq_mask = 0;
+ spin_unlock_irqrestore(&denali->irq_lock, flags);
+}
+
+static u32 denali_wait_for_irq(struct denali_controller *denali, u32 irq_mask)
+{
+ unsigned long time_left, flags;
+ u32 irq_status;
+
+ spin_lock_irqsave(&denali->irq_lock, flags);
+
+ irq_status = denali->irq_status;
+
+ if (irq_mask & irq_status) {
+ /* return immediately if the IRQ has already happened. */
+ spin_unlock_irqrestore(&denali->irq_lock, flags);
+ return irq_status;
+ }
+
+ denali->irq_mask = irq_mask;
+ reinit_completion(&denali->complete);
+ spin_unlock_irqrestore(&denali->irq_lock, flags);
+
+ time_left = wait_for_completion_timeout(&denali->complete,
+ msecs_to_jiffies(1000));
+ if (!time_left) {
+ dev_err(denali->dev, "timeout while waiting for irq 0x%x\n",
+ irq_mask);
+ return 0;
+ }
+
+ return denali->irq_status;
+}
+
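+/*
+ * Make the bank behind @cs the active one and program the per-chip geometry
+ * and ECC registers. The timing registers are also loaded unless
+ * NAND_KEEP_TIMINGS is set for the chip.
+ */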
+static void denali_select_target(struct nand_chip *chip, int cs)
+{
+ struct denali_controller *denali = to_denali_controller(chip);
+ struct denali_chip_sel *sel = &to_denali_chip(chip)->sels[cs];
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ denali->active_bank = sel->bank;
+
+ iowrite32(1 << (chip->phys_erase_shift - chip->page_shift),
+ denali->reg + PAGES_PER_BLOCK);
+ iowrite32(chip->options & NAND_BUSWIDTH_16 ? 1 : 0,
+ denali->reg + DEVICE_WIDTH);
+ iowrite32(mtd->writesize, denali->reg + DEVICE_MAIN_AREA_SIZE);
+ iowrite32(mtd->oobsize, denali->reg + DEVICE_SPARE_AREA_SIZE);
+ iowrite32(chip->options & NAND_ROW_ADDR_3 ?
+ 0 : TWO_ROW_ADDR_CYCLES__FLAG,
+ denali->reg + TWO_ROW_ADDR_CYCLES);
+ iowrite32(FIELD_PREP(ECC_CORRECTION__ERASE_THRESHOLD, 1) |
+ FIELD_PREP(ECC_CORRECTION__VALUE, chip->ecc.strength),
+ denali->reg + ECC_CORRECTION);
+ iowrite32(chip->ecc.size, denali->reg + CFG_DATA_BLOCK_SIZE);
+ iowrite32(chip->ecc.size, denali->reg + CFG_LAST_DATA_BLOCK_SIZE);
+ iowrite32(chip->ecc.steps, denali->reg + CFG_NUM_DATA_BLOCKS);
+
+ if (chip->options & NAND_KEEP_TIMINGS)
+ return;
+
+ /* update timing registers unless NAND_KEEP_TIMINGS is set */
+ iowrite32(sel->hwhr2_and_we_2_re, denali->reg + TWHR2_AND_WE_2_RE);
+ iowrite32(sel->tcwaw_and_addr_2_data,
+ denali->reg + TCWAW_AND_ADDR_2_DATA);
+ iowrite32(sel->re_2_we, denali->reg + RE_2_WE);
+ iowrite32(sel->acc_clks, denali->reg + ACC_CLKS);
+ iowrite32(sel->rdwr_en_lo_cnt, denali->reg + RDWR_EN_LO_CNT);
+ iowrite32(sel->rdwr_en_hi_cnt, denali->reg + RDWR_EN_HI_CNT);
+ iowrite32(sel->cs_setup_cnt, denali->reg + CS_SETUP_CNT);
+ iowrite32(sel->re_2_re, denali->reg + RE_2_RE);
+}
+
+static int denali_change_column(struct nand_chip *chip, unsigned int offset,
+ void *buf, unsigned int len, bool write)
+{
+ if (write)
+ return nand_change_write_column_op(chip, offset, buf, len,
+ false);
+ else
+ return nand_change_read_column_op(chip, offset, buf, len,
+ false);
+}
+
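+/*
+ * Transfer only the payload chunks of a page. In the raw layout, payload and
+ * ECC chunks are interleaved, with oob_skip_bytes reserved for the bad block
+ * marker at the start of the OOB area.
+ */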
+static int denali_payload_xfer(struct nand_chip *chip, void *buf, bool write)
+{
+ struct denali_controller *denali = to_denali_controller(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int writesize = mtd->writesize;
+ int oob_skip = denali->oob_skip_bytes;
+ int ret, i, pos, len;
+
+ for (i = 0; i < ecc->steps; i++) {
+ pos = i * (ecc->size + ecc->bytes);
+ len = ecc->size;
+
+ if (pos >= writesize) {
+ pos += oob_skip;
+ } else if (pos + len > writesize) {
+ /* This chunk overlaps the BBM area. Must be split */
+ ret = denali_change_column(chip, pos, buf,
+ writesize - pos, write);
+ if (ret)
+ return ret;
+
+ buf += writesize - pos;
+ len -= writesize - pos;
+ pos = writesize + oob_skip;
+ }
+
+ ret = denali_change_column(chip, pos, buf, len, write);
+ if (ret)
+ return ret;
+
+ buf += len;
+ }
+
+ return 0;
+}
+
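+/*
+ * Transfer the OOB data: the BBM bytes first, then the ECC chunk of each
+ * step, and finally the OOB free bytes that follow the last chunk.
+ */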
+static int denali_oob_xfer(struct nand_chip *chip, void *buf, bool write)
+{
+ struct denali_controller *denali = to_denali_controller(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int writesize = mtd->writesize;
+ int oobsize = mtd->oobsize;
+ int oob_skip = denali->oob_skip_bytes;
+ int ret, i, pos, len;
+
+ /* BBM at the beginning of the OOB area */
+ ret = denali_change_column(chip, writesize, buf, oob_skip, write);
+ if (ret)
+ return ret;
+
+ buf += oob_skip;
+
+ for (i = 0; i < ecc->steps; i++) {
+ pos = ecc->size + i * (ecc->size + ecc->bytes);
+
+ if (i == ecc->steps - 1)
+ /* The last chunk includes the OOB free area */
+ len = writesize + oobsize - pos - oob_skip;
+ else
+ len = ecc->bytes;
+
+ if (pos >= writesize) {
+ pos += oob_skip;
+ } else if (pos + len > writesize) {
+ /* This chunk overlaps the BBM area. Must be split */
+ ret = denali_change_column(chip, pos, buf,
+ writesize - pos, write);
+ if (ret)
+ return ret;
+
+ buf += writesize - pos;
+ len -= writesize - pos;
+ pos = writesize + oob_skip;
+ }
+
+ ret = denali_change_column(chip, pos, buf, len, write);
+ if (ret)
+ return ret;
+
+ buf += len;
+ }
+
+ return 0;
+}
+
+static int denali_read_raw(struct nand_chip *chip, void *buf, void *oob_buf,
+ int page)
+{
+ int ret;
+
+ if (!buf && !oob_buf)
+ return -EINVAL;
+
+ ret = nand_read_page_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+
+ if (buf) {
+ ret = denali_payload_xfer(chip, buf, false);
+ if (ret)
+ return ret;
+ }
+
+ if (oob_buf) {
+ ret = denali_oob_xfer(chip, oob_buf, false);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int denali_write_raw(struct nand_chip *chip, const void *buf,
+ const void *oob_buf, int page)
+{
+ int ret;
+
+ if (!buf && !oob_buf)
+ return -EINVAL;
+
+ ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+
+ if (buf) {
+ ret = denali_payload_xfer(chip, (void *)buf, true);
+ if (ret)
+ return ret;
+ }
+
+ if (oob_buf) {
+ ret = denali_oob_xfer(chip, (void *)oob_buf, true);
+ if (ret)
+ return ret;
+ }
+
+ return nand_prog_page_end_op(chip);
+}
+
+static int denali_read_page_raw(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ return denali_read_raw(chip, buf, oob_required ? chip->oob_poi : NULL,
+ page);
+}
+
+static int denali_write_page_raw(struct nand_chip *chip, const u8 *buf,
+ int oob_required, int page)
+{
+ return denali_write_raw(chip, buf, oob_required ? chip->oob_poi : NULL,
+ page);
+}
+
+static int denali_read_oob(struct nand_chip *chip, int page)
+{
+ return denali_read_raw(chip, NULL, chip->oob_poi, page);
+}
+
+static int denali_write_oob(struct nand_chip *chip, int page)
+{
+ return denali_write_raw(chip, NULL, chip->oob_poi, page);
+}
+
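+/*
+ * Re-examine the sectors flagged as uncorrectable: a freshly erased
+ * (all 0xff) sector is not a real ECC failure, so update the ECC stats
+ * accordingly.
+ */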
+static int denali_check_erased_page(struct nand_chip *chip, u8 *buf,
+ unsigned long uncor_ecc_flags,
+ unsigned int max_bitflips)
+{
+ struct denali_controller *denali = to_denali_controller(chip);
+ struct mtd_ecc_stats *ecc_stats = &nand_to_mtd(chip)->ecc_stats;
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ u8 *ecc_code = chip->oob_poi + denali->oob_skip_bytes;
+ int i, stat;
+
+ for (i = 0; i < ecc->steps; i++) {
+ if (!(uncor_ecc_flags & BIT(i)))
+ continue;
+
+ stat = nand_check_erased_ecc_chunk(buf, ecc->size, ecc_code,
+ ecc->bytes, NULL, 0,
+ ecc->strength);
+ if (stat < 0) {
+ ecc_stats->failed++;
+ } else {
+ ecc_stats->corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+
+ buf += ecc->size;
+ ecc_code += ecc->bytes;
+ }
+
+ return max_bitflips;
+}
+
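+/*
+ * Read back the correction result from the ECC_COR_INFO register when the
+ * hardware fixes up the data by itself (DENALI_CAP_HW_ECC_FIXUP).
+ */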
+static int denali_hw_ecc_fixup(struct nand_chip *chip,
+ unsigned long *uncor_ecc_flags)
+{
+ struct denali_controller *denali = to_denali_controller(chip);
+ struct mtd_ecc_stats *ecc_stats = &nand_to_mtd(chip)->ecc_stats;
+ int bank = denali->active_bank;
+ u32 ecc_cor;
+ unsigned int max_bitflips;
+
+ ecc_cor = ioread32(denali->reg + ECC_COR_INFO(bank));
+ ecc_cor >>= ECC_COR_INFO__SHIFT(bank);
+
+ if (ecc_cor & ECC_COR_INFO__UNCOR_ERR) {
+ /*
+ * This flag is set when an uncorrectable error occurs in at least
+ * one ECC sector. We cannot know how many sectors, or which
+ * sector(s), so the erased-page check is needed for all sectors.
+ */
+ *uncor_ecc_flags = GENMASK(chip->ecc.steps - 1, 0);
+ return 0;
+ }
+
+ max_bitflips = FIELD_GET(ECC_COR_INFO__MAX_ERRORS, ecc_cor);
+
+ /*
+ * The register holds the maximum of the per-sector corrected bitflips.
+ * This is suitable for the return value of the ->read_page() callback.
+ * Unfortunately, we cannot know the total number of corrected bits in
+ * the page. Increase the stats by max_bitflips as a compromise.
+ */
+ ecc_stats->corrected += max_bitflips;
+
+ return max_bitflips;
+}
+
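+/*
+ * Walk the ECC error FIFO (ECC_ERROR_ADDRESS / ERR_CORRECTION_INFO) and fix
+ * up the bitflips in @buf one by one until the LAST_ERR entry is reached.
+ */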
+static int denali_sw_ecc_fixup(struct nand_chip *chip,
+ unsigned long *uncor_ecc_flags, u8 *buf)
+{
+ struct denali_controller *denali = to_denali_controller(chip);
+ struct mtd_ecc_stats *ecc_stats = &nand_to_mtd(chip)->ecc_stats;
+ unsigned int ecc_size = chip->ecc.size;
+ unsigned int bitflips = 0;
+ unsigned int max_bitflips = 0;
+ u32 err_addr, err_cor_info;
+ unsigned int err_byte, err_sector, err_device;
+ u8 err_cor_value;
+ unsigned int prev_sector = 0;
+ u32 irq_status;
+
+ denali_reset_irq(denali);
+
+ do {
+ err_addr = ioread32(denali->reg + ECC_ERROR_ADDRESS);
+ err_sector = FIELD_GET(ECC_ERROR_ADDRESS__SECTOR, err_addr);
+ err_byte = FIELD_GET(ECC_ERROR_ADDRESS__OFFSET, err_addr);
+
+ err_cor_info = ioread32(denali->reg + ERR_CORRECTION_INFO);
+ err_cor_value = FIELD_GET(ERR_CORRECTION_INFO__BYTE,
+ err_cor_info);
+ err_device = FIELD_GET(ERR_CORRECTION_INFO__DEVICE,
+ err_cor_info);
+
+ /* reset the bitflip counter when crossing ECC sector */
+ if (err_sector != prev_sector)
+ bitflips = 0;
+
+ if (err_cor_info & ERR_CORRECTION_INFO__UNCOR) {
+ /*
+ * Check later if this is a real ECC error, or
+ * an erased sector.
+ */
+ *uncor_ecc_flags |= BIT(err_sector);
+ } else if (err_byte < ecc_size) {
+ /*
+ * If err_byte is not smaller than ecc_size, the error
+ * happened in the OOB area, so we can ignore it; there is
+ * no need to correct it. err_device identifies which of
+ * the NAND devices the error occurred in when more than
+ * one NAND is connected.
+ */
+ int offset;
+ unsigned int flips_in_byte;
+
+ offset = (err_sector * ecc_size + err_byte) *
+ denali->devs_per_cs + err_device;
+
+ /* correct the ECC error */
+ flips_in_byte = hweight8(buf[offset] ^ err_cor_value);
+ buf[offset] ^= err_cor_value;
+ ecc_stats->corrected += flips_in_byte;
+ bitflips += flips_in_byte;
+
+ max_bitflips = max(max_bitflips, bitflips);
+ }
+
+ prev_sector = err_sector;
+ } while (!(err_cor_info & ERR_CORRECTION_INFO__LAST_ERR));
+
+ /*
+ * Once all ECC errors have been handled, the controller triggers an
+ * ECC_TRANSACTION_DONE interrupt.
+ */
+ irq_status = denali_wait_for_irq(denali, INTR__ECC_TRANSACTION_DONE);
+ if (!(irq_status & INTR__ECC_TRANSACTION_DONE))
+ return -EIO;
+
+ return max_bitflips;
+}
+
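+/* Set up a data DMA transfer via MAP10 commands, using a 64-bit DMA address. */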
+static void denali_setup_dma64(struct denali_controller *denali,
+ dma_addr_t dma_addr, int page, bool write)
+{
+ u32 mode;
+ const int page_count = 1;
+
+ mode = DENALI_MAP10 | DENALI_BANK(denali) | page;
+
+ /* DMA is a three-step process */
+
+ /*
+ * 1. setup transfer type, interrupt when complete,
+ * burst len = 64 bytes, the number of pages
+ */
+ denali->host_write(denali, mode,
+ 0x01002000 | (64 << 16) |
+ (write ? BIT(8) : 0) | page_count);
+
+ /* 2. set memory low address */
+ denali->host_write(denali, mode, lower_32_bits(dma_addr));
+
+ /* 3. set memory high address */
+ denali->host_write(denali, mode, upper_32_bits(dma_addr));
+}
+
+static void denali_setup_dma32(struct denali_controller *denali,
+ dma_addr_t dma_addr, int page, bool write)
+{
+ u32 mode;
+ const int page_count = 1;
+
+ mode = DENALI_MAP10 | DENALI_BANK(denali);
+
+ /* DMA is a four-step process */
+
+ /* 1. setup transfer type and # of pages */
+ denali->host_write(denali, mode | page,
+ 0x2000 | (write ? BIT(8) : 0) | page_count);
+
+ /* 2. set memory high address bits 23:8 */
+ denali->host_write(denali, mode | ((dma_addr >> 16) << 8), 0x2200);
+
+ /* 3. set memory low address bits 23:8 */
+ denali->host_write(denali, mode | ((dma_addr & 0xffff) << 8), 0x2300);
+
+ /* 4. interrupt when complete, burst len = 64 bytes */
+ denali->host_write(denali, mode | 0x14000, 0x2400);
+}
+
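+/*
+ * PIO fallback: read a whole page through the MAP01 data interface,
+ * 32 bits at a time, then check the transfer/ECC interrupt bits.
+ */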
+static int denali_pio_read(struct denali_controller *denali, u32 *buf,
+ size_t size, int page)
+{
+ u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
+ u32 irq_status, ecc_err_mask;
+ int i;
+
+ if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
+ ecc_err_mask = INTR__ECC_UNCOR_ERR;
+ else
+ ecc_err_mask = INTR__ECC_ERR;
+
+ denali_reset_irq(denali);
+
+ for (i = 0; i < size / 4; i++)
+ buf[i] = denali->host_read(denali, addr);
+
+ irq_status = denali_wait_for_irq(denali, INTR__PAGE_XFER_INC);
+ if (!(irq_status & INTR__PAGE_XFER_INC))
+ return -EIO;
+
+ if (irq_status & INTR__ERASED_PAGE)
+ memset(buf, 0xff, size);
+
+ return irq_status & ecc_err_mask ? -EBADMSG : 0;
+}
+
+static int denali_pio_write(struct denali_controller *denali, const u32 *buf,
+ size_t size, int page)
+{
+ u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
+ u32 irq_status;
+ int i;
+
+ denali_reset_irq(denali);
+
+ for (i = 0; i < size / 4; i++)
+ denali->host_write(denali, addr, buf[i]);
+
+ irq_status = denali_wait_for_irq(denali,
+ INTR__PROGRAM_COMP |
+ INTR__PROGRAM_FAIL);
+ if (!(irq_status & INTR__PROGRAM_COMP))
+ return -EIO;
+
+ return 0;
+}
+
+static int denali_pio_xfer(struct denali_controller *denali, void *buf,
+ size_t size, int page, bool write)
+{
+ if (write)
+ return denali_pio_write(denali, buf, size, page);
+ else
+ return denali_pio_read(denali, buf, size, page);
+}
+
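+/*
+ * DMA a whole page to/from @buf, falling back to PIO if the buffer cannot
+ * be DMA-mapped.
+ */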
+static int denali_dma_xfer(struct denali_controller *denali, void *buf,
+ size_t size, int page, bool write)
+{
+ dma_addr_t dma_addr;
+ u32 irq_mask, irq_status, ecc_err_mask;
+ enum dma_data_direction dir = write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+ int ret = 0;
+
+ dma_addr = dma_map_single(denali->dev, buf, size, dir);
+ if (dma_mapping_error(denali->dev, dma_addr)) {
+ dev_dbg(denali->dev, "Failed to DMA-map buffer. Trying PIO.\n");
+ return denali_pio_xfer(denali, buf, size, page, write);
+ }
+
+ if (write) {
+ /*
+ * INTR__PROGRAM_COMP is never asserted for the DMA transfer.
+ * We can use INTR__DMA_CMD_COMP instead; it is asserted when
+ * the page program has completed.
+ */
+ irq_mask = INTR__DMA_CMD_COMP | INTR__PROGRAM_FAIL;
+ ecc_err_mask = 0;
+ } else if (denali->caps & DENALI_CAP_HW_ECC_FIXUP) {
+ irq_mask = INTR__DMA_CMD_COMP;
+ ecc_err_mask = INTR__ECC_UNCOR_ERR;
+ } else {
+ irq_mask = INTR__DMA_CMD_COMP;
+ ecc_err_mask = INTR__ECC_ERR;
+ }
+
+ iowrite32(DMA_ENABLE__FLAG, denali->reg + DMA_ENABLE);
+ /*
+ * The ->setup_dma() hook kicks DMA by using the data/command
+ * interface, which belongs to a different AXI port from the
+ * register interface. Read back the register to avoid a race.
+ */
+ ioread32(denali->reg + DMA_ENABLE);
+
+ denali_reset_irq(denali);
+ denali->setup_dma(denali, dma_addr, page, write);
+
+ irq_status = denali_wait_for_irq(denali, irq_mask);
+ if (!(irq_status & INTR__DMA_CMD_COMP))
+ ret = -EIO;
+ else if (irq_status & ecc_err_mask)
+ ret = -EBADMSG;
+
+ iowrite32(0, denali->reg + DMA_ENABLE);
+
+ dma_unmap_single(denali->dev, dma_addr, size, dir);
+
+ if (irq_status & INTR__ERASED_PAGE)
+ memset(buf, 0xff, size);
+
+ return ret;
+}
+
+static int denali_page_xfer(struct nand_chip *chip, void *buf, size_t size,
+ int page, bool write)
+{
+ struct denali_controller *denali = to_denali_controller(chip);
+
+ denali_select_target(chip, chip->cur_cs);
+
+ if (denali->dma_avail)
+ return denali_dma_xfer(denali, buf, size, page, write);
+ else
+ return denali_pio_xfer(denali, buf, size, page, write);
+}
+
+static int denali_read_page(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ struct denali_controller *denali = to_denali_controller(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned long uncor_ecc_flags = 0;
+ int stat = 0;
+ int ret;
+
+ ret = denali_page_xfer(chip, buf, mtd->writesize, page, false);
+ if (ret && ret != -EBADMSG)
+ return ret;
+
+ if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
+ stat = denali_hw_ecc_fixup(chip, &uncor_ecc_flags);
+ else if (ret == -EBADMSG)
+ stat = denali_sw_ecc_fixup(chip, &uncor_ecc_flags, buf);
+
+ if (stat < 0)
+ return stat;
+
+ if (uncor_ecc_flags) {
+ ret = denali_read_oob(chip, page);
+ if (ret)
+ return ret;
+
+ stat = denali_check_erased_page(chip, buf,
+ uncor_ecc_flags, stat);
+ }
+
+ return stat;
+}
+
+static int denali_write_page(struct nand_chip *chip, const u8 *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ return denali_page_xfer(chip, (void *)buf, mtd->writesize, page, true);
+}
+
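+/*
+ * Convert the SDR timings requested by the core into clk_x cycle counts and
+ * cache the resulting register values in the per-CS denali_chip_sel; they
+ * are programmed into the hardware by denali_select_target().
+ */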
+static int denali_setup_interface(struct nand_chip *chip, int chipnr,
+ const struct nand_interface_config *conf)
+{
+ static const unsigned int data_setup_on_host = 10000;
+ struct denali_controller *denali = to_denali_controller(chip);
+ struct denali_chip_sel *sel;
+ const struct nand_sdr_timings *timings;
+ unsigned long t_x, mult_x;
+ int acc_clks, re_2_we, re_2_re, we_2_re, addr_2_data;
+ int rdwr_en_lo, rdwr_en_hi, rdwr_en_lo_hi, cs_setup;
+ int addr_2_data_mask;
+ u32 tmp;
+
+ timings = nand_get_sdr_timings(conf);
+ if (IS_ERR(timings))
+ return PTR_ERR(timings);
+
+ /* clk_x period in picoseconds */
+ t_x = DIV_ROUND_DOWN_ULL(1000000000000ULL, denali->clk_x_rate);
+ if (!t_x)
+ return -EINVAL;
+
+ /*
+ * The bus interface clock, clk_x, is phase-aligned with the core clock.
+ * clk_x is an integer multiple N of the core clock. The value N is
+ * configured at IP delivery time and can be 4, 5, or 6.
+ */
+ mult_x = DIV_ROUND_CLOSEST_ULL(denali->clk_x_rate, denali->clk_rate);
+ if (mult_x < 4 || mult_x > 6)
+ return -EINVAL;
+
+ if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
+ return 0;
+
+ sel = &to_denali_chip(chip)->sels[chipnr];
+
+ /* tRHW -> RE_2_WE */
+ re_2_we = DIV_ROUND_UP(timings->tRHW_min, t_x);
+ re_2_we = min_t(int, re_2_we, RE_2_WE__VALUE);
+
+ tmp = ioread32(denali->reg + RE_2_WE);
+ tmp &= ~RE_2_WE__VALUE;
+ tmp |= FIELD_PREP(RE_2_WE__VALUE, re_2_we);
+ sel->re_2_we = tmp;
+
+ /* tRHZ -> RE_2_RE */
+ re_2_re = DIV_ROUND_UP(timings->tRHZ_max, t_x);
+ re_2_re = min_t(int, re_2_re, RE_2_RE__VALUE);
+
+ tmp = ioread32(denali->reg + RE_2_RE);
+ tmp &= ~RE_2_RE__VALUE;
+ tmp |= FIELD_PREP(RE_2_RE__VALUE, re_2_re);
+ sel->re_2_re = tmp;
+
+ /*
+ * tCCS, tWHR -> WE_2_RE
+ *
+ * With WE_2_RE properly set, the Denali controller automatically takes
+ * care of the delay; the driver need not set NAND_WAIT_TCCS.
+ */
+ we_2_re = DIV_ROUND_UP(max(timings->tCCS_min, timings->tWHR_min), t_x);
+ we_2_re = min_t(int, we_2_re, TWHR2_AND_WE_2_RE__WE_2_RE);
+
+ tmp = ioread32(denali->reg + TWHR2_AND_WE_2_RE);
+ tmp &= ~TWHR2_AND_WE_2_RE__WE_2_RE;
+ tmp |= FIELD_PREP(TWHR2_AND_WE_2_RE__WE_2_RE, we_2_re);
+ sel->hwhr2_and_we_2_re = tmp;
+
+ /* tADL -> ADDR_2_DATA */
+
+ /* for older versions, ADDR_2_DATA is only 6 bits wide */
+ addr_2_data_mask = TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA;
+ if (denali->revision < 0x0501)
+ addr_2_data_mask >>= 1;
+
+ addr_2_data = DIV_ROUND_UP(timings->tADL_min, t_x);
+ addr_2_data = min_t(int, addr_2_data, addr_2_data_mask);
+
+ tmp = ioread32(denali->reg + TCWAW_AND_ADDR_2_DATA);
+ tmp &= ~TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA;
+ tmp |= FIELD_PREP(TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA, addr_2_data);
+ sel->tcwaw_and_addr_2_data = tmp;
+
+ /* tREH, tWH -> RDWR_EN_HI_CNT */
+ rdwr_en_hi = DIV_ROUND_UP(max(timings->tREH_min, timings->tWH_min),
+ t_x);
+ rdwr_en_hi = min_t(int, rdwr_en_hi, RDWR_EN_HI_CNT__VALUE);
+
+ tmp = ioread32(denali->reg + RDWR_EN_HI_CNT);
+ tmp &= ~RDWR_EN_HI_CNT__VALUE;
+ tmp |= FIELD_PREP(RDWR_EN_HI_CNT__VALUE, rdwr_en_hi);
+ sel->rdwr_en_hi_cnt = tmp;
+
+ /*
+ * tREA -> ACC_CLKS
+ * tRP, tWP, tRHOH, tRC, tWC -> RDWR_EN_LO_CNT
+ */
+
+ /*
+ * Determine the minimum of acc_clks to meet the setup timing when
+ * capturing the incoming data.
+ *
+ * The delay on the chip side is well-defined as tREA, but we need to
+ * take additional delay into account. This includes a degree of
+ * uncertainty, such as signal propagation delays on the PCB and
+ * in the SoC, the load capacitance of the I/O pins, etc.
+ */
+ acc_clks = DIV_ROUND_UP(timings->tREA_max + data_setup_on_host, t_x);
+
+ /* Determine the minimum of rdwr_en_lo_cnt from RE#/WE# pulse width */
+ rdwr_en_lo = DIV_ROUND_UP(max(timings->tRP_min, timings->tWP_min), t_x);
+
+ /* Extend rdwr_en_lo to meet the data hold timing */
+ rdwr_en_lo = max_t(int, rdwr_en_lo,
+ acc_clks - timings->tRHOH_min / t_x);
+
+ /* Extend rdwr_en_lo to meet the requirement for RE#/WE# cycle time */
+ rdwr_en_lo_hi = DIV_ROUND_UP(max(timings->tRC_min, timings->tWC_min),
+ t_x);
+ rdwr_en_lo = max(rdwr_en_lo, rdwr_en_lo_hi - rdwr_en_hi);
+ rdwr_en_lo = min_t(int, rdwr_en_lo, RDWR_EN_LO_CNT__VALUE);
+
+ /* Center the data latch timing for extra safety */
+ acc_clks = (acc_clks + rdwr_en_lo +
+ DIV_ROUND_UP(timings->tRHOH_min, t_x)) / 2;
+ acc_clks = min_t(int, acc_clks, ACC_CLKS__VALUE);
+
+ tmp = ioread32(denali->reg + ACC_CLKS);
+ tmp &= ~ACC_CLKS__VALUE;
+ tmp |= FIELD_PREP(ACC_CLKS__VALUE, acc_clks);
+ sel->acc_clks = tmp;
+
+ tmp = ioread32(denali->reg + RDWR_EN_LO_CNT);
+ tmp &= ~RDWR_EN_LO_CNT__VALUE;
+ tmp |= FIELD_PREP(RDWR_EN_LO_CNT__VALUE, rdwr_en_lo);
+ sel->rdwr_en_lo_cnt = tmp;
+
+ /* tCS, tCEA -> CS_SETUP_CNT */
+ cs_setup = max3((int)DIV_ROUND_UP(timings->tCS_min, t_x) - rdwr_en_lo,
+ (int)DIV_ROUND_UP(timings->tCEA_max, t_x) - acc_clks,
+ 0);
+ cs_setup = min_t(int, cs_setup, CS_SETUP_CNT__VALUE);
+
+ tmp = ioread32(denali->reg + CS_SETUP_CNT);
+ tmp &= ~CS_SETUP_CNT__VALUE;
+ tmp |= FIELD_PREP(CS_SETUP_CNT__VALUE, cs_setup);
+ sel->cs_setup_cnt = tmp;
+
+ return 0;
+}
+
+int denali_calc_ecc_bytes(int step_size, int strength)
+{
+ /* BCH code. Denali requires ecc.bytes to be a multiple of 2 */
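+ /*
+ * Example: step_size = 512, strength = 8 -> fls(4096) = 13, so
+ * DIV_ROUND_UP(8 * 13, 16) * 2 = 14 ECC bytes per step.
+ */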
+ return DIV_ROUND_UP(strength * fls(step_size * 8), 16) * 2;
+}
+EXPORT_SYMBOL(denali_calc_ecc_bytes);
+
+static int denali_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct denali_controller *denali = to_denali_controller(chip);
+
+ if (section > 0)
+ return -ERANGE;
+
+ oobregion->offset = denali->oob_skip_bytes;
+ oobregion->length = chip->ecc.total;
+
+ return 0;
+}
+
+static int denali_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct denali_controller *denali = to_denali_controller(chip);
+
+ if (section > 0)
+ return -ERANGE;
+
+ oobregion->offset = chip->ecc.total + denali->oob_skip_bytes;
+ oobregion->length = mtd->oobsize - oobregion->offset;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops denali_ooblayout_ops = {
+ .ecc = denali_ooblayout_ecc,
+ .free = denali_ooblayout_free,
+};
+
+static int denali_multidev_fixup(struct nand_chip *chip)
+{
+ struct denali_controller *denali = to_denali_controller(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg;
+
+ memorg = nanddev_get_memorg(&chip->base);
+
+ /*
+ * Support for multiple devices:
+ * When the IP configuration is x16 capable and two x8 chips are
+ * connected in parallel, DEVICES_CONNECTED should be set to 2.
+ * The core framework knows nothing about this fact, so we must tell
+ * it the _logical_ pagesize and everything derived from it.
+ */
+ denali->devs_per_cs = ioread32(denali->reg + DEVICES_CONNECTED);
+
+ /*
+ * On some SoCs, DEVICES_CONNECTED is not auto-detected.
+ * For those, DEVICES_CONNECTED is left at 0. Set it to 1 in that case.
+ */
+ if (denali->devs_per_cs == 0) {
+ denali->devs_per_cs = 1;
+ iowrite32(1, denali->reg + DEVICES_CONNECTED);
+ }
+
+ if (denali->devs_per_cs == 1)
+ return 0;
+
+ if (denali->devs_per_cs != 2) {
+ dev_err(denali->dev, "unsupported number of devices %d\n",
+ denali->devs_per_cs);
+ return -EINVAL;
+ }
+
+ /* 2 chips in parallel */
+ memorg->pagesize <<= 1;
+ memorg->oobsize <<= 1;
+ mtd->size <<= 1;
+ mtd->erasesize <<= 1;
+ mtd->writesize <<= 1;
+ mtd->oobsize <<= 1;
+ chip->page_shift += 1;
+ chip->phys_erase_shift += 1;
+ chip->bbt_erase_shift += 1;
+ chip->chip_shift += 1;
+ chip->pagemask <<= 1;
+ chip->ecc.size <<= 1;
+ chip->ecc.bytes <<= 1;
+ chip->ecc.strength <<= 1;
+ denali->oob_skip_bytes <<= 1;
+
+ return 0;
+}
+
+static int denali_attach_chip(struct nand_chip *chip)
+{
+ struct denali_controller *denali = to_denali_controller(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ ret = nand_ecc_choose_conf(chip, denali->ecc_caps,
+ mtd->oobsize - denali->oob_skip_bytes);
+ if (ret) {
+ dev_err(denali->dev, "Failed to setup ECC settings.\n");
+ return ret;
+ }
+
+ dev_dbg(denali->dev,
+ "chosen ECC settings: step=%d, strength=%d, bytes=%d\n",
+ chip->ecc.size, chip->ecc.strength, chip->ecc.bytes);
+
+ ret = denali_multidev_fixup(chip);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void denali_exec_in8(struct denali_controller *denali, u32 type,
+ u8 *buf, unsigned int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ buf[i] = denali->host_read(denali, type | DENALI_BANK(denali));
+}
+
+static void denali_exec_in16(struct denali_controller *denali, u32 type,
+ u8 *buf, unsigned int len)
+{
+ u32 data;
+ int i;
+
+ for (i = 0; i < len; i += 2) {
+ data = denali->host_read(denali, type | DENALI_BANK(denali));
+ /* bits 31:24 and 15:8 are used for DDR */
+ buf[i] = data;
+ buf[i + 1] = data >> 16;
+ }
+}
+
+static void denali_exec_in(struct denali_controller *denali, u32 type,
+ u8 *buf, unsigned int len, bool width16)
+{
+ if (width16)
+ denali_exec_in16(denali, type, buf, len);
+ else
+ denali_exec_in8(denali, type, buf, len);
+}
+
+static void denali_exec_out8(struct denali_controller *denali, u32 type,
+ const u8 *buf, unsigned int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ denali->host_write(denali, type | DENALI_BANK(denali), buf[i]);
+}
+
+static void denali_exec_out16(struct denali_controller *denali, u32 type,
+ const u8 *buf, unsigned int len)
+{
+ int i;
+
+ for (i = 0; i < len; i += 2)
+ denali->host_write(denali, type | DENALI_BANK(denali),
+ buf[i + 1] << 16 | buf[i]);
+}
+
+static void denali_exec_out(struct denali_controller *denali, u32 type,
+ const u8 *buf, unsigned int len, bool width16)
+{
+ if (width16)
+ denali_exec_out16(denali, type, buf, len);
+ else
+ denali_exec_out8(denali, type, buf, len);
+}
+
+static int denali_exec_waitrdy(struct denali_controller *denali)
+{
+ u32 irq_stat;
+
+ /* R/B# pin transitioned from low to high? */
+ irq_stat = denali_wait_for_irq(denali, INTR__INT_ACT);
+
+ /* Just in case the nand_operation has multiple NAND_OP_WAITRDY_INSTR. */
+ denali_reset_irq(denali);
+
+ return irq_stat & INTR__INT_ACT ? 0 : -EIO;
+}
+
+static int denali_exec_instr(struct nand_chip *chip,
+ const struct nand_op_instr *instr)
+{
+ struct denali_controller *denali = to_denali_controller(chip);
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ denali_exec_out8(denali, DENALI_MAP11_CMD,
+ &instr->ctx.cmd.opcode, 1);
+ return 0;
+ case NAND_OP_ADDR_INSTR:
+ denali_exec_out8(denali, DENALI_MAP11_ADDR,
+ instr->ctx.addr.addrs,
+ instr->ctx.addr.naddrs);
+ return 0;
+ case NAND_OP_DATA_IN_INSTR:
+ denali_exec_in(denali, DENALI_MAP11_DATA,
+ instr->ctx.data.buf.in,
+ instr->ctx.data.len,
+ !instr->ctx.data.force_8bit &&
+ chip->options & NAND_BUSWIDTH_16);
+ return 0;
+ case NAND_OP_DATA_OUT_INSTR:
+ denali_exec_out(denali, DENALI_MAP11_DATA,
+ instr->ctx.data.buf.out,
+ instr->ctx.data.len,
+ !instr->ctx.data.force_8bit &&
+ chip->options & NAND_BUSWIDTH_16);
+ return 0;
+ case NAND_OP_WAITRDY_INSTR:
+ return denali_exec_waitrdy(denali);
+ default:
+ WARN_ONCE(1, "unsupported NAND instruction type: %d\n",
+ instr->type);
+
+ return -EINVAL;
+ }
+}
+
+static int denali_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op, bool check_only)
+{
+ int i, ret;
+
+ if (check_only)
+ return 0;
+
+ denali_select_target(chip, op->cs);
+
+ /*
+ * Some operations contain NAND_OP_WAITRDY_INSTR.
+ * The IRQ status must be cleared here so that the R/B# interrupt
+ * can be caught there.
+ */
+ denali_reset_irq(to_denali_controller(chip));
+
+ for (i = 0; i < op->ninstrs; i++) {
+ ret = denali_exec_instr(chip, &op->instrs[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct nand_controller_ops denali_controller_ops = {
+ .attach_chip = denali_attach_chip,
+ .exec_op = denali_exec_op,
+ .setup_interface = denali_setup_interface,
+};
+
+int denali_chip_init(struct denali_controller *denali,
+ struct denali_chip *dchip)
+{
+ struct nand_chip *chip = &dchip->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct denali_chip *dchip2;
+ int i, j, ret;
+
+ chip->controller = &denali->controller;
+
+ /* sanity checks for bank numbers */
+ for (i = 0; i < dchip->nsels; i++) {
+ unsigned int bank = dchip->sels[i].bank;
+
+ if (bank >= denali->nbanks) {
+ dev_err(denali->dev, "unsupported bank %d\n", bank);
+ return -EINVAL;
+ }
+
+ for (j = 0; j < i; j++) {
+ if (bank == dchip->sels[j].bank) {
+ dev_err(denali->dev,
+ "bank %d is assigned twice in the same chip\n",
+ bank);
+ return -EINVAL;
+ }
+ }
+
+ list_for_each_entry(dchip2, &denali->chips, node) {
+ for (j = 0; j < dchip2->nsels; j++) {
+ if (bank == dchip2->sels[j].bank) {
+ dev_err(denali->dev,
+ "bank %d is already used\n",
+ bank);
+ return -EINVAL;
+ }
+ }
+ }
+ }
+
+ mtd->dev.parent = denali->dev;
+
+ /*
+ * Fall back to the default name if DT did not provide a "label"
+ * property. The "label" property is needed when multiple chips are
+ * connected.
+ */
+ if (!mtd->name && list_empty(&denali->chips))
+ mtd->name = "denali-nand";
+
+ if (denali->dma_avail) {
+ chip->options |= NAND_USES_DMA;
+ chip->buf_align = 16;
+ }
+
+ /* clk rate info is needed for setup_interface */
+ if (!denali->clk_rate || !denali->clk_x_rate)
+ chip->options |= NAND_KEEP_TIMINGS;
+
+ chip->bbt_options |= NAND_BBT_USE_FLASH;
+ chip->bbt_options |= NAND_BBT_NO_OOB;
+ chip->options |= NAND_NO_SUBPAGE_WRITE;
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+ chip->ecc.placement = NAND_ECC_PLACEMENT_INTERLEAVED;
+ chip->ecc.read_page = denali_read_page;
+ chip->ecc.write_page = denali_write_page;
+ chip->ecc.read_page_raw = denali_read_page_raw;
+ chip->ecc.write_page_raw = denali_write_page_raw;
+ chip->ecc.read_oob = denali_read_oob;
+ chip->ecc.write_oob = denali_write_oob;
+
+ mtd_set_ooblayout(mtd, &denali_ooblayout_ops);
+
+ ret = nand_scan(chip, dchip->nsels);
+ if (ret)
+ return ret;
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret) {
+ dev_err(denali->dev, "Failed to register MTD: %d\n", ret);
+ goto cleanup_nand;
+ }
+
+ list_add_tail(&dchip->node, &denali->chips);
+
+ return 0;
+
+cleanup_nand:
+ nand_cleanup(chip);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(denali_chip_init);
+
+int denali_init(struct denali_controller *denali)
+{
+ u32 features = ioread32(denali->reg + FEATURES);
+ int ret;
+
+ nand_controller_init(&denali->controller);
+ denali->controller.ops = &denali_controller_ops;
+ init_completion(&denali->complete);
+ spin_lock_init(&denali->irq_lock);
+ INIT_LIST_HEAD(&denali->chips);
+ denali->active_bank = DENALI_INVALID_BANK;
+
+ /*
+ * The REVISION register may not be reliable. Platforms are allowed to
+ * override it.
+ */
+ if (!denali->revision)
+ denali->revision = swab16(ioread32(denali->reg + REVISION));
+
+ denali->nbanks = 1 << FIELD_GET(FEATURES__N_BANKS, features);
+
+ /* the encoding changed from rev 5.0 to 5.1 */
+ if (denali->revision < 0x0501)
+ denali->nbanks <<= 1;
+
+ if (features & FEATURES__DMA)
+ denali->dma_avail = true;
+
+ if (denali->dma_avail) {
+ int dma_bit = denali->caps & DENALI_CAP_DMA_64BIT ? 64 : 32;
+
+ ret = dma_set_mask(denali->dev, DMA_BIT_MASK(dma_bit));
+ if (ret) {
+ dev_info(denali->dev,
+ "Failed to set DMA mask. Disabling DMA.\n");
+ denali->dma_avail = false;
+ }
+ }
+
+ if (denali->dma_avail) {
+ if (denali->caps & DENALI_CAP_DMA_64BIT)
+ denali->setup_dma = denali_setup_dma64;
+ else
+ denali->setup_dma = denali_setup_dma32;
+ }
+
+ if (features & FEATURES__INDEX_ADDR) {
+ denali->host_read = denali_indexed_read;
+ denali->host_write = denali_indexed_write;
+ } else {
+ denali->host_read = denali_direct_read;
+ denali->host_write = denali_direct_write;
+ }
+
+ /*
+ * Set how many bytes should be skipped before writing data into the
+ * OOB area. If the platform requests a non-zero value, write it to the
+ * register. Otherwise, read the value out, expecting it has already
+ * been set up by firmware.
+ */
+ if (denali->oob_skip_bytes)
+ iowrite32(denali->oob_skip_bytes,
+ denali->reg + SPARE_AREA_SKIP_BYTES);
+ else
+ denali->oob_skip_bytes = ioread32(denali->reg +
+ SPARE_AREA_SKIP_BYTES);
+
+ iowrite32(0, denali->reg + TRANSFER_SPARE_REG);
+ iowrite32(GENMASK(denali->nbanks - 1, 0), denali->reg + RB_PIN_ENABLED);
+ iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->reg + CHIP_ENABLE_DONT_CARE);
+ iowrite32(ECC_ENABLE__FLAG, denali->reg + ECC_ENABLE);
+ iowrite32(0xffff, denali->reg + SPARE_AREA_MARKER);
+ iowrite32(WRITE_PROTECT__FLAG, denali->reg + WRITE_PROTECT);
+
+ denali_clear_irq_all(denali);
+
+ ret = devm_request_irq(denali->dev, denali->irq, denali_isr,
+ IRQF_SHARED, DENALI_NAND_NAME, denali);
+ if (ret) {
+ dev_err(denali->dev, "Unable to request IRQ\n");
+ return ret;
+ }
+
+ denali_enable_irq(denali);
+
+ return 0;
+}
+EXPORT_SYMBOL(denali_init);
+
+void denali_remove(struct denali_controller *denali)
+{
+ struct denali_chip *dchip, *tmp;
+ struct nand_chip *chip;
+ int ret;
+
+ list_for_each_entry_safe(dchip, tmp, &denali->chips, node) {
+ chip = &dchip->chip;
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ list_del(&dchip->node);
+ }
+
+ denali_disable_irq(denali);
+}
+EXPORT_SYMBOL(denali_remove);
+
+MODULE_DESCRIPTION("Driver core for Denali NAND controller");
+MODULE_AUTHOR("Intel Corporation and its suppliers");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/raw/denali.h b/drivers/mtd/nand/raw/denali.h
new file mode 100644
index 000000000..ac46eb795
--- /dev/null
+++ b/drivers/mtd/nand/raw/denali.h
@@ -0,0 +1,398 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * NAND Flash Controller Device Driver
+ * Copyright (c) 2009 - 2010, Intel Corporation and its suppliers.
+ */
+
+#ifndef __DENALI_H__
+#define __DENALI_H__
+
+#include <linux/bits.h>
+#include <linux/completion.h>
+#include <linux/list.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+
+#define DEVICE_RESET 0x0
+#define DEVICE_RESET__BANK(bank) BIT(bank)
+
+#define TRANSFER_SPARE_REG 0x10
+#define TRANSFER_SPARE_REG__FLAG BIT(0)
+
+#define LOAD_WAIT_CNT 0x20
+#define LOAD_WAIT_CNT__VALUE GENMASK(15, 0)
+
+#define PROGRAM_WAIT_CNT 0x30
+#define PROGRAM_WAIT_CNT__VALUE GENMASK(15, 0)
+
+#define ERASE_WAIT_CNT 0x40
+#define ERASE_WAIT_CNT__VALUE GENMASK(15, 0)
+
+#define INT_MON_CYCCNT 0x50
+#define INT_MON_CYCCNT__VALUE GENMASK(15, 0)
+
+#define RB_PIN_ENABLED 0x60
+#define RB_PIN_ENABLED__BANK(bank) BIT(bank)
+
+#define MULTIPLANE_OPERATION 0x70
+#define MULTIPLANE_OPERATION__FLAG BIT(0)
+
+#define MULTIPLANE_READ_ENABLE 0x80
+#define MULTIPLANE_READ_ENABLE__FLAG BIT(0)
+
+#define COPYBACK_DISABLE 0x90
+#define COPYBACK_DISABLE__FLAG BIT(0)
+
+#define CACHE_WRITE_ENABLE 0xa0
+#define CACHE_WRITE_ENABLE__FLAG BIT(0)
+
+#define CACHE_READ_ENABLE 0xb0
+#define CACHE_READ_ENABLE__FLAG BIT(0)
+
+#define PREFETCH_MODE 0xc0
+#define PREFETCH_MODE__PREFETCH_EN BIT(0)
+#define PREFETCH_MODE__PREFETCH_BURST_LENGTH GENMASK(15, 4)
+
+#define CHIP_ENABLE_DONT_CARE 0xd0
+#define CHIP_EN_DONT_CARE__FLAG BIT(0)
+
+#define ECC_ENABLE 0xe0
+#define ECC_ENABLE__FLAG BIT(0)
+
+#define GLOBAL_INT_ENABLE 0xf0
+#define GLOBAL_INT_EN_FLAG BIT(0)
+
+#define TWHR2_AND_WE_2_RE 0x100
+#define TWHR2_AND_WE_2_RE__WE_2_RE GENMASK(5, 0)
+#define TWHR2_AND_WE_2_RE__TWHR2 GENMASK(13, 8)
+
+#define TCWAW_AND_ADDR_2_DATA 0x110
+/* The width of ADDR_2_DATA is 6 bits for the old IP, 7 bits for the new IP */
+#define TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA GENMASK(6, 0)
+#define TCWAW_AND_ADDR_2_DATA__TCWAW GENMASK(13, 8)
+
+#define RE_2_WE 0x120
+#define RE_2_WE__VALUE GENMASK(5, 0)
+
+#define ACC_CLKS 0x130
+#define ACC_CLKS__VALUE GENMASK(3, 0)
+
+#define NUMBER_OF_PLANES 0x140
+#define NUMBER_OF_PLANES__VALUE GENMASK(2, 0)
+
+#define PAGES_PER_BLOCK 0x150
+#define PAGES_PER_BLOCK__VALUE GENMASK(15, 0)
+
+#define DEVICE_WIDTH 0x160
+#define DEVICE_WIDTH__VALUE GENMASK(1, 0)
+
+#define DEVICE_MAIN_AREA_SIZE 0x170
+#define DEVICE_MAIN_AREA_SIZE__VALUE GENMASK(15, 0)
+
+#define DEVICE_SPARE_AREA_SIZE 0x180
+#define DEVICE_SPARE_AREA_SIZE__VALUE GENMASK(15, 0)
+
+#define TWO_ROW_ADDR_CYCLES 0x190
+#define TWO_ROW_ADDR_CYCLES__FLAG BIT(0)
+
+#define MULTIPLANE_ADDR_RESTRICT 0x1a0
+#define MULTIPLANE_ADDR_RESTRICT__FLAG BIT(0)
+
+#define ECC_CORRECTION 0x1b0
+#define ECC_CORRECTION__VALUE GENMASK(4, 0)
+#define ECC_CORRECTION__ERASE_THRESHOLD GENMASK(31, 16)
+
+#define READ_MODE 0x1c0
+#define READ_MODE__VALUE GENMASK(3, 0)
+
+#define WRITE_MODE 0x1d0
+#define WRITE_MODE__VALUE GENMASK(3, 0)
+
+#define COPYBACK_MODE 0x1e0
+#define COPYBACK_MODE__VALUE GENMASK(3, 0)
+
+#define RDWR_EN_LO_CNT 0x1f0
+#define RDWR_EN_LO_CNT__VALUE GENMASK(4, 0)
+
+#define RDWR_EN_HI_CNT 0x200
+#define RDWR_EN_HI_CNT__VALUE GENMASK(4, 0)
+
+#define MAX_RD_DELAY 0x210
+#define MAX_RD_DELAY__VALUE GENMASK(3, 0)
+
+#define CS_SETUP_CNT 0x220
+#define CS_SETUP_CNT__VALUE GENMASK(4, 0)
+#define CS_SETUP_CNT__TWB GENMASK(17, 12)
+
+#define SPARE_AREA_SKIP_BYTES 0x230
+#define SPARE_AREA_SKIP_BYTES__VALUE GENMASK(5, 0)
+
+#define SPARE_AREA_MARKER 0x240
+#define SPARE_AREA_MARKER__VALUE GENMASK(15, 0)
+
+#define DEVICES_CONNECTED 0x250
+#define DEVICES_CONNECTED__VALUE GENMASK(2, 0)
+
+#define DIE_MASK 0x260
+#define DIE_MASK__VALUE GENMASK(7, 0)
+
+#define FIRST_BLOCK_OF_NEXT_PLANE 0x270
+#define FIRST_BLOCK_OF_NEXT_PLANE__VALUE GENMASK(15, 0)
+
+#define WRITE_PROTECT 0x280
+#define WRITE_PROTECT__FLAG BIT(0)
+
+#define RE_2_RE 0x290
+#define RE_2_RE__VALUE GENMASK(5, 0)
+
+#define MANUFACTURER_ID 0x300
+#define MANUFACTURER_ID__VALUE GENMASK(7, 0)
+
+#define DEVICE_ID 0x310
+#define DEVICE_ID__VALUE GENMASK(7, 0)
+
+#define DEVICE_PARAM_0 0x320
+#define DEVICE_PARAM_0__VALUE GENMASK(7, 0)
+
+#define DEVICE_PARAM_1 0x330
+#define DEVICE_PARAM_1__VALUE GENMASK(7, 0)
+
+#define DEVICE_PARAM_2 0x340
+#define DEVICE_PARAM_2__VALUE GENMASK(7, 0)
+
+#define LOGICAL_PAGE_DATA_SIZE 0x350
+#define LOGICAL_PAGE_DATA_SIZE__VALUE GENMASK(15, 0)
+
+#define LOGICAL_PAGE_SPARE_SIZE 0x360
+#define LOGICAL_PAGE_SPARE_SIZE__VALUE GENMASK(15, 0)
+
+#define REVISION 0x370
+#define REVISION__VALUE GENMASK(15, 0)
+
+#define ONFI_DEVICE_FEATURES 0x380
+#define ONFI_DEVICE_FEATURES__VALUE GENMASK(5, 0)
+
+#define ONFI_OPTIONAL_COMMANDS 0x390
+#define ONFI_OPTIONAL_COMMANDS__VALUE GENMASK(5, 0)
+
+#define ONFI_TIMING_MODE 0x3a0
+#define ONFI_TIMING_MODE__VALUE GENMASK(5, 0)
+
+#define ONFI_PGM_CACHE_TIMING_MODE 0x3b0
+#define ONFI_PGM_CACHE_TIMING_MODE__VALUE GENMASK(5, 0)
+
+#define ONFI_DEVICE_NO_OF_LUNS 0x3c0
+#define ONFI_DEVICE_NO_OF_LUNS__NO_OF_LUNS GENMASK(7, 0)
+#define ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE BIT(8)
+
+#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L 0x3d0
+#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L__VALUE GENMASK(15, 0)
+
+#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U 0x3e0
+#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U__VALUE GENMASK(15, 0)
+
+#define FEATURES 0x3f0
+#define FEATURES__N_BANKS GENMASK(1, 0)
+#define FEATURES__ECC_MAX_ERR GENMASK(5, 2)
+#define FEATURES__DMA BIT(6)
+#define FEATURES__CMD_DMA BIT(7)
+#define FEATURES__PARTITION BIT(8)
+#define FEATURES__XDMA_SIDEBAND BIT(9)
+#define FEATURES__GPREG BIT(10)
+#define FEATURES__INDEX_ADDR BIT(11)
+
+#define TRANSFER_MODE 0x400
+#define TRANSFER_MODE__VALUE GENMASK(1, 0)
+
+#define INTR_STATUS(bank) (0x410 + (bank) * 0x50)
+#define INTR_EN(bank) (0x420 + (bank) * 0x50)
+/* bits [1:0] are used differently depending on the IP version */
+#define INTR__ECC_UNCOR_ERR BIT(0) /* new IP */
+#define INTR__ECC_TRANSACTION_DONE BIT(0) /* old IP */
+#define INTR__ECC_ERR BIT(1) /* old IP */
+#define INTR__DMA_CMD_COMP BIT(2)
+#define INTR__TIME_OUT BIT(3)
+#define INTR__PROGRAM_FAIL BIT(4)
+#define INTR__ERASE_FAIL BIT(5)
+#define INTR__LOAD_COMP BIT(6)
+#define INTR__PROGRAM_COMP BIT(7)
+#define INTR__ERASE_COMP BIT(8)
+#define INTR__PIPE_CPYBCK_CMD_COMP BIT(9)
+#define INTR__LOCKED_BLK BIT(10)
+#define INTR__UNSUP_CMD BIT(11)
+#define INTR__INT_ACT BIT(12)
+#define INTR__RST_COMP BIT(13)
+#define INTR__PIPE_CMD_ERR BIT(14)
+#define INTR__PAGE_XFER_INC BIT(15)
+#define INTR__ERASED_PAGE BIT(16)
+
+#define PAGE_CNT(bank) (0x430 + (bank) * 0x50)
+#define ERR_PAGE_ADDR(bank) (0x440 + (bank) * 0x50)
+#define ERR_BLOCK_ADDR(bank) (0x450 + (bank) * 0x50)
+
+#define ECC_THRESHOLD 0x600
+#define ECC_THRESHOLD__VALUE GENMASK(9, 0)
+
+#define ECC_ERROR_BLOCK_ADDRESS 0x610
+#define ECC_ERROR_BLOCK_ADDRESS__VALUE GENMASK(15, 0)
+
+#define ECC_ERROR_PAGE_ADDRESS 0x620
+#define ECC_ERROR_PAGE_ADDRESS__VALUE GENMASK(11, 0)
+#define ECC_ERROR_PAGE_ADDRESS__BANK GENMASK(15, 12)
+
+#define ECC_ERROR_ADDRESS 0x630
+#define ECC_ERROR_ADDRESS__OFFSET GENMASK(11, 0)
+#define ECC_ERROR_ADDRESS__SECTOR GENMASK(15, 12)
+
+#define ERR_CORRECTION_INFO 0x640
+#define ERR_CORRECTION_INFO__BYTE GENMASK(7, 0)
+#define ERR_CORRECTION_INFO__DEVICE GENMASK(11, 8)
+#define ERR_CORRECTION_INFO__UNCOR BIT(14)
+#define ERR_CORRECTION_INFO__LAST_ERR BIT(15)
+
+#define ECC_COR_INFO(bank) (0x650 + (bank) / 2 * 0x10)
+#define ECC_COR_INFO__SHIFT(bank) ((bank) % 2 * 8)
+#define ECC_COR_INFO__MAX_ERRORS GENMASK(6, 0)
+#define ECC_COR_INFO__UNCOR_ERR BIT(7)
+
+#define CFG_DATA_BLOCK_SIZE 0x6b0
+
+#define CFG_LAST_DATA_BLOCK_SIZE 0x6c0
+
+#define CFG_NUM_DATA_BLOCKS 0x6d0
+
+#define CFG_META_DATA_SIZE 0x6e0
+
+#define DMA_ENABLE 0x700
+#define DMA_ENABLE__FLAG BIT(0)
+
+#define IGNORE_ECC_DONE 0x710
+#define IGNORE_ECC_DONE__FLAG BIT(0)
+
+#define DMA_INTR 0x720
+#define DMA_INTR_EN 0x730
+#define DMA_INTR__TARGET_ERROR BIT(0)
+#define DMA_INTR__DESC_COMP_CHANNEL0 BIT(1)
+#define DMA_INTR__DESC_COMP_CHANNEL1 BIT(2)
+#define DMA_INTR__DESC_COMP_CHANNEL2 BIT(3)
+#define DMA_INTR__DESC_COMP_CHANNEL3 BIT(4)
+#define DMA_INTR__MEMCOPY_DESC_COMP BIT(5)
+
+#define TARGET_ERR_ADDR_LO 0x740
+#define TARGET_ERR_ADDR_LO__VALUE GENMASK(15, 0)
+
+#define TARGET_ERR_ADDR_HI 0x750
+#define TARGET_ERR_ADDR_HI__VALUE GENMASK(15, 0)
+
+#define CHNL_ACTIVE 0x760
+#define CHNL_ACTIVE__CHANNEL0 BIT(0)
+#define CHNL_ACTIVE__CHANNEL1 BIT(1)
+#define CHNL_ACTIVE__CHANNEL2 BIT(2)
+#define CHNL_ACTIVE__CHANNEL3 BIT(3)
+
+/**
+ * struct denali_chip_sel - per-CS data of Denali NAND
+ *
+ * @bank: bank id of the controller this CS is connected to
+ * @hwhr2_and_we_2_re: value of timing register HWHR2_AND_WE_2_RE
+ * @tcwaw_and_addr_2_data: value of timing register TCWAW_AND_ADDR_2_DATA
+ * @re_2_we: value of timing register RE_2_WE
+ * @acc_clks: value of timing register ACC_CLKS
+ * @rdwr_en_lo_cnt: value of timing register RDWR_EN_LO_CNT
+ * @rdwr_en_hi_cnt: value of timing register RDWR_EN_HI_CNT
+ * @cs_setup_cnt: value of timing register CS_SETUP_CNT
+ * @re_2_re: value of timing register RE_2_RE
+ */
+struct denali_chip_sel {
+ int bank;
+ u32 hwhr2_and_we_2_re;
+ u32 tcwaw_and_addr_2_data;
+ u32 re_2_we;
+ u32 acc_clks;
+ u32 rdwr_en_lo_cnt;
+ u32 rdwr_en_hi_cnt;
+ u32 cs_setup_cnt;
+ u32 re_2_re;
+};
+
+/**
+ * struct denali_chip - per-chip data of Denali NAND
+ *
+ * @chip: base NAND chip structure
+ * @node: node to be used to associate this chip with the controller
+ * @nsels: the number of CS lines of this chip
+ * @sels: the array of per-CS data
+ */
+struct denali_chip {
+ struct nand_chip chip;
+ struct list_head node;
+ unsigned int nsels;
+ struct denali_chip_sel sels[];
+};
+
+/**
+ * struct denali_controller - Denali NAND controller data
+ *
+ * @controller: base NAND controller structure
+ * @dev: device
+ * @chips: the list of chips attached to this controller
+ * @clk_rate: frequency of core clock
+ * @clk_x_rate: frequency of bus interface clock
+ * @reg: base of Register Interface
+ * @host: base of Host Data/Command interface
+ * @complete: completion used to wait for interrupts
+ * @irq: interrupt number
+ * @irq_mask: interrupt bits the controller is waiting for
+ * @irq_status: interrupt bits of events that have happened
+ * @irq_lock: lock to protect @irq_mask and @irq_status
+ * @dma_avail: set if DMA engine is available
+ * @devs_per_cs: number of devices connected in parallel
+ * @oob_skip_bytes: number of bytes in OOB skipped by the ECC engine
+ * @active_bank: active bank id
+ * @nbanks: the number of banks supported by this controller
+ * @revision: IP revision
+ * @caps: controller capabilities that cannot be detected at run time
+ * @ecc_caps: ECC engine capabilities
+ * @host_read: callback for read access of Host Data/Command Interface
+ * @host_write: callback for write access of Host Data/Command Interface
+ * @setup_dma: callback for setup of the Data DMA
+ */
+struct denali_controller {
+ struct nand_controller controller;
+ struct device *dev;
+ struct list_head chips;
+ unsigned long clk_rate;
+ unsigned long clk_x_rate;
+ void __iomem *reg;
+ void __iomem *host;
+ struct completion complete;
+ int irq;
+ u32 irq_mask;
+ u32 irq_status;
+ spinlock_t irq_lock;
+ bool dma_avail;
+ int devs_per_cs;
+ int oob_skip_bytes;
+ int active_bank;
+ int nbanks;
+ unsigned int revision;
+ unsigned int caps;
+ const struct nand_ecc_caps *ecc_caps;
+ u32 (*host_read)(struct denali_controller *denali, u32 addr);
+ void (*host_write)(struct denali_controller *denali, u32 addr,
+ u32 data);
+ void (*setup_dma)(struct denali_controller *denali, dma_addr_t dma_addr,
+ int page, bool write);
+};
+
+#define DENALI_CAP_HW_ECC_FIXUP BIT(0)
+#define DENALI_CAP_DMA_64BIT BIT(1)
+
+int denali_calc_ecc_bytes(int step_size, int strength);
+int denali_chip_init(struct denali_controller *denali,
+ struct denali_chip *dchip);
+int denali_init(struct denali_controller *denali);
+void denali_remove(struct denali_controller *denali);
+
+#endif /* __DENALI_H__ */
diff --git a/drivers/mtd/nand/raw/denali_dt.c b/drivers/mtd/nand/raw/denali_dt.c
new file mode 100644
index 000000000..f08740ae2
--- /dev/null
+++ b/drivers/mtd/nand/raw/denali_dt.c
@@ -0,0 +1,265 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NAND Flash Controller Device Driver for DT
+ *
+ * Copyright © 2011, Picochip.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include "denali.h"
+
+struct denali_dt {
+ struct denali_controller controller;
+ struct clk *clk; /* core clock */
+ struct clk *clk_x; /* bus interface clock */
+ struct clk *clk_ecc; /* ECC circuit clock */
+ struct reset_control *rst; /* core reset */
+ struct reset_control *rst_reg; /* register reset */
+};
+
+struct denali_dt_data {
+ unsigned int revision;
+ unsigned int caps;
+ unsigned int oob_skip_bytes;
+ const struct nand_ecc_caps *ecc_caps;
+};
+
+NAND_ECC_CAPS_SINGLE(denali_socfpga_ecc_caps, denali_calc_ecc_bytes,
+ 512, 8, 15);
+static const struct denali_dt_data denali_socfpga_data = {
+ .caps = DENALI_CAP_HW_ECC_FIXUP,
+ .oob_skip_bytes = 2,
+ .ecc_caps = &denali_socfpga_ecc_caps,
+};
+
+NAND_ECC_CAPS_SINGLE(denali_uniphier_v5a_ecc_caps, denali_calc_ecc_bytes,
+ 1024, 8, 16, 24);
+static const struct denali_dt_data denali_uniphier_v5a_data = {
+ .caps = DENALI_CAP_HW_ECC_FIXUP |
+ DENALI_CAP_DMA_64BIT,
+ .oob_skip_bytes = 8,
+ .ecc_caps = &denali_uniphier_v5a_ecc_caps,
+};
+
+NAND_ECC_CAPS_SINGLE(denali_uniphier_v5b_ecc_caps, denali_calc_ecc_bytes,
+ 1024, 8, 16);
+static const struct denali_dt_data denali_uniphier_v5b_data = {
+ .revision = 0x0501,
+ .caps = DENALI_CAP_HW_ECC_FIXUP |
+ DENALI_CAP_DMA_64BIT,
+ .oob_skip_bytes = 8,
+ .ecc_caps = &denali_uniphier_v5b_ecc_caps,
+};
+
+static const struct of_device_id denali_nand_dt_ids[] = {
+ {
+ .compatible = "altr,socfpga-denali-nand",
+ .data = &denali_socfpga_data,
+ },
+ {
+ .compatible = "socionext,uniphier-denali-nand-v5a",
+ .data = &denali_uniphier_v5a_data,
+ },
+ {
+ .compatible = "socionext,uniphier-denali-nand-v5b",
+ .data = &denali_uniphier_v5b_data,
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, denali_nand_dt_ids);
+
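+/*
+ * Instantiate one chip from a DT child node: each element of its "reg"
+ * property selects a controller bank (CS) the chip is wired to.
+ */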
+static int denali_dt_chip_init(struct denali_controller *denali,
+ struct device_node *chip_np)
+{
+ struct denali_chip *dchip;
+ u32 bank;
+ int nsels, i, ret;
+
+ nsels = of_property_count_u32_elems(chip_np, "reg");
+ if (nsels < 0)
+ return nsels;
+
+ dchip = devm_kzalloc(denali->dev, struct_size(dchip, sels, nsels),
+ GFP_KERNEL);
+ if (!dchip)
+ return -ENOMEM;
+
+ dchip->nsels = nsels;
+
+ for (i = 0; i < nsels; i++) {
+ ret = of_property_read_u32_index(chip_np, "reg", i, &bank);
+ if (ret)
+ return ret;
+
+ dchip->sels[i].bank = bank;
+
+ nand_set_flash_node(&dchip->chip, chip_np);
+ }
+
+ return denali_chip_init(denali, dchip);
+}
+
+static int denali_dt_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct denali_dt *dt;
+ const struct denali_dt_data *data;
+ struct denali_controller *denali;
+ struct device_node *np;
+ int ret;
+
+ dt = devm_kzalloc(dev, sizeof(*dt), GFP_KERNEL);
+ if (!dt)
+ return -ENOMEM;
+ denali = &dt->controller;
+
+ data = of_device_get_match_data(dev);
+ if (WARN_ON(!data))
+ return -EINVAL;
+
+ denali->revision = data->revision;
+ denali->caps = data->caps;
+ denali->oob_skip_bytes = data->oob_skip_bytes;
+ denali->ecc_caps = data->ecc_caps;
+
+ denali->dev = dev;
+ denali->irq = platform_get_irq(pdev, 0);
+ if (denali->irq < 0)
+ return denali->irq;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "denali_reg");
+ denali->reg = devm_ioremap_resource(dev, res);
+ if (IS_ERR(denali->reg))
+ return PTR_ERR(denali->reg);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data");
+ denali->host = devm_ioremap_resource(dev, res);
+ if (IS_ERR(denali->host))
+ return PTR_ERR(denali->host);
+
+ dt->clk = devm_clk_get(dev, "nand");
+ if (IS_ERR(dt->clk))
+ return PTR_ERR(dt->clk);
+
+ dt->clk_x = devm_clk_get(dev, "nand_x");
+ if (IS_ERR(dt->clk_x))
+ return PTR_ERR(dt->clk_x);
+
+ dt->clk_ecc = devm_clk_get(dev, "ecc");
+ if (IS_ERR(dt->clk_ecc))
+ return PTR_ERR(dt->clk_ecc);
+
+ dt->rst = devm_reset_control_get_optional_shared(dev, "nand");
+ if (IS_ERR(dt->rst))
+ return PTR_ERR(dt->rst);
+
+ dt->rst_reg = devm_reset_control_get_optional_shared(dev, "reg");
+ if (IS_ERR(dt->rst_reg))
+ return PTR_ERR(dt->rst_reg);
+
+ ret = clk_prepare_enable(dt->clk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(dt->clk_x);
+ if (ret)
+ goto out_disable_clk;
+
+ ret = clk_prepare_enable(dt->clk_ecc);
+ if (ret)
+ goto out_disable_clk_x;
+
+ denali->clk_rate = clk_get_rate(dt->clk);
+ denali->clk_x_rate = clk_get_rate(dt->clk_x);
+
+ /*
+ * Deassert the register reset first, then the core reset.
+ * Deasserting the core reset while the register reset is asserted
+ * will cause unpredictable behavior in the controller.
+ */
+ ret = reset_control_deassert(dt->rst_reg);
+ if (ret)
+ goto out_disable_clk_ecc;
+
+ ret = reset_control_deassert(dt->rst);
+ if (ret)
+ goto out_assert_rst_reg;
+
+ /*
+ * When the reset is deasserted, the initialization sequence (bootstrap
+ * process) is kicked off. The driver must wait until it has finished.
+ * Otherwise, the controller may behave unpredictably.
+ */
+ usleep_range(200, 1000);
+
+ ret = denali_init(denali);
+ if (ret)
+ goto out_assert_rst;
+
+ for_each_child_of_node(dev->of_node, np) {
+ ret = denali_dt_chip_init(denali, np);
+ if (ret) {
+ of_node_put(np);
+ goto out_remove_denali;
+ }
+ }
+
+ platform_set_drvdata(pdev, dt);
+
+ return 0;
+
+out_remove_denali:
+ denali_remove(denali);
+out_assert_rst:
+ reset_control_assert(dt->rst);
+out_assert_rst_reg:
+ reset_control_assert(dt->rst_reg);
+out_disable_clk_ecc:
+ clk_disable_unprepare(dt->clk_ecc);
+out_disable_clk_x:
+ clk_disable_unprepare(dt->clk_x);
+out_disable_clk:
+ clk_disable_unprepare(dt->clk);
+
+ return ret;
+}
+
+static int denali_dt_remove(struct platform_device *pdev)
+{
+ struct denali_dt *dt = platform_get_drvdata(pdev);
+
+ denali_remove(&dt->controller);
+ reset_control_assert(dt->rst);
+ reset_control_assert(dt->rst_reg);
+ clk_disable_unprepare(dt->clk_ecc);
+ clk_disable_unprepare(dt->clk_x);
+ clk_disable_unprepare(dt->clk);
+
+ return 0;
+}
+
+static struct platform_driver denali_dt_driver = {
+ .probe = denali_dt_probe,
+ .remove = denali_dt_remove,
+ .driver = {
+ .name = "denali-nand-dt",
+ .of_match_table = denali_nand_dt_ids,
+ },
+};
+module_platform_driver(denali_dt_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jamie Iles");
+MODULE_DESCRIPTION("DT driver for Denali NAND controller");
diff --git a/drivers/mtd/nand/raw/denali_pci.c b/drivers/mtd/nand/raw/denali_pci.c
new file mode 100644
index 000000000..de7e722d3
--- /dev/null
+++ b/drivers/mtd/nand/raw/denali_pci.c
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NAND Flash Controller Device Driver
+ * Copyright © 2009-2010, Intel Corporation and its suppliers.
+ */
+
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "denali.h"
+
+#define DENALI_NAND_NAME "denali-nand-pci"
+
+#define INTEL_CE4100 1
+#define INTEL_MRST 2
+
+/* List of platforms this NAND controller has been integrated into */
+static const struct pci_device_id denali_pci_ids[] = {
+ { PCI_VDEVICE(INTEL, 0x0701), INTEL_CE4100 },
+ { PCI_VDEVICE(INTEL, 0x0809), INTEL_MRST },
+ { /* end: all zeroes */ }
+};
+MODULE_DEVICE_TABLE(pci, denali_pci_ids);
+
+NAND_ECC_CAPS_SINGLE(denali_pci_ecc_caps, denali_calc_ecc_bytes, 512, 8, 15);
+
+static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ resource_size_t csr_base, mem_base;
+ unsigned long csr_len, mem_len;
+ struct denali_controller *denali;
+ struct denali_chip *dchip;
+ int nsels, ret, i;
+
+ denali = devm_kzalloc(&dev->dev, sizeof(*denali), GFP_KERNEL);
+ if (!denali)
+ return -ENOMEM;
+
+ ret = pcim_enable_device(dev);
+ if (ret) {
+ dev_err(&dev->dev, "Spectra: pci_enable_device failed.\n");
+ return ret;
+ }
+
+ if (id->driver_data == INTEL_CE4100) {
+ mem_base = pci_resource_start(dev, 0);
+ mem_len = pci_resource_len(dev, 1);
+ csr_base = pci_resource_start(dev, 1);
+ csr_len = pci_resource_len(dev, 1);
+ } else {
+ csr_base = pci_resource_start(dev, 0);
+ csr_len = pci_resource_len(dev, 0);
+ mem_base = pci_resource_start(dev, 1);
+ mem_len = pci_resource_len(dev, 1);
+ if (!mem_len) {
+ mem_base = csr_base + csr_len;
+ mem_len = csr_len;
+ }
+ }
+
+ pci_set_master(dev);
+ denali->dev = &dev->dev;
+ denali->irq = dev->irq;
+ denali->ecc_caps = &denali_pci_ecc_caps;
+ denali->clk_rate = 50000000; /* 50 MHz */
+ denali->clk_x_rate = 200000000; /* 200 MHz */
+
+ ret = pci_request_regions(dev, DENALI_NAND_NAME);
+ if (ret) {
+ dev_err(&dev->dev, "Spectra: Unable to request memory regions\n");
+ return ret;
+ }
+
+ denali->reg = devm_ioremap(denali->dev, csr_base, csr_len);
+ if (!denali->reg) {
+ dev_err(&dev->dev, "Spectra: Unable to remap memory region\n");
+ return -ENOMEM;
+ }
+
+ denali->host = devm_ioremap(denali->dev, mem_base, mem_len);
+ if (!denali->host) {
+ dev_err(&dev->dev, "Spectra: ioremap failed!");
+ return -ENOMEM;
+ }
+
+ ret = denali_init(denali);
+ if (ret)
+ return ret;
+
+ nsels = denali->nbanks;
+
+ dchip = devm_kzalloc(denali->dev, struct_size(dchip, sels, nsels),
+ GFP_KERNEL);
+ if (!dchip) {
+ ret = -ENOMEM;
+ goto out_remove_denali;
+ }
+
+ dchip->chip.base.ecc.user_conf.flags |= NAND_ECC_MAXIMIZE_STRENGTH;
+
+ dchip->nsels = nsels;
+
+ for (i = 0; i < nsels; i++)
+ dchip->sels[i].bank = i;
+
+ ret = denali_chip_init(denali, dchip);
+ if (ret)
+ goto out_remove_denali;
+
+ pci_set_drvdata(dev, denali);
+
+ return 0;
+
+out_remove_denali:
+ denali_remove(denali);
+ return ret;
+}
+
+static void denali_pci_remove(struct pci_dev *dev)
+{
+ struct denali_controller *denali = pci_get_drvdata(dev);
+
+ denali_remove(denali);
+}
+
+static struct pci_driver denali_pci_driver = {
+ .name = DENALI_NAND_NAME,
+ .id_table = denali_pci_ids,
+ .probe = denali_pci_probe,
+ .remove = denali_pci_remove,
+};
+module_pci_driver(denali_pci_driver);
+
+MODULE_DESCRIPTION("PCI driver for Denali NAND controller");
+MODULE_AUTHOR("Intel Corporation and its suppliers");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/raw/diskonchip.c b/drivers/mtd/nand/raw/diskonchip.c
new file mode 100644
index 000000000..26b265e43
--- /dev/null
+++ b/drivers/mtd/nand/raw/diskonchip.c
@@ -0,0 +1,1579 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * (C) 2003 Red Hat, Inc.
+ * (C) 2004 Dan Brown <dan_brown@ieee.org>
+ * (C) 2004 Kalev Lember <kalev@smartlink.ee>
+ *
+ * Author: David Woodhouse <dwmw2@infradead.org>
+ * Additional Diskonchip 2000 and Millennium support by Dan Brown <dan_brown@ieee.org>
+ * Diskonchip Millennium Plus support by Kalev Lember <kalev@smartlink.ee>
+ *
+ * Error correction code lifted from the old docecc code
+ * Author: Fabrice Bellard (fabrice.bellard@netgem.com)
+ * Copyright (C) 2000 Netgem S.A.
+ * converted to the generic Reed-Solomon library by Thomas Gleixner <tglx@linutronix.de>
+ *
+ * Interface to generic NAND code for M-Systems DiskOnChip devices
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/rslib.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/doc2000.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/inftl.h>
+#include <linux/module.h>
+
+/* Where to look for the devices? */
+#ifndef CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS
+#define CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS 0
+#endif
+
+static unsigned long doc_locations[] __initdata = {
+#if defined (__alpha__) || defined(__i386__) || defined(__x86_64__)
+#ifdef CONFIG_MTD_NAND_DISKONCHIP_PROBE_HIGH
+ 0xfffc8000, 0xfffca000, 0xfffcc000, 0xfffce000,
+ 0xfffd0000, 0xfffd2000, 0xfffd4000, 0xfffd6000,
+ 0xfffd8000, 0xfffda000, 0xfffdc000, 0xfffde000,
+ 0xfffe0000, 0xfffe2000, 0xfffe4000, 0xfffe6000,
+ 0xfffe8000, 0xfffea000, 0xfffec000, 0xfffee000,
+#else
+ 0xc8000, 0xca000, 0xcc000, 0xce000,
+ 0xd0000, 0xd2000, 0xd4000, 0xd6000,
+ 0xd8000, 0xda000, 0xdc000, 0xde000,
+ 0xe0000, 0xe2000, 0xe4000, 0xe6000,
+ 0xe8000, 0xea000, 0xec000, 0xee000,
+#endif
+#endif
+ 0xffffffff };
+
+static struct mtd_info *doclist = NULL;
+
+struct doc_priv {
+ struct nand_controller base;
+ void __iomem *virtadr;
+ unsigned long physadr;
+ u_char ChipID;
+ u_char CDSNControl;
+ int chips_per_floor; /* The number of chips detected on each floor */
+ int curfloor;
+ int curchip;
+ int mh0_page;
+ int mh1_page;
+ struct rs_control *rs_decoder;
+ struct mtd_info *nextdoc;
+ bool supports_32b_reads;
+
+ /* Handle the last stage of initialization (BBT scan, partitioning) */
+ int (*late_init)(struct mtd_info *mtd);
+};
+
+/* This is the ECC value computed by the HW ECC generator upon writing an empty
+   page, i.e. one with all 0xff for data. */
+static u_char empty_write_ecc[6] = { 0x4b, 0x00, 0xe2, 0x0e, 0x93, 0xf7 };
+
+#define INFTL_BBT_RESERVED_BLOCKS 4
+
+#define DoC_is_MillenniumPlus(doc) ((doc)->ChipID == DOC_ChipID_DocMilPlus16 || (doc)->ChipID == DOC_ChipID_DocMilPlus32)
+#define DoC_is_Millennium(doc) ((doc)->ChipID == DOC_ChipID_DocMil)
+#define DoC_is_2000(doc) ((doc)->ChipID == DOC_ChipID_Doc2k)
+
+static int debug = 0;
+module_param(debug, int, 0);
+
+static int try_dword = 1;
+module_param(try_dword, int, 0);
+
+static int no_ecc_failures = 0;
+module_param(no_ecc_failures, int, 0);
+
+static int no_autopart = 0;
+module_param(no_autopart, int, 0);
+
+static int show_firmware_partition = 0;
+module_param(show_firmware_partition, int, 0);
+
+#ifdef CONFIG_MTD_NAND_DISKONCHIP_BBTWRITE
+static int inftl_bbt_write = 1;
+#else
+static int inftl_bbt_write = 0;
+#endif
+module_param(inftl_bbt_write, int, 0);
+
+static unsigned long doc_config_location = CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS;
+module_param(doc_config_location, ulong, 0);
+MODULE_PARM_DESC(doc_config_location, "Physical memory address at which to probe for DiskOnChip");
+
+/* Sector size for HW ECC */
+#define SECTOR_SIZE 512
+/* The sector bytes are packed into NB_DATA 10 bit words */
+#define NB_DATA (((SECTOR_SIZE + 1) * 8 + 6) / 10)
+/* Number of roots */
+#define NROOTS 4
+/* First consecutive root */
+#define FCR 510
+/* Number of symbols */
+#define NN 1023
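+/*
+ * Arithmetic check: SECTOR_SIZE + 1 = 513 bytes = 4104 bits; the "+ 6" rounds
+ * up to a whole number of 10-bit symbols, giving NB_DATA = 411.
+ */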
+
+/*
+ * The HW decoder in the DoC ASICs provides us an error syndrome,
+ * which we must convert to a standard syndrome usable by the generic
+ * Reed-Solomon library code.
+ *
+ * Fabrice Bellard figured this out in the old docecc code. I added
+ * some comments, made minor improvements and converted it to make use
+ * of the generic Reed-Solomon library. tglx
+ */
+static int doc_ecc_decode(struct rs_control *rs, uint8_t *data, uint8_t *ecc)
+{
+ int i, j, nerr, errpos[8];
+ uint8_t parity;
+ uint16_t ds[4], s[5], tmp, errval[8], syn[4];
+ struct rs_codec *cd = rs->codec;
+
+ memset(syn, 0, sizeof(syn));
+ /* Convert the ecc bytes into words */
+ ds[0] = ((ecc[4] & 0xff) >> 0) | ((ecc[5] & 0x03) << 8);
+ ds[1] = ((ecc[5] & 0xfc) >> 2) | ((ecc[2] & 0x0f) << 6);
+ ds[2] = ((ecc[2] & 0xf0) >> 4) | ((ecc[3] & 0x3f) << 4);
+ ds[3] = ((ecc[3] & 0xc0) >> 6) | ((ecc[0] & 0xff) << 2);
+ parity = ecc[1];
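+	/* ds[] now holds four 10-bit words rebuilt from five of the six ECC
+	   bytes; ecc[1] is kept aside as a byte-wide parity value. */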
+
+ /* Initialize the syndrome buffer */
+ for (i = 0; i < NROOTS; i++)
+ s[i] = ds[0];
+ /*
+ * Evaluate
+ * s[i] = ds[3]x^3 + ds[2]x^2 + ds[1]x^1 + ds[0]
+ * where x = alpha^(FCR + i)
+ */
+ for (j = 1; j < NROOTS; j++) {
+ if (ds[j] == 0)
+ continue;
+ tmp = cd->index_of[ds[j]];
+ for (i = 0; i < NROOTS; i++)
+ s[i] ^= cd->alpha_to[rs_modnn(cd, tmp + (FCR + i) * j)];
+ }
+
+ /* Calc syn[i] = s[i] / alpha^(v + i) */
+ for (i = 0; i < NROOTS; i++) {
+ if (s[i])
+ syn[i] = rs_modnn(cd, cd->index_of[s[i]] + (NN - FCR - i));
+ }
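+	/*
+	 * The codec is RS(1023, 1019) over GF(2^10): 1019 data symbols plus
+	 * NROOTS = 4 parity symbols form the NN = 1023 symbol codeword. Only
+	 * NB_DATA of the data symbols are actually used; the remainder are
+	 * implicitly zero.
+	 */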
+ /* Call the decoder library */
+ nerr = decode_rs16(rs, NULL, NULL, 1019, syn, 0, errpos, 0, errval);
+
+	/* Uncorrectable errors? */
+ if (nerr < 0)
+ return nerr;
+
+	/*
+	 * Correct the errors. The bit positions are a bit of magic,
+	 * but they are given by the design of the de/encoder circuit
+	 * in the DoC ASICs.
+	 */
+ for (i = 0; i < nerr; i++) {
+ int index, bitpos, pos = 1015 - errpos[i];
+ uint8_t val;
+ if (pos >= NB_DATA && pos < 1019)
+ continue;
+ if (pos < NB_DATA) {
+ /* extract bit position (MSB first) */
+ pos = 10 * (NB_DATA - 1 - pos) - 6;
+ /* now correct the following 10 bits. At most two bytes
+ can be modified since pos is even */
+ index = (pos >> 3) ^ 1;
+ bitpos = pos & 7;
+ if ((index >= 0 && index < SECTOR_SIZE) || index == (SECTOR_SIZE + 1)) {
+ val = (uint8_t) (errval[i] >> (2 + bitpos));
+ parity ^= val;
+ if (index < SECTOR_SIZE)
+ data[index] ^= val;
+ }
+ index = ((pos >> 3) + 1) ^ 1;
+ bitpos = (bitpos + 10) & 7;
+ if (bitpos == 0)
+ bitpos = 8;
+ if ((index >= 0 && index < SECTOR_SIZE) || index == (SECTOR_SIZE + 1)) {
+ val = (uint8_t) (errval[i] << (8 - bitpos));
+ parity ^= val;
+ if (index < SECTOR_SIZE)
+ data[index] ^= val;
+ }
+ }
+ }
+ /* If the parity is wrong, no rescue possible */
+ return parity ? -EBADMSG : nerr;
+}
+
+static void DoC_Delay(struct doc_priv *doc, unsigned short cycles)
+{
+ volatile char dummy;
+ int i;
+
+ for (i = 0; i < cycles; i++) {
+ if (DoC_is_Millennium(doc))
+ dummy = ReadDOC(doc->virtadr, NOP);
+ else if (DoC_is_MillenniumPlus(doc))
+ dummy = ReadDOC(doc->virtadr, Mplus_NOP);
+ else
+ dummy = ReadDOC(doc->virtadr, DOCStatus);
+ }
+
+}
+
+#define CDSN_CTRL_FR_B_MASK (CDSN_CTRL_FR_B0 | CDSN_CTRL_FR_B1)
+
+/* _DoC_WaitReady: Wait for the RDY line to be asserted by the flash chip */
+static int _DoC_WaitReady(struct doc_priv *doc)
+{
+ void __iomem *docptr = doc->virtadr;
+ unsigned long timeo = jiffies + (HZ * 10);
+
+ if (debug)
+ printk("_DoC_WaitReady...\n");
+ /* Out-of-line routine to wait for chip response */
+ if (DoC_is_MillenniumPlus(doc)) {
+ while ((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK) {
+ if (time_after(jiffies, timeo)) {
+ printk("_DoC_WaitReady timed out.\n");
+ return -EIO;
+ }
+ udelay(1);
+ cond_resched();
+ }
+ } else {
+ while (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) {
+ if (time_after(jiffies, timeo)) {
+ printk("_DoC_WaitReady timed out.\n");
+ return -EIO;
+ }
+ udelay(1);
+ cond_resched();
+ }
+ }
+
+ return 0;
+}
+
+static inline int DoC_WaitReady(struct doc_priv *doc)
+{
+ void __iomem *docptr = doc->virtadr;
+ int ret = 0;
+
+ if (DoC_is_MillenniumPlus(doc)) {
+ DoC_Delay(doc, 4);
+
+ if ((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK)
+ /* Call the out-of-line routine to wait */
+ ret = _DoC_WaitReady(doc);
+ } else {
+ DoC_Delay(doc, 4);
+
+ if (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B))
+ /* Call the out-of-line routine to wait */
+ ret = _DoC_WaitReady(doc);
+ DoC_Delay(doc, 2);
+ }
+
+ if (debug)
+ printk("DoC_WaitReady OK\n");
+ return ret;
+}
+
+static void doc2000_write_byte(struct nand_chip *this, u_char datum)
+{
+ struct doc_priv *doc = nand_get_controller_data(this);
+ void __iomem *docptr = doc->virtadr;
+
+ if (debug)
+ printk("write_byte %02x\n", datum);
+ WriteDOC(datum, docptr, CDSNSlowIO);
+ WriteDOC(datum, docptr, 2k_CDSN_IO);
+}
+
+static void doc2000_writebuf(struct nand_chip *this, const u_char *buf,
+ int len)
+{
+ struct doc_priv *doc = nand_get_controller_data(this);
+ void __iomem *docptr = doc->virtadr;
+ int i;
+ if (debug)
+ printk("writebuf of %d bytes: ", len);
+ for (i = 0; i < len; i++) {
+ WriteDOC_(buf[i], docptr, DoC_2k_CDSN_IO + i);
+ if (debug && i < 16)
+ printk("%02x ", buf[i]);
+ }
+ if (debug)
+ printk("\n");
+}
+
+static void doc2000_readbuf(struct nand_chip *this, u_char *buf, int len)
+{
+ struct doc_priv *doc = nand_get_controller_data(this);
+ void __iomem *docptr = doc->virtadr;
+ u32 *buf32 = (u32 *)buf;
+ int i;
+
+ if (debug)
+ printk("readbuf of %d bytes: ", len);
+
+ if (!doc->supports_32b_reads ||
+ ((((unsigned long)buf) | len) & 3)) {
+ for (i = 0; i < len; i++)
+ buf[i] = ReadDOC(docptr, 2k_CDSN_IO + i);
+ } else {
+ for (i = 0; i < len / 4; i++)
+ buf32[i] = readl(docptr + DoC_2k_CDSN_IO + i);
+ }
+}
+
+/*
+ * We need our own readid() here because it's called before the NAND chip
+ * has been initialized, and calling nand_op_readid() would lead to a NULL
+ * pointer exception when dereferencing the NAND timings.
+ */
+static void doc200x_readid(struct nand_chip *this, unsigned int cs, u8 *id)
+{
+ u8 addr = 0;
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_READID, 0),
+ NAND_OP_ADDR(1, &addr, 50),
+ NAND_OP_8BIT_DATA_IN(2, id, 0),
+ };
+
+ struct nand_operation op = NAND_OPERATION(cs, instrs);
+
+ if (!id)
+ op.ninstrs--;
+
+ this->controller->ops->exec_op(this, &op, false);
+}
+
+static uint16_t __init doc200x_ident_chip(struct mtd_info *mtd, int nr)
+{
+ struct nand_chip *this = mtd_to_nand(mtd);
+ struct doc_priv *doc = nand_get_controller_data(this);
+ uint16_t ret;
+ u8 id[2];
+
+ doc200x_readid(this, nr, id);
+
+ ret = ((u16)id[0] << 8) | id[1];
+
+ if (doc->ChipID == DOC_ChipID_Doc2k && try_dword && !nr) {
+ /* First chip probe. See if we get same results by 32-bit access */
+ union {
+ uint32_t dword;
+ uint8_t byte[4];
+ } ident;
+ void __iomem *docptr = doc->virtadr;
+
+ doc200x_readid(this, nr, NULL);
+
+ ident.dword = readl(docptr + DoC_2k_CDSN_IO);
+ if (((ident.byte[0] << 8) | ident.byte[1]) == ret) {
+ pr_info("DiskOnChip 2000 responds to DWORD access\n");
+ doc->supports_32b_reads = true;
+ }
+ }
+
+ return ret;
+}
+
+static void __init doc2000_count_chips(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd_to_nand(mtd);
+ struct doc_priv *doc = nand_get_controller_data(this);
+ uint16_t mfrid;
+ int i;
+
+ /* Max 4 chips per floor on DiskOnChip 2000 */
+ doc->chips_per_floor = 4;
+
+ /* Find out what the first chip is */
+ mfrid = doc200x_ident_chip(mtd, 0);
+
+ /* Find how many chips in each floor. */
+ for (i = 1; i < 4; i++) {
+ if (doc200x_ident_chip(mtd, i) != mfrid)
+ break;
+ }
+ doc->chips_per_floor = i;
+ pr_debug("Detected %d chips per floor.\n", i);
+}
+
+static void doc2001_write_byte(struct nand_chip *this, u_char datum)
+{
+ struct doc_priv *doc = nand_get_controller_data(this);
+ void __iomem *docptr = doc->virtadr;
+
+ WriteDOC(datum, docptr, CDSNSlowIO);
+ WriteDOC(datum, docptr, Mil_CDSN_IO);
+ WriteDOC(datum, docptr, WritePipeTerm);
+}
+
+static void doc2001_writebuf(struct nand_chip *this, const u_char *buf, int len)
+{
+ struct doc_priv *doc = nand_get_controller_data(this);
+ void __iomem *docptr = doc->virtadr;
+ int i;
+
+ for (i = 0; i < len; i++)
+ WriteDOC_(buf[i], docptr, DoC_Mil_CDSN_IO + i);
+ /* Terminate write pipeline */
+ WriteDOC(0x00, docptr, WritePipeTerm);
+}
+
+static void doc2001_readbuf(struct nand_chip *this, u_char *buf, int len)
+{
+ struct doc_priv *doc = nand_get_controller_data(this);
+ void __iomem *docptr = doc->virtadr;
+ int i;
+
+ /* Start read pipeline */
+ ReadDOC(docptr, ReadPipeInit);
+
+ for (i = 0; i < len - 1; i++)
+ buf[i] = ReadDOC(docptr, Mil_CDSN_IO + (i & 0xff));
+
+ /* Terminate read pipeline */
+ buf[i] = ReadDOC(docptr, LastDataRead);
+}
+
+static void doc2001plus_writebuf(struct nand_chip *this, const u_char *buf, int len)
+{
+ struct doc_priv *doc = nand_get_controller_data(this);
+ void __iomem *docptr = doc->virtadr;
+ int i;
+
+ if (debug)
+ printk("writebuf of %d bytes: ", len);
+ for (i = 0; i < len; i++) {
+ WriteDOC_(buf[i], docptr, DoC_Mil_CDSN_IO + i);
+ if (debug && i < 16)
+ printk("%02x ", buf[i]);
+ }
+ if (debug)
+ printk("\n");
+}
+
+static void doc2001plus_readbuf(struct nand_chip *this, u_char *buf, int len)
+{
+ struct doc_priv *doc = nand_get_controller_data(this);
+ void __iomem *docptr = doc->virtadr;
+ int i;
+
+ if (debug)
+ printk("readbuf of %d bytes: ", len);
+
+ /* Start read pipeline */
+ ReadDOC(docptr, Mplus_ReadPipeInit);
+ ReadDOC(docptr, Mplus_ReadPipeInit);
+
+ for (i = 0; i < len - 2; i++) {
+ buf[i] = ReadDOC(docptr, Mil_CDSN_IO);
+ if (debug && i < 16)
+ printk("%02x ", buf[i]);
+ }
+
+ /* Terminate read pipeline */
+ if (len >= 2) {
+ buf[len - 2] = ReadDOC(docptr, Mplus_LastDataRead);
+ if (debug && i < 16)
+ printk("%02x ", buf[len - 2]);
+ }
+
+ buf[len - 1] = ReadDOC(docptr, Mplus_LastDataRead);
+ if (debug && i < 16)
+ printk("%02x ", buf[len - 1]);
+ if (debug)
+ printk("\n");
+}
+
+static void doc200x_write_control(struct doc_priv *doc, u8 value)
+{
+ WriteDOC(value, doc->virtadr, CDSNControl);
+	/* 11.4.3 -- 4 NOPs after CDSNControl write */
+ DoC_Delay(doc, 4);
+}
+
+static void doc200x_exec_instr(struct nand_chip *this,
+ const struct nand_op_instr *instr)
+{
+ struct doc_priv *doc = nand_get_controller_data(this);
+ unsigned int i;
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ doc200x_write_control(doc, CDSN_CTRL_CE | CDSN_CTRL_CLE);
+ doc2000_write_byte(this, instr->ctx.cmd.opcode);
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ doc200x_write_control(doc, CDSN_CTRL_CE | CDSN_CTRL_ALE);
+ for (i = 0; i < instr->ctx.addr.naddrs; i++) {
+ u8 addr = instr->ctx.addr.addrs[i];
+
+ if (DoC_is_2000(doc))
+ doc2000_write_byte(this, addr);
+ else
+ doc2001_write_byte(this, addr);
+ }
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ doc200x_write_control(doc, CDSN_CTRL_CE);
+ if (DoC_is_2000(doc))
+ doc2000_readbuf(this, instr->ctx.data.buf.in,
+ instr->ctx.data.len);
+ else
+ doc2001_readbuf(this, instr->ctx.data.buf.in,
+ instr->ctx.data.len);
+ break;
+
+ case NAND_OP_DATA_OUT_INSTR:
+ doc200x_write_control(doc, CDSN_CTRL_CE);
+ if (DoC_is_2000(doc))
+ doc2000_writebuf(this, instr->ctx.data.buf.out,
+ instr->ctx.data.len);
+ else
+ doc2001_writebuf(this, instr->ctx.data.buf.out,
+ instr->ctx.data.len);
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ DoC_WaitReady(doc);
+ break;
+ }
+
+ if (instr->delay_ns)
+ ndelay(instr->delay_ns);
+}
+
+static int doc200x_exec_op(struct nand_chip *this,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ struct doc_priv *doc = nand_get_controller_data(this);
+ unsigned int i;
+
+ if (check_only)
+ return true;
+
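+	/* e.g. with 4 chips per floor, cs 5 selects chip 1 on floor 1 */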
+ doc->curchip = op->cs % doc->chips_per_floor;
+ doc->curfloor = op->cs / doc->chips_per_floor;
+
+ WriteDOC(doc->curfloor, doc->virtadr, FloorSelect);
+ WriteDOC(doc->curchip, doc->virtadr, CDSNDeviceSelect);
+
+ /* Assert CE pin */
+ doc200x_write_control(doc, CDSN_CTRL_CE);
+
+ for (i = 0; i < op->ninstrs; i++)
+ doc200x_exec_instr(this, &op->instrs[i]);
+
+ /* De-assert CE pin */
+ doc200x_write_control(doc, 0);
+
+ return 0;
+}
+
+static void doc2001plus_write_pipe_term(struct doc_priv *doc)
+{
+ WriteDOC(0x00, doc->virtadr, Mplus_WritePipeTerm);
+ WriteDOC(0x00, doc->virtadr, Mplus_WritePipeTerm);
+}
+
+static void doc2001plus_exec_instr(struct nand_chip *this,
+ const struct nand_op_instr *instr)
+{
+ struct doc_priv *doc = nand_get_controller_data(this);
+ unsigned int i;
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ WriteDOC(instr->ctx.cmd.opcode, doc->virtadr, Mplus_FlashCmd);
+ doc2001plus_write_pipe_term(doc);
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ for (i = 0; i < instr->ctx.addr.naddrs; i++) {
+ u8 addr = instr->ctx.addr.addrs[i];
+
+ WriteDOC(addr, doc->virtadr, Mplus_FlashAddress);
+ }
+ doc2001plus_write_pipe_term(doc);
+ /* deassert ALE */
+ WriteDOC(0, doc->virtadr, Mplus_FlashControl);
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ doc2001plus_readbuf(this, instr->ctx.data.buf.in,
+ instr->ctx.data.len);
+ break;
+ case NAND_OP_DATA_OUT_INSTR:
+ doc2001plus_writebuf(this, instr->ctx.data.buf.out,
+ instr->ctx.data.len);
+ doc2001plus_write_pipe_term(doc);
+ break;
+ case NAND_OP_WAITRDY_INSTR:
+ DoC_WaitReady(doc);
+ break;
+ }
+
+ if (instr->delay_ns)
+ ndelay(instr->delay_ns);
+}
+
+static int doc2001plus_exec_op(struct nand_chip *this,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ struct doc_priv *doc = nand_get_controller_data(this);
+ unsigned int i;
+
+ if (check_only)
+ return true;
+
+ doc->curchip = op->cs % doc->chips_per_floor;
+ doc->curfloor = op->cs / doc->chips_per_floor;
+
+ /* Assert ChipEnable and deassert WriteProtect */
+ WriteDOC(DOC_FLASH_CE, doc->virtadr, Mplus_FlashSelect);
+
+ for (i = 0; i < op->ninstrs; i++)
+ doc2001plus_exec_instr(this, &op->instrs[i]);
+
+ /* De-assert ChipEnable */
+ WriteDOC(0, doc->virtadr, Mplus_FlashSelect);
+
+ return 0;
+}
+
+static void doc200x_enable_hwecc(struct nand_chip *this, int mode)
+{
+ struct doc_priv *doc = nand_get_controller_data(this);
+ void __iomem *docptr = doc->virtadr;
+
+ /* Prime the ECC engine */
+ switch (mode) {
+ case NAND_ECC_READ:
+ WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
+ WriteDOC(DOC_ECC_EN, docptr, ECCConf);
+ break;
+ case NAND_ECC_WRITE:
+ WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
+ WriteDOC(DOC_ECC_EN | DOC_ECC_RW, docptr, ECCConf);
+ break;
+ }
+}
+
+static void doc2001plus_enable_hwecc(struct nand_chip *this, int mode)
+{
+ struct doc_priv *doc = nand_get_controller_data(this);
+ void __iomem *docptr = doc->virtadr;
+
+ /* Prime the ECC engine */
+ switch (mode) {
+ case NAND_ECC_READ:
+ WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
+ WriteDOC(DOC_ECC_EN, docptr, Mplus_ECCConf);
+ break;
+ case NAND_ECC_WRITE:
+ WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
+ WriteDOC(DOC_ECC_EN | DOC_ECC_RW, docptr, Mplus_ECCConf);
+ break;
+ }
+}
+
+/* This code is only called on write */
+static int doc200x_calculate_ecc(struct nand_chip *this, const u_char *dat,
+ unsigned char *ecc_code)
+{
+ struct doc_priv *doc = nand_get_controller_data(this);
+ void __iomem *docptr = doc->virtadr;
+ int i;
+ int emptymatch = 1;
+
+ /* flush the pipeline */
+ if (DoC_is_2000(doc)) {
+ WriteDOC(doc->CDSNControl & ~CDSN_CTRL_FLASH_IO, docptr, CDSNControl);
+ WriteDOC(0, docptr, 2k_CDSN_IO);
+ WriteDOC(0, docptr, 2k_CDSN_IO);
+ WriteDOC(0, docptr, 2k_CDSN_IO);
+ WriteDOC(doc->CDSNControl, docptr, CDSNControl);
+ } else if (DoC_is_MillenniumPlus(doc)) {
+ WriteDOC(0, docptr, Mplus_NOP);
+ WriteDOC(0, docptr, Mplus_NOP);
+ WriteDOC(0, docptr, Mplus_NOP);
+ } else {
+ WriteDOC(0, docptr, NOP);
+ WriteDOC(0, docptr, NOP);
+ WriteDOC(0, docptr, NOP);
+ }
+
+ for (i = 0; i < 6; i++) {
+ if (DoC_is_MillenniumPlus(doc))
+ ecc_code[i] = ReadDOC_(docptr, DoC_Mplus_ECCSyndrome0 + i);
+ else
+ ecc_code[i] = ReadDOC_(docptr, DoC_ECCSyndrome0 + i);
+ if (ecc_code[i] != empty_write_ecc[i])
+ emptymatch = 0;
+ }
+ if (DoC_is_MillenniumPlus(doc))
+ WriteDOC(DOC_ECC_DIS, docptr, Mplus_ECCConf);
+ else
+ WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
+#if 0
+ /* If emptymatch=1, we might have an all-0xff data buffer. Check. */
+ if (emptymatch) {
+ /* Note: this somewhat expensive test should not be triggered
+ often. It could be optimized away by examining the data in
+ the writebuf routine, and remembering the result. */
+ for (i = 0; i < 512; i++) {
+ if (dat[i] == 0xff)
+ continue;
+ emptymatch = 0;
+ break;
+ }
+ }
+	/* If emptymatch is still 1, we do have an all-0xff data buffer.
+	   Return an all-0xff ecc value instead of the computed one, so
+	   it'll look just like a freshly-erased page. */
+ if (emptymatch)
+ memset(ecc_code, 0xff, 6);
+#endif
+ return 0;
+}
+
+static int doc200x_correct_data(struct nand_chip *this, u_char *dat,
+ u_char *read_ecc, u_char *isnull)
+{
+ int i, ret = 0;
+ struct doc_priv *doc = nand_get_controller_data(this);
+ void __iomem *docptr = doc->virtadr;
+ uint8_t calc_ecc[6];
+ volatile u_char dummy;
+
+ /* flush the pipeline */
+ if (DoC_is_2000(doc)) {
+ dummy = ReadDOC(docptr, 2k_ECCStatus);
+ dummy = ReadDOC(docptr, 2k_ECCStatus);
+ dummy = ReadDOC(docptr, 2k_ECCStatus);
+ } else if (DoC_is_MillenniumPlus(doc)) {
+ dummy = ReadDOC(docptr, Mplus_ECCConf);
+ dummy = ReadDOC(docptr, Mplus_ECCConf);
+ dummy = ReadDOC(docptr, Mplus_ECCConf);
+ } else {
+ dummy = ReadDOC(docptr, ECCConf);
+ dummy = ReadDOC(docptr, ECCConf);
+ dummy = ReadDOC(docptr, ECCConf);
+ }
+
+	/* Error occurred? */
+ if (dummy & 0x80) {
+ for (i = 0; i < 6; i++) {
+ if (DoC_is_MillenniumPlus(doc))
+ calc_ecc[i] = ReadDOC_(docptr, DoC_Mplus_ECCSyndrome0 + i);
+ else
+ calc_ecc[i] = ReadDOC_(docptr, DoC_ECCSyndrome0 + i);
+ }
+
+ ret = doc_ecc_decode(doc->rs_decoder, dat, calc_ecc);
+ if (ret > 0)
+ pr_err("doc200x_correct_data corrected %d errors\n",
+ ret);
+ }
+ if (DoC_is_MillenniumPlus(doc))
+ WriteDOC(DOC_ECC_DIS, docptr, Mplus_ECCConf);
+ else
+ WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
+ if (no_ecc_failures && mtd_is_eccerr(ret)) {
+ pr_err("suppressing ECC failure\n");
+ ret = 0;
+ }
+ return ret;
+}
+
+static int doc200x_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = 0;
+ oobregion->length = 6;
+
+ return 0;
+}
+
+static int doc200x_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section > 1)
+ return -ERANGE;
+
+ /*
+ * The strange out-of-order free bytes definition is a (possibly
+ * unneeded) attempt to retain compatibility. It used to read:
+ * .oobfree = { {8, 8} }
+ * Since that leaves two bytes unusable, it was changed. But the
+ * following scheme might affect existing jffs2 installs by moving the
+ * cleanmarker:
+ * .oobfree = { {6, 10} }
+ * jffs2 seems to handle the above gracefully, but the current scheme
+ * seems safer. The only problem with it is that any code retrieving
+ * free bytes position must be able to handle out-of-order segments.
+ */
+ if (!section) {
+ oobregion->offset = 8;
+ oobregion->length = 8;
+ } else {
+ oobregion->offset = 6;
+ oobregion->length = 2;
+ }
+
+ return 0;
+}
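+
+/*
+ * Net effect on the 16-byte OOB of these 512-byte page devices: bytes 0-5
+ * hold the ECC, bytes 6-7 and 8-15 are free (reported as the two
+ * out-of-order sections described above).
+ */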
+
+static const struct mtd_ooblayout_ops doc200x_ooblayout_ops = {
+ .ecc = doc200x_ooblayout_ecc,
+ .free = doc200x_ooblayout_free,
+};
+
+/* Find the (I)NFTL Media Header, and optionally also the mirror media header.
+ On successful return, buf will contain a copy of the media header for
+ further processing. id is the string to scan for, and will presumably be
+ either "ANAND" or "BNAND". If findmirror=1, also look for the mirror media
+ header. The page #s of the found media headers are placed in mh0_page and
+ mh1_page in the DOC private structure. */
+static int __init find_media_headers(struct mtd_info *mtd, u_char *buf, const char *id, int findmirror)
+{
+ struct nand_chip *this = mtd_to_nand(mtd);
+ struct doc_priv *doc = nand_get_controller_data(this);
+ unsigned offs;
+ int ret;
+ size_t retlen;
+
+ for (offs = 0; offs < mtd->size; offs += mtd->erasesize) {
+ ret = mtd_read(mtd, offs, mtd->writesize, &retlen, buf);
+ if (retlen != mtd->writesize)
+ continue;
+ if (ret) {
+ pr_warn("ECC error scanning DOC at 0x%x\n", offs);
+ }
+ if (memcmp(buf, id, 6))
+ continue;
+ pr_info("Found DiskOnChip %s Media Header at 0x%x\n", id, offs);
+ if (doc->mh0_page == -1) {
+ doc->mh0_page = offs >> this->page_shift;
+ if (!findmirror)
+ return 1;
+ continue;
+ }
+ doc->mh1_page = offs >> this->page_shift;
+ return 2;
+ }
+ if (doc->mh0_page == -1) {
+ pr_warn("DiskOnChip %s Media Header not found.\n", id);
+ return 0;
+ }
+	/* Only one media header was found. We want buf to contain a
+	   media header on return, so we'll have to re-read the one we found. */
+ offs = doc->mh0_page << this->page_shift;
+ ret = mtd_read(mtd, offs, mtd->writesize, &retlen, buf);
+ if (retlen != mtd->writesize) {
+ /* Insanity. Give up. */
+ pr_err("Read DiskOnChip Media Header once, but can't reread it???\n");
+ return 0;
+ }
+ return 1;
+}
+
+static inline int __init nftl_partscan(struct mtd_info *mtd, struct mtd_partition *parts)
+{
+ struct nand_chip *this = mtd_to_nand(mtd);
+ struct doc_priv *doc = nand_get_controller_data(this);
+ struct nand_memory_organization *memorg;
+ int ret = 0;
+ u_char *buf;
+ struct NFTLMediaHeader *mh;
+ const unsigned psize = 1 << this->page_shift;
+ int numparts = 0;
+ unsigned blocks, maxblocks;
+ int offs, numheaders;
+
+ memorg = nanddev_get_memorg(&this->base);
+
+ buf = kmalloc(mtd->writesize, GFP_KERNEL);
+ if (!buf) {
+ return 0;
+ }
+ if (!(numheaders = find_media_headers(mtd, buf, "ANAND", 1)))
+ goto out;
+ mh = (struct NFTLMediaHeader *)buf;
+
+ le16_to_cpus(&mh->NumEraseUnits);
+ le16_to_cpus(&mh->FirstPhysicalEUN);
+ le32_to_cpus(&mh->FormattedSize);
+
+ pr_info(" DataOrgID = %s\n"
+ " NumEraseUnits = %d\n"
+ " FirstPhysicalEUN = %d\n"
+ " FormattedSize = %d\n"
+ " UnitSizeFactor = %d\n",
+ mh->DataOrgID, mh->NumEraseUnits,
+ mh->FirstPhysicalEUN, mh->FormattedSize,
+ mh->UnitSizeFactor);
+
+ blocks = mtd->size >> this->phys_erase_shift;
+ maxblocks = min(32768U, mtd->erasesize - psize);
+
+ if (mh->UnitSizeFactor == 0x00) {
+ /* Auto-determine UnitSizeFactor. The constraints are:
+ - There can be at most 32768 virtual blocks.
+ - There can be at most (virtual block size - page size)
+ virtual blocks (because MediaHeader+BBT must fit in 1).
+ */
+ mh->UnitSizeFactor = 0xff;
+ while (blocks > maxblocks) {
+ blocks >>= 1;
+ maxblocks = min(32768U, (maxblocks << 1) + psize);
+ mh->UnitSizeFactor--;
+ }
+ pr_warn("UnitSizeFactor=0x00 detected. Correct value is assumed to be 0x%02x.\n", mh->UnitSizeFactor);
+ }
+
+	/* NOTE: The lines below modify internal variables of the NAND and MTD
+	   layers; variables which have already been configured by nand_scan.
+	   Unfortunately, we didn't know before this point what these values
+	   should be. Thus, this code is somewhat dependent on the exact
+	   implementation of the NAND layer. */
+ if (mh->UnitSizeFactor != 0xff) {
+ this->bbt_erase_shift += (0xff - mh->UnitSizeFactor);
+ memorg->pages_per_eraseblock <<= (0xff - mh->UnitSizeFactor);
+ mtd->erasesize <<= (0xff - mh->UnitSizeFactor);
+ pr_info("Setting virtual erase size to %d\n", mtd->erasesize);
+ blocks = mtd->size >> this->bbt_erase_shift;
+ maxblocks = min(32768U, mtd->erasesize - psize);
+ }
+
+ if (blocks > maxblocks) {
+ pr_err("UnitSizeFactor of 0x%02x is inconsistent with device size. Aborting.\n", mh->UnitSizeFactor);
+ goto out;
+ }
+
+ /* Skip past the media headers. */
+ offs = max(doc->mh0_page, doc->mh1_page);
+ offs <<= this->page_shift;
+ offs += mtd->erasesize;
+
+ if (show_firmware_partition == 1) {
+ parts[0].name = " DiskOnChip Firmware / Media Header partition";
+ parts[0].offset = 0;
+ parts[0].size = offs;
+ numparts = 1;
+ }
+
+ parts[numparts].name = " DiskOnChip BDTL partition";
+ parts[numparts].offset = offs;
+ parts[numparts].size = (mh->NumEraseUnits - numheaders) << this->bbt_erase_shift;
+
+ offs += parts[numparts].size;
+ numparts++;
+
+ if (offs < mtd->size) {
+ parts[numparts].name = " DiskOnChip Remainder partition";
+ parts[numparts].offset = offs;
+ parts[numparts].size = mtd->size - offs;
+ numparts++;
+ }
+
+ ret = numparts;
+ out:
+ kfree(buf);
+ return ret;
+}
+
+/* This is a stripped-down copy of the code in inftlmount.c */
+static inline int __init inftl_partscan(struct mtd_info *mtd, struct mtd_partition *parts)
+{
+ struct nand_chip *this = mtd_to_nand(mtd);
+ struct doc_priv *doc = nand_get_controller_data(this);
+ int ret = 0;
+ u_char *buf;
+ struct INFTLMediaHeader *mh;
+ struct INFTLPartition *ip;
+ int numparts = 0;
+ int blocks;
+ int vshift, lastvunit = 0;
+ int i;
+ int end = mtd->size;
+
+ if (inftl_bbt_write)
+ end -= (INFTL_BBT_RESERVED_BLOCKS << this->phys_erase_shift);
+
+ buf = kmalloc(mtd->writesize, GFP_KERNEL);
+ if (!buf) {
+ return 0;
+ }
+
+ if (!find_media_headers(mtd, buf, "BNAND", 0))
+ goto out;
+ doc->mh1_page = doc->mh0_page + (4096 >> this->page_shift);
+ mh = (struct INFTLMediaHeader *)buf;
+
+ le32_to_cpus(&mh->NoOfBootImageBlocks);
+ le32_to_cpus(&mh->NoOfBinaryPartitions);
+ le32_to_cpus(&mh->NoOfBDTLPartitions);
+ le32_to_cpus(&mh->BlockMultiplierBits);
+ le32_to_cpus(&mh->FormatFlags);
+ le32_to_cpus(&mh->PercentUsed);
+
+ pr_info(" bootRecordID = %s\n"
+ " NoOfBootImageBlocks = %d\n"
+ " NoOfBinaryPartitions = %d\n"
+ " NoOfBDTLPartitions = %d\n"
+ " BlockMultiplierBits = %d\n"
+ " FormatFlgs = %d\n"
+ " OsakVersion = %d.%d.%d.%d\n"
+ " PercentUsed = %d\n",
+ mh->bootRecordID, mh->NoOfBootImageBlocks,
+ mh->NoOfBinaryPartitions,
+ mh->NoOfBDTLPartitions,
+ mh->BlockMultiplierBits, mh->FormatFlags,
+ ((unsigned char *) &mh->OsakVersion)[0] & 0xf,
+ ((unsigned char *) &mh->OsakVersion)[1] & 0xf,
+ ((unsigned char *) &mh->OsakVersion)[2] & 0xf,
+ ((unsigned char *) &mh->OsakVersion)[3] & 0xf,
+ mh->PercentUsed);
+
+ vshift = this->phys_erase_shift + mh->BlockMultiplierBits;
+
+ blocks = mtd->size >> vshift;
+ if (blocks > 32768) {
+ pr_err("BlockMultiplierBits=%d is inconsistent with device size. Aborting.\n", mh->BlockMultiplierBits);
+ goto out;
+ }
+
+ blocks = doc->chips_per_floor << (this->chip_shift - this->phys_erase_shift);
+ if (inftl_bbt_write && (blocks > mtd->erasesize)) {
+ pr_err("Writeable BBTs spanning more than one erase block are not yet supported. FIX ME!\n");
+ goto out;
+ }
+
+ /* Scan the partitions */
+ for (i = 0; (i < 4); i++) {
+ ip = &(mh->Partitions[i]);
+ le32_to_cpus(&ip->virtualUnits);
+ le32_to_cpus(&ip->firstUnit);
+ le32_to_cpus(&ip->lastUnit);
+ le32_to_cpus(&ip->flags);
+ le32_to_cpus(&ip->spareUnits);
+ le32_to_cpus(&ip->Reserved0);
+
+ pr_info(" PARTITION[%d] ->\n"
+ " virtualUnits = %d\n"
+ " firstUnit = %d\n"
+ " lastUnit = %d\n"
+ " flags = 0x%x\n"
+ " spareUnits = %d\n",
+ i, ip->virtualUnits, ip->firstUnit,
+ ip->lastUnit, ip->flags,
+ ip->spareUnits);
+
+ if ((show_firmware_partition == 1) &&
+ (i == 0) && (ip->firstUnit > 0)) {
+ parts[0].name = " DiskOnChip IPL / Media Header partition";
+ parts[0].offset = 0;
+ parts[0].size = mtd->erasesize * ip->firstUnit;
+ numparts = 1;
+ }
+
+ if (ip->flags & INFTL_BINARY)
+ parts[numparts].name = " DiskOnChip BDK partition";
+ else
+ parts[numparts].name = " DiskOnChip BDTL partition";
+ parts[numparts].offset = ip->firstUnit << vshift;
+ parts[numparts].size = (1 + ip->lastUnit - ip->firstUnit) << vshift;
+ numparts++;
+ if (ip->lastUnit > lastvunit)
+ lastvunit = ip->lastUnit;
+ if (ip->flags & INFTL_LAST)
+ break;
+ }
+ lastvunit++;
+ if ((lastvunit << vshift) < end) {
+ parts[numparts].name = " DiskOnChip Remainder partition";
+ parts[numparts].offset = lastvunit << vshift;
+ parts[numparts].size = end - parts[numparts].offset;
+ numparts++;
+ }
+ ret = numparts;
+ out:
+ kfree(buf);
+ return ret;
+}
+
+static int __init nftl_scan_bbt(struct mtd_info *mtd)
+{
+ int ret, numparts;
+ struct nand_chip *this = mtd_to_nand(mtd);
+ struct doc_priv *doc = nand_get_controller_data(this);
+ struct mtd_partition parts[2];
+
+ memset((char *)parts, 0, sizeof(parts));
+ /* On NFTL, we have to find the media headers before we can read the
+ BBTs, since they're stored in the media header eraseblocks. */
+ numparts = nftl_partscan(mtd, parts);
+ if (!numparts)
+ return -EIO;
+ this->bbt_td->options = NAND_BBT_ABSPAGE | NAND_BBT_8BIT |
+ NAND_BBT_SAVECONTENT | NAND_BBT_WRITE |
+ NAND_BBT_VERSION;
+ this->bbt_td->veroffs = 7;
+ this->bbt_td->pages[0] = doc->mh0_page + 1;
+ if (doc->mh1_page != -1) {
+ this->bbt_md->options = NAND_BBT_ABSPAGE | NAND_BBT_8BIT |
+ NAND_BBT_SAVECONTENT | NAND_BBT_WRITE |
+ NAND_BBT_VERSION;
+ this->bbt_md->veroffs = 7;
+ this->bbt_md->pages[0] = doc->mh1_page + 1;
+ } else {
+ this->bbt_md = NULL;
+ }
+
+ ret = nand_create_bbt(this);
+ if (ret)
+ return ret;
+
+ return mtd_device_register(mtd, parts, no_autopart ? 0 : numparts);
+}
+
+static int __init inftl_scan_bbt(struct mtd_info *mtd)
+{
+ int ret, numparts;
+ struct nand_chip *this = mtd_to_nand(mtd);
+ struct doc_priv *doc = nand_get_controller_data(this);
+ struct mtd_partition parts[5];
+
+ if (nanddev_ntargets(&this->base) > doc->chips_per_floor) {
+ pr_err("Multi-floor INFTL devices not yet supported.\n");
+ return -EIO;
+ }
+
+ if (DoC_is_MillenniumPlus(doc)) {
+ this->bbt_td->options = NAND_BBT_2BIT | NAND_BBT_ABSPAGE;
+ if (inftl_bbt_write)
+ this->bbt_td->options |= NAND_BBT_WRITE;
+ this->bbt_td->pages[0] = 2;
+ this->bbt_md = NULL;
+ } else {
+ this->bbt_td->options = NAND_BBT_LASTBLOCK | NAND_BBT_8BIT | NAND_BBT_VERSION;
+ if (inftl_bbt_write)
+ this->bbt_td->options |= NAND_BBT_WRITE;
+ this->bbt_td->offs = 8;
+ this->bbt_td->len = 8;
+ this->bbt_td->veroffs = 7;
+ this->bbt_td->maxblocks = INFTL_BBT_RESERVED_BLOCKS;
+ this->bbt_td->reserved_block_code = 0x01;
+ this->bbt_td->pattern = "MSYS_BBT";
+
+ this->bbt_md->options = NAND_BBT_LASTBLOCK | NAND_BBT_8BIT | NAND_BBT_VERSION;
+ if (inftl_bbt_write)
+ this->bbt_md->options |= NAND_BBT_WRITE;
+ this->bbt_md->offs = 8;
+ this->bbt_md->len = 8;
+ this->bbt_md->veroffs = 7;
+ this->bbt_md->maxblocks = INFTL_BBT_RESERVED_BLOCKS;
+ this->bbt_md->reserved_block_code = 0x01;
+ this->bbt_md->pattern = "TBB_SYSM";
+ }
+
+ ret = nand_create_bbt(this);
+ if (ret)
+ return ret;
+
+ memset((char *)parts, 0, sizeof(parts));
+ numparts = inftl_partscan(mtd, parts);
+ /* At least for now, require the INFTL Media Header. We could probably
+ do without it for non-INFTL use, since all it gives us is
+ autopartitioning, but I want to give it more thought. */
+ if (!numparts)
+ return -EIO;
+ return mtd_device_register(mtd, parts, no_autopart ? 0 : numparts);
+}
+
+static inline int __init doc2000_init(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd_to_nand(mtd);
+ struct doc_priv *doc = nand_get_controller_data(this);
+
+ doc->late_init = nftl_scan_bbt;
+
+ doc->CDSNControl = CDSN_CTRL_FLASH_IO | CDSN_CTRL_ECC_IO;
+ doc2000_count_chips(mtd);
+ mtd->name = "DiskOnChip 2000 (NFTL Model)";
+ return (4 * doc->chips_per_floor);
+}
+
+static inline int __init doc2001_init(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd_to_nand(mtd);
+ struct doc_priv *doc = nand_get_controller_data(this);
+
+ ReadDOC(doc->virtadr, ChipID);
+ ReadDOC(doc->virtadr, ChipID);
+ ReadDOC(doc->virtadr, ChipID);
+ if (ReadDOC(doc->virtadr, ChipID) != DOC_ChipID_DocMil) {
+ /* It's not a Millennium; it's one of the newer
+ DiskOnChip 2000 units with a similar ASIC.
+ Treat it like a Millennium, except that it
+ can have multiple chips. */
+ doc2000_count_chips(mtd);
+ mtd->name = "DiskOnChip 2000 (INFTL Model)";
+ doc->late_init = inftl_scan_bbt;
+ return (4 * doc->chips_per_floor);
+ } else {
+ /* Bog-standard Millennium */
+ doc->chips_per_floor = 1;
+ mtd->name = "DiskOnChip Millennium";
+ doc->late_init = nftl_scan_bbt;
+ return 1;
+ }
+}
+
+static inline int __init doc2001plus_init(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd_to_nand(mtd);
+ struct doc_priv *doc = nand_get_controller_data(this);
+
+ doc->late_init = inftl_scan_bbt;
+ this->ecc.hwctl = doc2001plus_enable_hwecc;
+
+ doc->chips_per_floor = 1;
+ mtd->name = "DiskOnChip Millennium Plus";
+
+ return 1;
+}
+
+static int doc200x_attach_chip(struct nand_chip *chip)
+{
+ if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
+ return 0;
+
+ chip->ecc.placement = NAND_ECC_PLACEMENT_INTERLEAVED;
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 6;
+ chip->ecc.strength = 2;
+ chip->ecc.options = NAND_ECC_GENERIC_ERASED_CHECK;
+ chip->ecc.hwctl = doc200x_enable_hwecc;
+ chip->ecc.calculate = doc200x_calculate_ecc;
+ chip->ecc.correct = doc200x_correct_data;
+
+ return 0;
+}
+
+static const struct nand_controller_ops doc200x_ops = {
+ .exec_op = doc200x_exec_op,
+ .attach_chip = doc200x_attach_chip,
+};
+
+static const struct nand_controller_ops doc2001plus_ops = {
+ .exec_op = doc2001plus_exec_op,
+ .attach_chip = doc200x_attach_chip,
+};
+
+static int __init doc_probe(unsigned long physadr)
+{
+ struct nand_chip *nand = NULL;
+ struct doc_priv *doc = NULL;
+ unsigned char ChipID;
+ struct mtd_info *mtd;
+ void __iomem *virtadr;
+ unsigned char save_control;
+ unsigned char tmp, tmpb, tmpc;
+ int reg, len, numchips;
+ int ret = 0;
+
+ if (!request_mem_region(physadr, DOC_IOREMAP_LEN, "DiskOnChip"))
+ return -EBUSY;
+ virtadr = ioremap(physadr, DOC_IOREMAP_LEN);
+ if (!virtadr) {
+		pr_err("DiskOnChip ioremap failed: 0x%x bytes at 0x%lx\n",
+		       DOC_IOREMAP_LEN, physadr);
+ ret = -EIO;
+ goto error_ioremap;
+ }
+
+ /* It's not possible to cleanly detect the DiskOnChip - the
+ * bootup procedure will put the device into reset mode, and
+ * it's not possible to talk to it without actually writing
+ * to the DOCControl register. So we store the current contents
+ * of the DOCControl register's location, in case we later decide
+ * that it's not a DiskOnChip, and want to put it back how we
+ * found it.
+ */
+ save_control = ReadDOC(virtadr, DOCControl);
+
+ /* Reset the DiskOnChip ASIC */
+ WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_RESET, virtadr, DOCControl);
+ WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_RESET, virtadr, DOCControl);
+
+ /* Enable the DiskOnChip ASIC */
+ WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_NORMAL, virtadr, DOCControl);
+ WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_NORMAL, virtadr, DOCControl);
+
+ ChipID = ReadDOC(virtadr, ChipID);
+
+ switch (ChipID) {
+ case DOC_ChipID_Doc2k:
+ reg = DoC_2k_ECCStatus;
+ break;
+ case DOC_ChipID_DocMil:
+ reg = DoC_ECCConf;
+ break;
+ case DOC_ChipID_DocMilPlus16:
+ case DOC_ChipID_DocMilPlus32:
+ case 0:
+ /* Possible Millennium Plus, need to do more checks */
+ /* Possibly release from power down mode */
+ for (tmp = 0; (tmp < 4); tmp++)
+ ReadDOC(virtadr, Mplus_Power);
+
+ /* Reset the Millennium Plus ASIC */
+ tmp = DOC_MODE_RESET | DOC_MODE_MDWREN | DOC_MODE_RST_LAT | DOC_MODE_BDECT;
+ WriteDOC(tmp, virtadr, Mplus_DOCControl);
+ WriteDOC(~tmp, virtadr, Mplus_CtrlConfirm);
+
+ usleep_range(1000, 2000);
+ /* Enable the Millennium Plus ASIC */
+ tmp = DOC_MODE_NORMAL | DOC_MODE_MDWREN | DOC_MODE_RST_LAT | DOC_MODE_BDECT;
+ WriteDOC(tmp, virtadr, Mplus_DOCControl);
+ WriteDOC(~tmp, virtadr, Mplus_CtrlConfirm);
+ usleep_range(1000, 2000);
+
+ ChipID = ReadDOC(virtadr, ChipID);
+
+ switch (ChipID) {
+ case DOC_ChipID_DocMilPlus16:
+ reg = DoC_Mplus_Toggle;
+ break;
+ case DOC_ChipID_DocMilPlus32:
+ pr_err("DiskOnChip Millennium Plus 32MB is not supported, ignoring.\n");
+ fallthrough;
+ default:
+ ret = -ENODEV;
+ goto notfound;
+ }
+ break;
+
+ default:
+ ret = -ENODEV;
+ goto notfound;
+ }
+ /* Check the TOGGLE bit in the ECC register */
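+	/* The bit flips on every access: two consecutive reads must differ,
+	   and the first and third must match, or this is not a DiskOnChip. */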
+ tmp = ReadDOC_(virtadr, reg) & DOC_TOGGLE_BIT;
+ tmpb = ReadDOC_(virtadr, reg) & DOC_TOGGLE_BIT;
+ tmpc = ReadDOC_(virtadr, reg) & DOC_TOGGLE_BIT;
+ if ((tmp == tmpb) || (tmp != tmpc)) {
+ pr_warn("Possible DiskOnChip at 0x%lx failed TOGGLE test, dropping.\n", physadr);
+ ret = -ENODEV;
+ goto notfound;
+ }
+
+ for (mtd = doclist; mtd; mtd = doc->nextdoc) {
+ unsigned char oldval;
+ unsigned char newval;
+ nand = mtd_to_nand(mtd);
+ doc = nand_get_controller_data(nand);
+ /* Use the alias resolution register to determine if this is
+ in fact the same DOC aliased to a new address. If writes
+ to one chip's alias resolution register change the value on
+ the other chip, they're the same chip. */
+ if (ChipID == DOC_ChipID_DocMilPlus16) {
+ oldval = ReadDOC(doc->virtadr, Mplus_AliasResolution);
+ newval = ReadDOC(virtadr, Mplus_AliasResolution);
+ } else {
+ oldval = ReadDOC(doc->virtadr, AliasResolution);
+ newval = ReadDOC(virtadr, AliasResolution);
+ }
+ if (oldval != newval)
+ continue;
+ if (ChipID == DOC_ChipID_DocMilPlus16) {
+ WriteDOC(~newval, virtadr, Mplus_AliasResolution);
+ oldval = ReadDOC(doc->virtadr, Mplus_AliasResolution);
+ WriteDOC(newval, virtadr, Mplus_AliasResolution); // restore it
+ } else {
+ WriteDOC(~newval, virtadr, AliasResolution);
+ oldval = ReadDOC(doc->virtadr, AliasResolution);
+ WriteDOC(newval, virtadr, AliasResolution); // restore it
+ }
+ newval = ~newval;
+ if (oldval == newval) {
+ pr_debug("Found alias of DOC at 0x%lx to 0x%lx\n",
+ doc->physadr, physadr);
+ goto notfound;
+ }
+ }
+
+ pr_notice("DiskOnChip found at 0x%lx\n", physadr);
+
+ len = sizeof(struct nand_chip) + sizeof(struct doc_priv) +
+ (2 * sizeof(struct nand_bbt_descr));
+ nand = kzalloc(len, GFP_KERNEL);
+ if (!nand) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+	/*
+	 * Allocate an RS codec instance
+	 *
+	 * Symbol size is 10 (bits)
+	 * Primitive polynomial is x^10+x^3+1
+	 * First consecutive root is 510
+	 * Primitive element to generate roots = 1
+	 * Generator polynomial degree = 4
+	 */
+ doc = (struct doc_priv *) (nand + 1);
+ doc->rs_decoder = init_rs(10, 0x409, FCR, 1, NROOTS);
+ if (!doc->rs_decoder) {
+ pr_err("DiskOnChip: Could not create a RS codec\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ nand_controller_init(&doc->base);
+ if (ChipID == DOC_ChipID_DocMilPlus16)
+ doc->base.ops = &doc2001plus_ops;
+ else
+ doc->base.ops = &doc200x_ops;
+
+ mtd = nand_to_mtd(nand);
+ nand->bbt_td = (struct nand_bbt_descr *) (doc + 1);
+ nand->bbt_md = nand->bbt_td + 1;
+
+ mtd->owner = THIS_MODULE;
+ mtd_set_ooblayout(mtd, &doc200x_ooblayout_ops);
+
+ nand->controller = &doc->base;
+ nand_set_controller_data(nand, doc);
+ nand->bbt_options = NAND_BBT_USE_FLASH;
+ /* Skip the automatic BBT scan so we can run it manually */
+ nand->options |= NAND_SKIP_BBTSCAN | NAND_NO_BBM_QUIRK;
+
+ doc->physadr = physadr;
+ doc->virtadr = virtadr;
+ doc->ChipID = ChipID;
+ doc->curfloor = -1;
+ doc->curchip = -1;
+ doc->mh0_page = -1;
+ doc->mh1_page = -1;
+ doc->nextdoc = doclist;
+
+ if (ChipID == DOC_ChipID_Doc2k)
+ numchips = doc2000_init(mtd);
+ else if (ChipID == DOC_ChipID_DocMilPlus16)
+ numchips = doc2001plus_init(mtd);
+ else
+ numchips = doc2001_init(mtd);
+
+ if ((ret = nand_scan(nand, numchips)) || (ret = doc->late_init(mtd))) {
+		/* DBB note: I believe nand_cleanup is necessary here, as
+		   buffers may have been allocated in nand_base. Check with
+		   Thomas. FIX ME! */
+ nand_cleanup(nand);
+ goto fail;
+ }
+
+ /* Success! */
+ doclist = mtd;
+ return 0;
+
+ notfound:
+ /* Put back the contents of the DOCControl register, in case it's not
+ actually a DiskOnChip. */
+ WriteDOC(save_control, virtadr, DOCControl);
+ fail:
+ if (doc)
+ free_rs(doc->rs_decoder);
+ kfree(nand);
+ iounmap(virtadr);
+
+error_ioremap:
+ release_mem_region(physadr, DOC_IOREMAP_LEN);
+
+ return ret;
+}
+
+static void release_nanddoc(void)
+{
+ struct mtd_info *mtd, *nextmtd;
+ struct nand_chip *nand;
+ struct doc_priv *doc;
+ int ret;
+
+ for (mtd = doclist; mtd; mtd = nextmtd) {
+ nand = mtd_to_nand(mtd);
+ doc = nand_get_controller_data(nand);
+
+ nextmtd = doc->nextdoc;
+ ret = mtd_device_unregister(mtd);
+ WARN_ON(ret);
+ nand_cleanup(nand);
+ iounmap(doc->virtadr);
+ release_mem_region(doc->physadr, DOC_IOREMAP_LEN);
+ free_rs(doc->rs_decoder);
+ kfree(nand);
+ }
+}
+
+static int __init init_nanddoc(void)
+{
+ int i, ret = 0;
+
+ if (doc_config_location) {
+ pr_info("Using configured DiskOnChip probe address 0x%lx\n",
+ doc_config_location);
+ ret = doc_probe(doc_config_location);
+ if (ret < 0)
+ return ret;
+ } else {
+ for (i = 0; (doc_locations[i] != 0xffffffff); i++) {
+ doc_probe(doc_locations[i]);
+ }
+ }
+	/* No banner message any more. Print a message if no DiskOnChip is
+	   found, so the user knows we at least tried. */
+ if (!doclist) {
+ pr_info("No valid DiskOnChip devices found\n");
+ ret = -ENODEV;
+ }
+ return ret;
+}
+
+static void __exit cleanup_nanddoc(void)
+{
+ /* Cleanup the nand/DoC resources */
+ release_nanddoc();
+}
+
+module_init(init_nanddoc);
+module_exit(cleanup_nanddoc);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+MODULE_DESCRIPTION("M-Systems DiskOnChip 2000, Millennium and Millennium Plus device driver");
diff --git a/drivers/mtd/nand/raw/fsl_elbc_nand.c b/drivers/mtd/nand/raw/fsl_elbc_nand.c
new file mode 100644
index 000000000..c174b6dc3
--- /dev/null
+++ b/drivers/mtd/nand/raw/fsl_elbc_nand.c
@@ -0,0 +1,1003 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Freescale Enhanced Local Bus Controller NAND driver
+ *
+ * Copyright © 2006-2007, 2010 Freescale Semiconductor
+ *
+ * Authors: Nick Spence <nick.spence@freescale.com>,
+ * Scott Wood <scottwood@freescale.com>
+ * Jack Lan <jack.lan@freescale.com>
+ * Roy Zang <tie-fei.zang@freescale.com>
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/io.h>
+#include <asm/fsl_lbc.h>
+
+#define MAX_BANKS 8
+#define ERR_BYTE 0xFF /* Value returned for read bytes when read failed */
+#define FCM_TIMEOUT_MSECS 500 /* Maximum number of msecs to wait for FCM */
+
+/* mtd information per set */
+
+struct fsl_elbc_mtd {
+ struct nand_chip chip;
+ struct fsl_lbc_ctrl *ctrl;
+
+ struct device *dev;
+ int bank; /* Chip select bank number */
+ u8 __iomem *vbase; /* Chip select base virtual address */
+ int page_size; /* NAND page size (0=512, 1=2048) */
+ unsigned int fmr; /* FCM Flash Mode Register value */
+};
+
+/* Freescale eLBC FCM controller information */
+
+struct fsl_elbc_fcm_ctrl {
+ struct nand_controller controller;
+ struct fsl_elbc_mtd *chips[MAX_BANKS];
+
+ u8 __iomem *addr; /* Address of assigned FCM buffer */
+ unsigned int page; /* Last page written to / read from */
+ unsigned int read_bytes; /* Number of bytes read during command */
+ unsigned int column; /* Saved column from SEQIN */
+ unsigned int index; /* Pointer to next byte to 'read' */
+ unsigned int status; /* status read from LTESR after last op */
+ unsigned int mdr; /* UPM/FCM Data Register value */
+ unsigned int use_mdr; /* Non zero if the MDR is to be set */
+ unsigned int oob; /* Non zero if operating on OOB data */
+ unsigned int counter; /* counter for the initializations */
+ unsigned int max_bitflips; /* Saved during READ0 cmd */
+};
+
+/* These map to the positions used by the FCM hardware ECC generator */
+
+static int fsl_elbc_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
+
+ if (section >= chip->ecc.steps)
+ return -ERANGE;
+
+ oobregion->offset = (16 * section) + 6;
+ if (priv->fmr & FMR_ECCM)
+ oobregion->offset += 2;
+
+ oobregion->length = chip->ecc.bytes;
+
+ return 0;
+}
+
+static int fsl_elbc_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
+
+ if (section > chip->ecc.steps)
+ return -ERANGE;
+
+ if (!section) {
+ oobregion->offset = 0;
+ if (mtd->writesize > 512)
+ oobregion->offset++;
+ oobregion->length = (priv->fmr & FMR_ECCM) ? 7 : 5;
+ } else {
+ oobregion->offset = (16 * section) -
+ ((priv->fmr & FMR_ECCM) ? 5 : 7);
+ if (section < chip->ecc.steps)
+ oobregion->length = 13;
+ else
+ oobregion->length = mtd->oobsize - oobregion->offset;
+ }
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops fsl_elbc_ooblayout_ops = {
+ .ecc = fsl_elbc_ooblayout_ecc,
+ .free = fsl_elbc_ooblayout_free,
+};
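+
+/*
+ * Illustrative result, assuming a 2KiB page chip (four 512-byte ECC steps,
+ * 3 ECC bytes each, 64-byte OOB) with ECCM = 0: byte 0 is skipped (bad block
+ * marker), ECC occupies bytes 6-8, 22-24, 38-40 and 54-56, and the free
+ * regions are bytes 1-5, 9-21, 25-37, 41-53 and 57-63.
+ */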
+
+/*
+ * The eLBC may use HW ECC, in which case the OOB offsets that the NAND core
+ * uses for the BBT would interfere with the ECC positions; that's why we
+ * implement our own descriptors. OOB {11, 5} works for both SP and LP chips,
+ * with ECCM = 1 and ECCM = 0.
+ */
+static u8 bbt_pattern[] = {'B', 'b', 't', '0' };
+static u8 mirror_pattern[] = {'1', 't', 'b', 'B' };
+
+static struct nand_bbt_descr bbt_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
+ NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 11,
+ .len = 4,
+ .veroffs = 15,
+ .maxblocks = 4,
+ .pattern = bbt_pattern,
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
+ NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 11,
+ .len = 4,
+ .veroffs = 15,
+ .maxblocks = 4,
+ .pattern = mirror_pattern,
+};
+
+/*=================================*/
+
+/*
+ * Set up the FCM hardware block and page address fields, and the fcm
+ * structure addr field to point to the correct FCM buffer in memory
+ */
+static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
+ struct fsl_lbc_ctrl *ctrl = priv->ctrl;
+ struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand;
+ int buf_num;
+
+ elbc_fcm_ctrl->page = page_addr;
+
+ if (priv->page_size) {
+		/*
+		 * large page size chip: FPAR[PI] holds the lowest 6 bits,
+		 * FBAR[BLK] holds the remaining bits.
+		 */
+ out_be32(&lbc->fbar, page_addr >> 6);
+ out_be32(&lbc->fpar,
+ ((page_addr << FPAR_LP_PI_SHIFT) & FPAR_LP_PI) |
+ (oob ? FPAR_LP_MS : 0) | column);
+ buf_num = (page_addr & 1) << 2;
+ } else {
+		/*
+		 * small page size chip: FPAR[PI] holds the lowest 5 bits,
+		 * FBAR[BLK] holds the remaining bits.
+		 */
+ out_be32(&lbc->fbar, page_addr >> 5);
+ out_be32(&lbc->fpar,
+ ((page_addr << FPAR_SP_PI_SHIFT) & FPAR_SP_PI) |
+ (oob ? FPAR_SP_MS : 0) | column);
+ buf_num = page_addr & 7;
+ }
+
+ elbc_fcm_ctrl->addr = priv->vbase + buf_num * 1024;
+ elbc_fcm_ctrl->index = column;
+
+ /* for OOB data point to the second half of the buffer */
+ if (oob)
+ elbc_fcm_ctrl->index += priv->page_size ? 2048 : 512;
+
+ dev_vdbg(priv->dev, "set_addr: bank=%d, "
+ "elbc_fcm_ctrl->addr=0x%p (0x%p), "
+ "index %x, pes %d ps %d\n",
+ buf_num, elbc_fcm_ctrl->addr, priv->vbase,
+ elbc_fcm_ctrl->index,
+ chip->phys_erase_shift, chip->page_shift);
+}
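+
+/*
+ * Worked example (values chosen for illustration): for a large page chip and
+ * page_addr 0x123, FBAR[BLK] = 0x123 >> 6 = 4 and FPAR[PI] = 0x23; the page
+ * address is odd, so buf_num = 4 and the transfer uses the upper 4KiB half of
+ * the FCM buffer RAM (priv->vbase + 4096).
+ */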
+
+/*
+ * execute FCM command and wait for it to complete
+ */
+static int fsl_elbc_run_command(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
+ struct fsl_lbc_ctrl *ctrl = priv->ctrl;
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand;
+ struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
+
+ /* Setup the FMR[OP] to execute without write protection */
+ out_be32(&lbc->fmr, priv->fmr | 3);
+ if (elbc_fcm_ctrl->use_mdr)
+ out_be32(&lbc->mdr, elbc_fcm_ctrl->mdr);
+
+ dev_vdbg(priv->dev,
+ "fsl_elbc_run_command: fmr=%08x fir=%08x fcr=%08x\n",
+ in_be32(&lbc->fmr), in_be32(&lbc->fir), in_be32(&lbc->fcr));
+ dev_vdbg(priv->dev,
+ "fsl_elbc_run_command: fbar=%08x fpar=%08x "
+ "fbcr=%08x bank=%d\n",
+ in_be32(&lbc->fbar), in_be32(&lbc->fpar),
+ in_be32(&lbc->fbcr), priv->bank);
+
+ ctrl->irq_status = 0;
+ /* execute special operation */
+ out_be32(&lbc->lsor, priv->bank);
+
+ /* wait for FCM complete flag or timeout */
+ wait_event_timeout(ctrl->irq_wait, ctrl->irq_status,
+ FCM_TIMEOUT_MSECS * HZ/1000);
+ elbc_fcm_ctrl->status = ctrl->irq_status;
+ /* store mdr value in case it was needed */
+ if (elbc_fcm_ctrl->use_mdr)
+ elbc_fcm_ctrl->mdr = in_be32(&lbc->mdr);
+
+ elbc_fcm_ctrl->use_mdr = 0;
+
+ if (elbc_fcm_ctrl->status != LTESR_CC) {
+ dev_info(priv->dev,
+ "command failed: fir %x fcr %x status %x mdr %x\n",
+ in_be32(&lbc->fir), in_be32(&lbc->fcr),
+ elbc_fcm_ctrl->status, elbc_fcm_ctrl->mdr);
+ return -EIO;
+ }
+
+ if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
+ return 0;
+
+ elbc_fcm_ctrl->max_bitflips = 0;
+
+ if (elbc_fcm_ctrl->read_bytes == mtd->writesize + mtd->oobsize) {
+ uint32_t lteccr = in_be32(&lbc->lteccr);
+		/*
+		 * If the command was a full page read and the eLBC has the
+		 * LTECCR register, bits 12-15 (PPC order) of LTECCR indicate
+		 * which 512-byte sub-pages had corrected errors; bits 28-31
+		 * flag uncorrectable errors, which are marked elsewhere. For
+		 * small page NAND only 1 bit is used. If the eLBC doesn't
+		 * have the LTECCR register, it reads as 0.
+		 * FIXME: 4 bits can be corrected on NANDs with 2k pages, so
+		 * count the number of sub-pages with bitflips and update
+		 * ecc_stats.corrected accordingly.
+		 */
+ if (lteccr & 0x000F000F)
+ out_be32(&lbc->lteccr, 0x000F000F); /* clear lteccr */
+ if (lteccr & 0x000F0000) {
+ mtd->ecc_stats.corrected++;
+ elbc_fcm_ctrl->max_bitflips = 1;
+ }
+ }
+
+ return 0;
+}
+
+static void fsl_elbc_do_read(struct nand_chip *chip, int oob)
+{
+ struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
+ struct fsl_lbc_ctrl *ctrl = priv->ctrl;
+ struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
+
+ if (priv->page_size) {
+ out_be32(&lbc->fir,
+ (FIR_OP_CM0 << FIR_OP0_SHIFT) |
+ (FIR_OP_CA << FIR_OP1_SHIFT) |
+ (FIR_OP_PA << FIR_OP2_SHIFT) |
+ (FIR_OP_CM1 << FIR_OP3_SHIFT) |
+ (FIR_OP_RBW << FIR_OP4_SHIFT));
+
+ out_be32(&lbc->fcr, (NAND_CMD_READ0 << FCR_CMD0_SHIFT) |
+ (NAND_CMD_READSTART << FCR_CMD1_SHIFT));
+ } else {
+ out_be32(&lbc->fir,
+ (FIR_OP_CM0 << FIR_OP0_SHIFT) |
+ (FIR_OP_CA << FIR_OP1_SHIFT) |
+ (FIR_OP_PA << FIR_OP2_SHIFT) |
+ (FIR_OP_RBW << FIR_OP3_SHIFT));
+
+ if (oob)
+ out_be32(&lbc->fcr, NAND_CMD_READOOB << FCR_CMD0_SHIFT);
+ else
+ out_be32(&lbc->fcr, NAND_CMD_READ0 << FCR_CMD0_SHIFT);
+ }
+}
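+
+/*
+ * In NAND terms, the large page sequence above is: issue READ0, send the
+ * column and page address cycles, issue READSTART, then wait for ready and
+ * read the whole page plus OOB into the FCM buffer.
+ */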
+
+/* cmdfunc sends commands to the FCM */
+static void fsl_elbc_cmdfunc(struct nand_chip *chip, unsigned int command,
+ int column, int page_addr)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
+ struct fsl_lbc_ctrl *ctrl = priv->ctrl;
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand;
+ struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
+
+ elbc_fcm_ctrl->use_mdr = 0;
+
+ /* clear the read buffer */
+ elbc_fcm_ctrl->read_bytes = 0;
+ if (command != NAND_CMD_PAGEPROG)
+ elbc_fcm_ctrl->index = 0;
+
+ switch (command) {
+ /* READ0 and READ1 read the entire buffer to use hardware ECC. */
+ case NAND_CMD_READ1:
+ column += 256;
+ fallthrough;
+ case NAND_CMD_READ0:
+ dev_dbg(priv->dev,
+ "fsl_elbc_cmdfunc: NAND_CMD_READ0, page_addr:"
+ " 0x%x, column: 0x%x.\n", page_addr, column);
+
+
+ out_be32(&lbc->fbcr, 0); /* read entire page to enable ECC */
+ set_addr(mtd, 0, page_addr, 0);
+
+ elbc_fcm_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
+ elbc_fcm_ctrl->index += column;
+
+ fsl_elbc_do_read(chip, 0);
+ fsl_elbc_run_command(mtd);
+ return;
+
+ /* RNDOUT moves the pointer inside the page */
+ case NAND_CMD_RNDOUT:
+ dev_dbg(priv->dev,
+ "fsl_elbc_cmdfunc: NAND_CMD_RNDOUT, column: 0x%x.\n",
+ column);
+
+ elbc_fcm_ctrl->index = column;
+ return;
+
+ /* READOOB reads only the OOB because no ECC is performed. */
+ case NAND_CMD_READOOB:
+ dev_vdbg(priv->dev,
+ "fsl_elbc_cmdfunc: NAND_CMD_READOOB, page_addr:"
+ " 0x%x, column: 0x%x.\n", page_addr, column);
+
+ out_be32(&lbc->fbcr, mtd->oobsize - column);
+ set_addr(mtd, column, page_addr, 1);
+
+ elbc_fcm_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
+
+ fsl_elbc_do_read(chip, 1);
+ fsl_elbc_run_command(mtd);
+ return;
+
+ case NAND_CMD_READID:
+ case NAND_CMD_PARAM:
+ dev_vdbg(priv->dev, "fsl_elbc_cmdfunc: NAND_CMD %x\n", command);
+
+ out_be32(&lbc->fir, (FIR_OP_CM0 << FIR_OP0_SHIFT) |
+ (FIR_OP_UA << FIR_OP1_SHIFT) |
+ (FIR_OP_RBW << FIR_OP2_SHIFT));
+ out_be32(&lbc->fcr, command << FCR_CMD0_SHIFT);
+		/*
+		 * Although READID currently needs only 8 bytes, we always
+		 * read the maximum of 256 bytes (needed for PARAM).
+		 */
+ out_be32(&lbc->fbcr, 256);
+ elbc_fcm_ctrl->read_bytes = 256;
+ elbc_fcm_ctrl->use_mdr = 1;
+ elbc_fcm_ctrl->mdr = column;
+ set_addr(mtd, 0, 0, 0);
+ fsl_elbc_run_command(mtd);
+ return;
+
+ /* ERASE1 stores the block and page address */
+ case NAND_CMD_ERASE1:
+ dev_vdbg(priv->dev,
+ "fsl_elbc_cmdfunc: NAND_CMD_ERASE1, "
+ "page_addr: 0x%x.\n", page_addr);
+ set_addr(mtd, 0, page_addr, 0);
+ return;
+
+ /* ERASE2 uses the block and page address from ERASE1 */
+ case NAND_CMD_ERASE2:
+ dev_vdbg(priv->dev, "fsl_elbc_cmdfunc: NAND_CMD_ERASE2.\n");
+
+ out_be32(&lbc->fir,
+ (FIR_OP_CM0 << FIR_OP0_SHIFT) |
+ (FIR_OP_PA << FIR_OP1_SHIFT) |
+ (FIR_OP_CM2 << FIR_OP2_SHIFT) |
+ (FIR_OP_CW1 << FIR_OP3_SHIFT) |
+ (FIR_OP_RS << FIR_OP4_SHIFT));
+
+ out_be32(&lbc->fcr,
+ (NAND_CMD_ERASE1 << FCR_CMD0_SHIFT) |
+ (NAND_CMD_STATUS << FCR_CMD1_SHIFT) |
+ (NAND_CMD_ERASE2 << FCR_CMD2_SHIFT));
+
+ out_be32(&lbc->fbcr, 0);
+ elbc_fcm_ctrl->read_bytes = 0;
+ elbc_fcm_ctrl->use_mdr = 1;
+
+ fsl_elbc_run_command(mtd);
+ return;
+
+ /* SEQIN sets up the addr buffer and all registers except the length */
+ case NAND_CMD_SEQIN: {
+ __be32 fcr;
+ dev_vdbg(priv->dev,
+ "fsl_elbc_cmdfunc: NAND_CMD_SEQIN/PAGE_PROG, "
+ "page_addr: 0x%x, column: 0x%x.\n",
+ page_addr, column);
+
+ elbc_fcm_ctrl->column = column;
+ elbc_fcm_ctrl->use_mdr = 1;
+
+ if (column >= mtd->writesize) {
+ /* OOB area */
+ column -= mtd->writesize;
+ elbc_fcm_ctrl->oob = 1;
+ } else {
+ WARN_ON(column != 0);
+ elbc_fcm_ctrl->oob = 0;
+ }
+
+ fcr = (NAND_CMD_STATUS << FCR_CMD1_SHIFT) |
+ (NAND_CMD_SEQIN << FCR_CMD2_SHIFT) |
+ (NAND_CMD_PAGEPROG << FCR_CMD3_SHIFT);
+
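+		/*
+		 * Small-page chips need a leading pointer command (READ0 or
+		 * READOOB) before SEQIN to select the page region; large-page
+		 * chips start directly with SEQIN.
+		 */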
+ if (priv->page_size) {
+ out_be32(&lbc->fir,
+ (FIR_OP_CM2 << FIR_OP0_SHIFT) |
+ (FIR_OP_CA << FIR_OP1_SHIFT) |
+ (FIR_OP_PA << FIR_OP2_SHIFT) |
+ (FIR_OP_WB << FIR_OP3_SHIFT) |
+ (FIR_OP_CM3 << FIR_OP4_SHIFT) |
+ (FIR_OP_CW1 << FIR_OP5_SHIFT) |
+ (FIR_OP_RS << FIR_OP6_SHIFT));
+ } else {
+ out_be32(&lbc->fir,
+ (FIR_OP_CM0 << FIR_OP0_SHIFT) |
+ (FIR_OP_CM2 << FIR_OP1_SHIFT) |
+ (FIR_OP_CA << FIR_OP2_SHIFT) |
+ (FIR_OP_PA << FIR_OP3_SHIFT) |
+ (FIR_OP_WB << FIR_OP4_SHIFT) |
+ (FIR_OP_CM3 << FIR_OP5_SHIFT) |
+ (FIR_OP_CW1 << FIR_OP6_SHIFT) |
+ (FIR_OP_RS << FIR_OP7_SHIFT));
+
+ if (elbc_fcm_ctrl->oob)
+ /* OOB area --> READOOB */
+ fcr |= NAND_CMD_READOOB << FCR_CMD0_SHIFT;
+ else
+ /* First 256 bytes --> READ0 */
+ fcr |= NAND_CMD_READ0 << FCR_CMD0_SHIFT;
+ }
+
+ out_be32(&lbc->fcr, fcr);
+ set_addr(mtd, column, page_addr, elbc_fcm_ctrl->oob);
+ return;
+ }
+
+ /* PAGEPROG reuses all of the setup from SEQIN and adds the length */
+ case NAND_CMD_PAGEPROG: {
+ dev_vdbg(priv->dev,
+ "fsl_elbc_cmdfunc: NAND_CMD_PAGEPROG "
+ "writing %d bytes.\n", elbc_fcm_ctrl->index);
+
+ /* if the write did not start at 0 or is not a full page
+ * then set the exact length, otherwise use a full page
+ * write so the HW generates the ECC.
+ */
+ if (elbc_fcm_ctrl->oob || elbc_fcm_ctrl->column != 0 ||
+ elbc_fcm_ctrl->index != mtd->writesize + mtd->oobsize)
+ out_be32(&lbc->fbcr,
+ elbc_fcm_ctrl->index - elbc_fcm_ctrl->column);
+ else
+ out_be32(&lbc->fbcr, 0);
+
+ fsl_elbc_run_command(mtd);
+ return;
+ }
+
+ /* CMD_STATUS must read the status byte while CEB is active */
+ /* Note - it does not wait for the ready line */
+ case NAND_CMD_STATUS:
+ out_be32(&lbc->fir,
+ (FIR_OP_CM0 << FIR_OP0_SHIFT) |
+ (FIR_OP_RBW << FIR_OP1_SHIFT));
+ out_be32(&lbc->fcr, NAND_CMD_STATUS << FCR_CMD0_SHIFT);
+ out_be32(&lbc->fbcr, 1);
+ set_addr(mtd, 0, 0, 0);
+ elbc_fcm_ctrl->read_bytes = 1;
+
+ fsl_elbc_run_command(mtd);
+
+ /* The chip always seems to report that it is
+ * write-protected, even when it is not.
+ */
+ setbits8(elbc_fcm_ctrl->addr, NAND_STATUS_WP);
+ return;
+
+ /* RESET without waiting for the ready line */
+ case NAND_CMD_RESET:
+ dev_dbg(priv->dev, "fsl_elbc_cmdfunc: NAND_CMD_RESET.\n");
+ out_be32(&lbc->fir, FIR_OP_CM0 << FIR_OP0_SHIFT);
+ out_be32(&lbc->fcr, NAND_CMD_RESET << FCR_CMD0_SHIFT);
+ fsl_elbc_run_command(mtd);
+ return;
+
+ default:
+ dev_err(priv->dev,
+ "fsl_elbc_cmdfunc: error, unsupported command 0x%x.\n",
+ command);
+ }
+}
+
+static void fsl_elbc_select_chip(struct nand_chip *chip, int cs)
+{
+ /* The hardware does not seem to support multiple
+ * chips per bank.
+ */
+}
+
+/*
+ * Write buf to the FCM Controller Data Buffer
+ */
+static void fsl_elbc_write_buf(struct nand_chip *chip, const u8 *buf, int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
+ unsigned int bufsize = mtd->writesize + mtd->oobsize;
+
+ if (len <= 0) {
+		dev_err(priv->dev, "write_buf of %d bytes\n", len);
+ elbc_fcm_ctrl->status = 0;
+ return;
+ }
+
+ if ((unsigned int)len > bufsize - elbc_fcm_ctrl->index) {
+ dev_err(priv->dev,
+ "write_buf beyond end of buffer "
+ "(%d requested, %u available)\n",
+ len, bufsize - elbc_fcm_ctrl->index);
+ len = bufsize - elbc_fcm_ctrl->index;
+ }
+
+ memcpy_toio(&elbc_fcm_ctrl->addr[elbc_fcm_ctrl->index], buf, len);
+ /*
+	 * This is a workaround for weird eLBC hangs during NAND writes.
+	 * Scott Wood says: "...perhaps difference in how long it takes a
+ * write to make it through the localbus compared to a write to IMMR
+ * is causing problems, and sync isn't helping for some reason."
+ * Reading back the last byte helps though.
+ */
+ in_8(&elbc_fcm_ctrl->addr[elbc_fcm_ctrl->index] + len - 1);
+
+ elbc_fcm_ctrl->index += len;
+}
+
+/*
+ * Read the next byte from the FCM hardware buffer if it has any data left;
+ * otherwise report an out-of-bounds read and return ERR_BYTE.
+ */
+static u8 fsl_elbc_read_byte(struct nand_chip *chip)
+{
+ struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
+
+ /* If there are still bytes in the FCM, then use the next byte. */
+ if (elbc_fcm_ctrl->index < elbc_fcm_ctrl->read_bytes)
+ return in_8(&elbc_fcm_ctrl->addr[elbc_fcm_ctrl->index++]);
+
+ dev_err(priv->dev, "read_byte beyond end of buffer\n");
+ return ERR_BYTE;
+}
+
+/*
+ * Read from the FCM Controller Data Buffer
+ */
+static void fsl_elbc_read_buf(struct nand_chip *chip, u8 *buf, int len)
+{
+ struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
+ int avail;
+
+ if (len < 0)
+ return;
+
+ avail = min((unsigned int)len,
+ elbc_fcm_ctrl->read_bytes - elbc_fcm_ctrl->index);
+ memcpy_fromio(buf, &elbc_fcm_ctrl->addr[elbc_fcm_ctrl->index], avail);
+ elbc_fcm_ctrl->index += avail;
+
+ if (len > avail)
+ dev_err(priv->dev,
+ "read_buf beyond end of buffer "
+ "(%d requested, %d available)\n",
+ len, avail);
+}
+
+/* This function is called after Program and Erase Operations to
+ * check for success or failure.
+ */
+static int fsl_elbc_wait(struct nand_chip *chip)
+{
+ struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
+
+ if (elbc_fcm_ctrl->status != LTESR_CC)
+ return NAND_STATUS_FAIL;
+
+ /* The chip always seems to report that it is
+ * write-protected, even when it is not.
+ */
+ return (elbc_fcm_ctrl->mdr & 0xff) | NAND_STATUS_WP;
+}
+
+static int fsl_elbc_read_page(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
+ struct fsl_lbc_ctrl *ctrl = priv->ctrl;
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand;
+
+ nand_read_page_op(chip, page, 0, buf, mtd->writesize);
+ if (oob_required)
+ fsl_elbc_read_buf(chip, chip->oob_poi, mtd->oobsize);
+
+ if (fsl_elbc_wait(chip) & NAND_STATUS_FAIL)
+ mtd->ecc_stats.failed++;
+
+ return elbc_fcm_ctrl->max_bitflips;
+}
+
+/* ECC will be calculated automatically, and errors will be detected in
+ * waitfunc.
+ */
+static int fsl_elbc_write_page(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
+ fsl_elbc_write_buf(chip, chip->oob_poi, mtd->oobsize);
+
+ return nand_prog_page_end_op(chip);
+}
+
+/* ECC will be calculated automatically, and errors will be detected in
+ * waitfunc.
+ */
+static int fsl_elbc_write_subpage(struct nand_chip *chip, uint32_t offset,
+ uint32_t data_len, const uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ fsl_elbc_write_buf(chip, buf, mtd->writesize);
+ fsl_elbc_write_buf(chip, chip->oob_poi, mtd->oobsize);
+ return nand_prog_page_end_op(chip);
+}
+
+static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
+{
+ struct fsl_lbc_ctrl *ctrl = priv->ctrl;
+ struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand;
+ struct nand_chip *chip = &priv->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ dev_dbg(priv->dev, "eLBC Set Information for bank %d\n", priv->bank);
+
+ /* Fill in fsl_elbc_mtd structure */
+ mtd->dev.parent = priv->dev;
+ nand_set_flash_node(chip, priv->dev->of_node);
+
+ /* set timeout to maximum */
+ priv->fmr = 15 << FMR_CWTO_SHIFT;
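+	/* Large-page banks (OR_FCM_PGS set) need the matching ECC mode */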
+ if (in_be32(&lbc->bank[priv->bank].or) & OR_FCM_PGS)
+ priv->fmr |= FMR_ECCM;
+
+ /* fill in nand_chip structure */
+ /* set up function call table */
+ chip->legacy.read_byte = fsl_elbc_read_byte;
+ chip->legacy.write_buf = fsl_elbc_write_buf;
+ chip->legacy.read_buf = fsl_elbc_read_buf;
+ chip->legacy.select_chip = fsl_elbc_select_chip;
+ chip->legacy.cmdfunc = fsl_elbc_cmdfunc;
+ chip->legacy.waitfunc = fsl_elbc_wait;
+ chip->legacy.set_features = nand_get_set_features_notsupp;
+ chip->legacy.get_features = nand_get_set_features_notsupp;
+
+ chip->bbt_td = &bbt_main_descr;
+ chip->bbt_md = &bbt_mirror_descr;
+
+ /* set up nand options */
+ chip->bbt_options = NAND_BBT_USE_FLASH;
+
+ chip->controller = &elbc_fcm_ctrl->controller;
+ nand_set_controller_data(chip, priv);
+
+ return 0;
+}
+
+static int fsl_elbc_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
+ struct fsl_lbc_ctrl *ctrl = priv->ctrl;
+ struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
+ unsigned int al;
+
+ /*
+ * if ECC was not chosen in DT, decide whether to use HW or SW ECC from
+ * CS Base Register
+ */
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_INVALID) {
+ /* If CS Base Register selects full hardware ECC then use it */
+ if ((in_be32(&lbc->bank[priv->bank].br) & BR_DECC) ==
+ BR_DECC_CHK_GEN) {
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+ } else {
+ /* otherwise fall back to default software ECC */
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+ }
+ }
+
+ switch (chip->ecc.engine_type) {
+ /* if HW ECC was chosen, setup ecc and oob layout */
+ case NAND_ECC_ENGINE_TYPE_ON_HOST:
+ chip->ecc.read_page = fsl_elbc_read_page;
+ chip->ecc.write_page = fsl_elbc_write_page;
+ chip->ecc.write_subpage = fsl_elbc_write_subpage;
+ mtd_set_ooblayout(mtd, &fsl_elbc_ooblayout_ops);
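+		/*
+		 * The FCM produces 3 bytes of 1-bit-correcting ECC per
+		 * 512-byte sector.
+		 */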
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 3;
+ chip->ecc.strength = 1;
+ break;
+
+ /* if none or SW ECC was chosen, we do not need to set anything here */
+ case NAND_ECC_ENGINE_TYPE_NONE:
+ case NAND_ECC_ENGINE_TYPE_SOFT:
+ case NAND_ECC_ENGINE_TYPE_ON_DIE:
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ /* calculate FMR Address Length field */
+ al = 0;
+ if (chip->pagemask & 0xffff0000)
+ al++;
+ if (chip->pagemask & 0xff000000)
+ al++;
+
+ priv->fmr |= al << FMR_AL_SHIFT;
+
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->numchips = %d\n",
+ nanddev_ntargets(&chip->base));
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->chipsize = %lld\n",
+ nanddev_target_size(&chip->base));
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->pagemask = %8x\n",
+ chip->pagemask);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->legacy.chip_delay = %d\n",
+ chip->legacy.chip_delay);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->badblockpos = %d\n",
+ chip->badblockpos);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->chip_shift = %d\n",
+ chip->chip_shift);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->page_shift = %d\n",
+ chip->page_shift);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->phys_erase_shift = %d\n",
+ chip->phys_erase_shift);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.engine_type = %d\n",
+ chip->ecc.engine_type);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.steps = %d\n",
+ chip->ecc.steps);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.bytes = %d\n",
+ chip->ecc.bytes);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.total = %d\n",
+ chip->ecc.total);
+ dev_dbg(priv->dev, "fsl_elbc_init: mtd->ooblayout = %p\n",
+ mtd->ooblayout);
+ dev_dbg(priv->dev, "fsl_elbc_init: mtd->flags = %08x\n", mtd->flags);
+ dev_dbg(priv->dev, "fsl_elbc_init: mtd->size = %lld\n", mtd->size);
+ dev_dbg(priv->dev, "fsl_elbc_init: mtd->erasesize = %d\n",
+ mtd->erasesize);
+ dev_dbg(priv->dev, "fsl_elbc_init: mtd->writesize = %d\n",
+ mtd->writesize);
+ dev_dbg(priv->dev, "fsl_elbc_init: mtd->oobsize = %d\n",
+ mtd->oobsize);
+
+ /* adjust Option Register and ECC to match Flash page size */
+ if (mtd->writesize == 512) {
+ priv->page_size = 0;
+ clrbits32(&lbc->bank[priv->bank].or, OR_FCM_PGS);
+ } else if (mtd->writesize == 2048) {
+ priv->page_size = 1;
+ setbits32(&lbc->bank[priv->bank].or, OR_FCM_PGS);
+ } else {
+ dev_err(priv->dev,
+ "fsl_elbc_init: page size %d is not supported\n",
+ mtd->writesize);
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static const struct nand_controller_ops fsl_elbc_controller_ops = {
+ .attach_chip = fsl_elbc_attach_chip,
+};
+
+static int fsl_elbc_chip_remove(struct fsl_elbc_mtd *priv)
+{
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
+ struct mtd_info *mtd = nand_to_mtd(&priv->chip);
+
+ kfree(mtd->name);
+
+ if (priv->vbase)
+ iounmap(priv->vbase);
+
+ elbc_fcm_ctrl->chips[priv->bank] = NULL;
+ kfree(priv);
+ return 0;
+}
+
+static DEFINE_MUTEX(fsl_elbc_nand_mutex);
+
+static int fsl_elbc_nand_probe(struct platform_device *pdev)
+{
+ struct fsl_lbc_regs __iomem *lbc;
+ struct fsl_elbc_mtd *priv;
+ struct resource res;
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl;
+ static const char *part_probe_types[]
+ = { "cmdlinepart", "RedBoot", "ofpart", NULL };
+ int ret;
+ int bank;
+ struct device *dev;
+ struct device_node *node = pdev->dev.of_node;
+ struct mtd_info *mtd;
+
+ if (!fsl_lbc_ctrl_dev || !fsl_lbc_ctrl_dev->regs)
+ return -ENODEV;
+ lbc = fsl_lbc_ctrl_dev->regs;
+ dev = fsl_lbc_ctrl_dev->dev;
+
+ /* get, allocate and map the memory resource */
+ ret = of_address_to_resource(node, 0, &res);
+ if (ret) {
+ dev_err(dev, "failed to get resource\n");
+ return ret;
+ }
+
+ /* find which chip select it is connected to */
+ for (bank = 0; bank < MAX_BANKS; bank++)
+ if ((in_be32(&lbc->bank[bank].br) & BR_V) &&
+ (in_be32(&lbc->bank[bank].br) & BR_MSEL) == BR_MS_FCM &&
+ (in_be32(&lbc->bank[bank].br) &
+ in_be32(&lbc->bank[bank].or) & BR_BA)
+ == fsl_lbc_addr(res.start))
+ break;
+
+ if (bank >= MAX_BANKS) {
+ dev_err(dev, "address did not match any chip selects\n");
+ return -ENODEV;
+ }
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ mutex_lock(&fsl_elbc_nand_mutex);
+ if (!fsl_lbc_ctrl_dev->nand) {
+ elbc_fcm_ctrl = kzalloc(sizeof(*elbc_fcm_ctrl), GFP_KERNEL);
+ if (!elbc_fcm_ctrl) {
+ mutex_unlock(&fsl_elbc_nand_mutex);
+ ret = -ENOMEM;
+ goto err;
+ }
+ elbc_fcm_ctrl->counter++;
+
+ nand_controller_init(&elbc_fcm_ctrl->controller);
+ fsl_lbc_ctrl_dev->nand = elbc_fcm_ctrl;
+ } else {
+ elbc_fcm_ctrl = fsl_lbc_ctrl_dev->nand;
+ }
+ mutex_unlock(&fsl_elbc_nand_mutex);
+
+ elbc_fcm_ctrl->chips[bank] = priv;
+ priv->bank = bank;
+ priv->ctrl = fsl_lbc_ctrl_dev;
+ priv->dev = &pdev->dev;
+ dev_set_drvdata(priv->dev, priv);
+
+ priv->vbase = ioremap(res.start, resource_size(&res));
+ if (!priv->vbase) {
+ dev_err(dev, "failed to map chip region\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ mtd = nand_to_mtd(&priv->chip);
+ mtd->name = kasprintf(GFP_KERNEL, "%llx.flash", (u64)res.start);
+	if (!mtd->name) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = fsl_elbc_chip_init(priv);
+ if (ret)
+ goto err;
+
+ priv->chip.controller->ops = &fsl_elbc_controller_ops;
+ ret = nand_scan(&priv->chip, 1);
+ if (ret)
+ goto err;
+
+	/* First look for RedBoot table or partitions on the command
+	 * line; these take precedence over device tree information. */
+ ret = mtd_device_parse_register(mtd, part_probe_types, NULL, NULL, 0);
+ if (ret)
+ goto cleanup_nand;
+
+ pr_info("eLBC NAND device at 0x%llx, bank %d\n",
+ (unsigned long long)res.start, priv->bank);
+
+ return 0;
+
+cleanup_nand:
+ nand_cleanup(&priv->chip);
+err:
+ fsl_elbc_chip_remove(priv);
+
+ return ret;
+}
+
+static int fsl_elbc_nand_remove(struct platform_device *pdev)
+{
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = fsl_lbc_ctrl_dev->nand;
+ struct fsl_elbc_mtd *priv = dev_get_drvdata(&pdev->dev);
+ struct nand_chip *chip = &priv->chip;
+ int ret;
+
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+
+ fsl_elbc_chip_remove(priv);
+
+ mutex_lock(&fsl_elbc_nand_mutex);
+ elbc_fcm_ctrl->counter--;
+ if (!elbc_fcm_ctrl->counter) {
+ fsl_lbc_ctrl_dev->nand = NULL;
+ kfree(elbc_fcm_ctrl);
+ }
+ mutex_unlock(&fsl_elbc_nand_mutex);
+
+ return 0;
+}
+
+static const struct of_device_id fsl_elbc_nand_match[] = {
+ { .compatible = "fsl,elbc-fcm-nand", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, fsl_elbc_nand_match);
+
+static struct platform_driver fsl_elbc_nand_driver = {
+ .driver = {
+ .name = "fsl,elbc-fcm-nand",
+ .of_match_table = fsl_elbc_nand_match,
+ },
+ .probe = fsl_elbc_nand_probe,
+ .remove = fsl_elbc_nand_remove,
+};
+
+module_platform_driver(fsl_elbc_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Freescale");
+MODULE_DESCRIPTION("Freescale Enhanced Local Bus Controller MTD NAND driver");
diff --git a/drivers/mtd/nand/raw/fsl_ifc_nand.c b/drivers/mtd/nand/raw/fsl_ifc_nand.c
new file mode 100644
index 000000000..fcda744e8
--- /dev/null
+++ b/drivers/mtd/nand/raw/fsl_ifc_nand.c
@@ -0,0 +1,1142 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Freescale Integrated Flash Controller NAND driver
+ *
+ * Copyright 2011-2012 Freescale Semiconductor, Inc
+ *
+ * Author: Dipen Dudhat <Dipen.Dudhat@freescale.com>
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/fsl_ifc.h>
+#include <linux/iopoll.h>
+
+#define ERR_BYTE 0xFF /* Value returned for read
+ bytes when read failed */
+#define IFC_TIMEOUT_MSECS 1000 /* Maximum timeout to wait
+ for IFC NAND Machine */
+
+struct fsl_ifc_ctrl;
+
+/* mtd information per set */
+struct fsl_ifc_mtd {
+ struct nand_chip chip;
+ struct fsl_ifc_ctrl *ctrl;
+
+ struct device *dev;
+ int bank; /* Chip select bank number */
+ unsigned int bufnum_mask; /* bufnum = page & bufnum_mask */
+ u8 __iomem *vbase; /* Chip select base virtual address */
+};
+
+/* overview of the FSL IFC NAND controller */
+struct fsl_ifc_nand_ctrl {
+ struct nand_controller controller;
+ struct fsl_ifc_mtd *chips[FSL_IFC_BANK_COUNT];
+
+ void __iomem *addr; /* Address of assigned IFC buffer */
+ unsigned int page; /* Last page written to / read from */
+ unsigned int read_bytes;/* Number of bytes read during command */
+ unsigned int column; /* Saved column from SEQIN */
+ unsigned int index; /* Pointer to next byte to 'read' */
+	unsigned int oob;	/* Non-zero if operating on OOB data	*/
+	unsigned int eccread;	/* Non-zero for a full-page ECC read	*/
+ unsigned int counter; /* counter for the initializations */
+ unsigned int max_bitflips; /* Saved during READ0 cmd */
+};
+
+static struct fsl_ifc_nand_ctrl *ifc_nand_ctrl;
+
+/*
+ * Generic flash bbt descriptors
+ */
+static u8 bbt_pattern[] = {'B', 'b', 't', '0' };
+static u8 mirror_pattern[] = {'1', 't', 'b', 'B' };
+
+static struct nand_bbt_descr bbt_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
+ NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 2, /* 0 on 8-bit small page */
+ .len = 4,
+ .veroffs = 6,
+ .maxblocks = 4,
+ .pattern = bbt_pattern,
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
+ NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 2, /* 0 on 8-bit small page */
+ .len = 4,
+ .veroffs = 6,
+ .maxblocks = 4,
+ .pattern = mirror_pattern,
+};
+
+static int fsl_ifc_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = 8;
+ oobregion->length = chip->ecc.total;
+
+ return 0;
+}
+
+static int fsl_ifc_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section > 1)
+ return -ERANGE;
+
+ if (mtd->writesize == 512 &&
+ !(chip->options & NAND_BUSWIDTH_16)) {
+ if (!section) {
+ oobregion->offset = 0;
+ oobregion->length = 5;
+ } else {
+ oobregion->offset = 6;
+ oobregion->length = 2;
+ }
+
+ return 0;
+ }
+
+ if (!section) {
+ oobregion->offset = 2;
+ oobregion->length = 6;
+ } else {
+ oobregion->offset = chip->ecc.total + 8;
+ oobregion->length = mtd->oobsize - oobregion->offset;
+ }
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops fsl_ifc_ooblayout_ops = {
+ .ecc = fsl_ifc_ooblayout_ecc,
+ .free = fsl_ifc_ooblayout_free,
+};
+
+/*
+ * Set up the IFC hardware block and page address fields, and the ifc nand
+ * structure addr field to point to the correct IFC buffer in memory
+ */
+static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
+ int buf_num;
+
+ ifc_nand_ctrl->page = page_addr;
+ /* Program ROW0/COL0 */
+ ifc_out32(page_addr, &ifc->ifc_nand.row0);
+ ifc_out32((oob ? IFC_NAND_COL_MS : 0) | column, &ifc->ifc_nand.col0);
+
+ buf_num = page_addr & priv->bufnum_mask;
+
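+	/*
+	 * Each internal buffer slot holds the page data followed by its
+	 * spare area, so slots are spaced writesize * 2 apart.
+	 */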
+ ifc_nand_ctrl->addr = priv->vbase + buf_num * (mtd->writesize * 2);
+ ifc_nand_ctrl->index = column;
+
+ /* for OOB data point to the second half of the buffer */
+ if (oob)
+ ifc_nand_ctrl->index += mtd->writesize;
+}
+
+/*
+ * Extract the ECC status for the given sector from the packed ECCSTAT
+ * word: the number of corrected bitflips, or 15 for an uncorrectable
+ * error. Four sectors are packed per 32-bit word, most significant
+ * byte first (e.g. the status for sector 1 sits in bits 23:16).
+ */
+static int check_read_ecc(struct mtd_info *mtd, struct fsl_ifc_ctrl *ctrl,
+ u32 eccstat, unsigned int bufnum)
+{
+ return (eccstat >> ((3 - bufnum % 4) * 8)) & 15;
+}
+
+/*
+ * execute IFC NAND command and wait for it to complete
+ */
+static void fsl_ifc_run_command(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl;
+ struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
+ u32 eccstat;
+ int i;
+
+ /* set the chip select for NAND Transaction */
+ ifc_out32(priv->bank << IFC_NAND_CSEL_SHIFT,
+ &ifc->ifc_nand.nand_csel);
+
+ dev_vdbg(priv->dev,
+ "%s: fir0=%08x fcr0=%08x\n",
+ __func__,
+ ifc_in32(&ifc->ifc_nand.nand_fir0),
+ ifc_in32(&ifc->ifc_nand.nand_fcr0));
+
+ ctrl->nand_stat = 0;
+
+ /* start read/write seq */
+ ifc_out32(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt);
+
+ /* wait for command complete flag or timeout */
+ wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat,
+ msecs_to_jiffies(IFC_TIMEOUT_MSECS));
+
+ /* ctrl->nand_stat will be updated from IRQ context */
+ if (!ctrl->nand_stat)
+ dev_err(priv->dev, "Controller is not responding\n");
+ if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_FTOER)
+ dev_err(priv->dev, "NAND Flash Timeout Error\n");
+ if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_WPER)
+ dev_err(priv->dev, "NAND Flash Write Protect Error\n");
+
+ nctrl->max_bitflips = 0;
+
+ if (nctrl->eccread) {
+ int errors;
+ int bufnum = nctrl->page & priv->bufnum_mask;
+ int sector_start = bufnum * chip->ecc.steps;
+ int sector_end = sector_start + chip->ecc.steps - 1;
+ __be32 __iomem *eccstat_regs;
+
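+		/* ECCSTAT packs the status of four sectors per 32-bit register */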
+ eccstat_regs = ifc->ifc_nand.nand_eccstat;
+ eccstat = ifc_in32(&eccstat_regs[sector_start / 4]);
+
+ for (i = sector_start; i <= sector_end; i++) {
+ if (i != sector_start && !(i % 4))
+ eccstat = ifc_in32(&eccstat_regs[i / 4]);
+
+ errors = check_read_ecc(mtd, ctrl, eccstat, i);
+
+ if (errors == 15) {
+ /*
+ * Uncorrectable error.
+ * We'll check for blank pages later.
+ *
+				 * We disable ECCER reporting due to erratum
+				 * IFC-A002770, so report it now if we see an
+				 * uncorrectable error in ECCSTAT.
+ */
+ ctrl->nand_stat |= IFC_NAND_EVTER_STAT_ECCER;
+ continue;
+ }
+
+ mtd->ecc_stats.corrected += errors;
+ nctrl->max_bitflips = max_t(unsigned int,
+ nctrl->max_bitflips,
+ errors);
+ }
+
+ nctrl->eccread = 0;
+ }
+}
+
+static void fsl_ifc_do_read(struct nand_chip *chip,
+ int oob,
+ struct mtd_info *mtd)
+{
+ struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
+
+ /* Program FIR/IFC_NAND_FCR0 for Small/Large page */
+ if (mtd->writesize > 512) {
+ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+ (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP3_SHIFT) |
+ (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP4_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ ifc_out32(0x0, &ifc->ifc_nand.nand_fir1);
+
+ ifc_out32((NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT) |
+ (NAND_CMD_READSTART << IFC_NAND_FCR0_CMD1_SHIFT),
+ &ifc->ifc_nand.nand_fcr0);
+ } else {
+ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+ (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP3_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ ifc_out32(0x0, &ifc->ifc_nand.nand_fir1);
+
+ if (oob)
+ ifc_out32(NAND_CMD_READOOB <<
+ IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
+ else
+ ifc_out32(NAND_CMD_READ0 <<
+ IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
+ }
+}
+
+/* cmdfunc send commands to the IFC NAND Machine */
+static void fsl_ifc_cmdfunc(struct nand_chip *chip, unsigned int command,
+			    int column, int page_addr)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
+
+ /* clear the read buffer */
+ ifc_nand_ctrl->read_bytes = 0;
+ if (command != NAND_CMD_PAGEPROG)
+ ifc_nand_ctrl->index = 0;
+
+ switch (command) {
+	/* READ0 reads the entire buffer to use hardware ECC. */
+ case NAND_CMD_READ0:
+ ifc_out32(0, &ifc->ifc_nand.nand_fbcr);
+ set_addr(mtd, 0, page_addr, 0);
+
+ ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
+ ifc_nand_ctrl->index += column;
+
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST)
+ ifc_nand_ctrl->eccread = 1;
+
+ fsl_ifc_do_read(chip, 0, mtd);
+ fsl_ifc_run_command(mtd);
+ return;
+
+ /* READOOB reads only the OOB because no ECC is performed. */
+ case NAND_CMD_READOOB:
+ ifc_out32(mtd->oobsize - column, &ifc->ifc_nand.nand_fbcr);
+ set_addr(mtd, column, page_addr, 1);
+
+ ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
+
+ fsl_ifc_do_read(chip, 1, mtd);
+ fsl_ifc_run_command(mtd);
+
+ return;
+
+ case NAND_CMD_READID:
+ case NAND_CMD_PARAM: {
+ /*
+		 * For READID, read the 8 bytes that are currently used.
+		 * For PARAM, read all 3 copies of the 256-byte parameter page.
+ */
+ int len = 8;
+ int timing = IFC_FIR_OP_RB;
+ if (command == NAND_CMD_PARAM) {
+ timing = IFC_FIR_OP_RBCD;
+ len = 256 * 3;
+ }
+
+ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
+ (timing << IFC_NAND_FIR0_OP2_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ ifc_out32(command << IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
+ ifc_out32(column, &ifc->ifc_nand.row3);
+
+ ifc_out32(len, &ifc->ifc_nand.nand_fbcr);
+ ifc_nand_ctrl->read_bytes = len;
+
+ set_addr(mtd, 0, 0, 0);
+ fsl_ifc_run_command(mtd);
+ return;
+ }
+
+ /* ERASE1 stores the block and page address */
+ case NAND_CMD_ERASE1:
+ set_addr(mtd, 0, page_addr, 0);
+ return;
+
+ /* ERASE2 uses the block and page address from ERASE1 */
+ case NAND_CMD_ERASE2:
+ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP2_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+
+ ifc_out32((NAND_CMD_ERASE1 << IFC_NAND_FCR0_CMD0_SHIFT) |
+ (NAND_CMD_ERASE2 << IFC_NAND_FCR0_CMD1_SHIFT),
+ &ifc->ifc_nand.nand_fcr0);
+
+ ifc_out32(0, &ifc->ifc_nand.nand_fbcr);
+ ifc_nand_ctrl->read_bytes = 0;
+ fsl_ifc_run_command(mtd);
+ return;
+
+ /* SEQIN sets up the addr buffer and all registers except the length */
+ case NAND_CMD_SEQIN: {
+ u32 nand_fcr0;
+ ifc_nand_ctrl->column = column;
+ ifc_nand_ctrl->oob = 0;
+
+ if (mtd->writesize > 512) {
+ nand_fcr0 =
+ (NAND_CMD_SEQIN << IFC_NAND_FCR0_CMD0_SHIFT) |
+ (NAND_CMD_STATUS << IFC_NAND_FCR0_CMD1_SHIFT) |
+ (NAND_CMD_PAGEPROG << IFC_NAND_FCR0_CMD2_SHIFT);
+
+ ifc_out32(
+ (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+ (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP3_SHIFT) |
+ (IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP4_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ ifc_out32(
+ (IFC_FIR_OP_CW1 << IFC_NAND_FIR1_OP5_SHIFT) |
+ (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR1_OP6_SHIFT) |
+ (IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP7_SHIFT),
+ &ifc->ifc_nand.nand_fir1);
+ } else {
+ nand_fcr0 = ((NAND_CMD_PAGEPROG <<
+ IFC_NAND_FCR0_CMD1_SHIFT) |
+ (NAND_CMD_SEQIN <<
+ IFC_NAND_FCR0_CMD2_SHIFT) |
+ (NAND_CMD_STATUS <<
+ IFC_NAND_FCR0_CMD3_SHIFT));
+
+ ifc_out32(
+ (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP3_SHIFT) |
+ (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP4_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ ifc_out32(
+ (IFC_FIR_OP_CMD1 << IFC_NAND_FIR1_OP5_SHIFT) |
+ (IFC_FIR_OP_CW3 << IFC_NAND_FIR1_OP6_SHIFT) |
+ (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR1_OP7_SHIFT) |
+ (IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP8_SHIFT),
+ &ifc->ifc_nand.nand_fir1);
+
+ if (column >= mtd->writesize)
+ nand_fcr0 |=
+ NAND_CMD_READOOB << IFC_NAND_FCR0_CMD0_SHIFT;
+ else
+ nand_fcr0 |=
+ NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT;
+ }
+
+ if (column >= mtd->writesize) {
+ /* OOB area --> READOOB */
+ column -= mtd->writesize;
+ ifc_nand_ctrl->oob = 1;
+ }
+ ifc_out32(nand_fcr0, &ifc->ifc_nand.nand_fcr0);
+ set_addr(mtd, column, page_addr, ifc_nand_ctrl->oob);
+ return;
+ }
+
+ /* PAGEPROG reuses all of the setup from SEQIN and adds the length */
+ case NAND_CMD_PAGEPROG: {
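+		/*
+		 * A byte count of zero means a full-page transfer, which lets
+		 * the hardware generate the ECC; OOB-only writes must program
+		 * the exact length instead.
+		 */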
+ if (ifc_nand_ctrl->oob) {
+ ifc_out32(ifc_nand_ctrl->index -
+ ifc_nand_ctrl->column,
+ &ifc->ifc_nand.nand_fbcr);
+ } else {
+ ifc_out32(0, &ifc->ifc_nand.nand_fbcr);
+ }
+
+ fsl_ifc_run_command(mtd);
+ return;
+ }
+
+ case NAND_CMD_STATUS: {
+ void __iomem *addr;
+
+ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP1_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ ifc_out32(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
+ ifc_out32(1, &ifc->ifc_nand.nand_fbcr);
+ set_addr(mtd, 0, 0, 0);
+ ifc_nand_ctrl->read_bytes = 1;
+
+ fsl_ifc_run_command(mtd);
+
+ /*
+ * The chip always seems to report that it is
+ * write-protected, even when it is not.
+ */
+ addr = ifc_nand_ctrl->addr;
+ if (chip->options & NAND_BUSWIDTH_16)
+ ifc_out16(ifc_in16(addr) | (NAND_STATUS_WP), addr);
+ else
+ ifc_out8(ifc_in8(addr) | (NAND_STATUS_WP), addr);
+ return;
+ }
+
+ case NAND_CMD_RESET:
+ ifc_out32(IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT,
+ &ifc->ifc_nand.nand_fir0);
+ ifc_out32(NAND_CMD_RESET << IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
+ fsl_ifc_run_command(mtd);
+ return;
+
+ default:
+ dev_err(priv->dev, "%s: error, unsupported command 0x%x.\n",
+ __func__, command);
+ }
+}
+
+static void fsl_ifc_select_chip(struct nand_chip *chip, int cs)
+{
+ /* The hardware does not seem to support multiple
+ * chips per bank.
+ */
+}
+
+/*
+ * Write buf to the IFC NAND Controller Data Buffer
+ */
+static void fsl_ifc_write_buf(struct nand_chip *chip, const u8 *buf, int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
+ unsigned int bufsize = mtd->writesize + mtd->oobsize;
+
+ if (len <= 0) {
+		dev_err(priv->dev, "%s: len %d bytes\n", __func__, len);
+ return;
+ }
+
+ if ((unsigned int)len > bufsize - ifc_nand_ctrl->index) {
+ dev_err(priv->dev,
+ "%s: beyond end of buffer (%d requested, %u available)\n",
+ __func__, len, bufsize - ifc_nand_ctrl->index);
+ len = bufsize - ifc_nand_ctrl->index;
+ }
+
+ memcpy_toio(ifc_nand_ctrl->addr + ifc_nand_ctrl->index, buf, len);
+ ifc_nand_ctrl->index += len;
+}
+
+/*
+ * Read a byte from the IFC hardware buffer;
+ * read function for 8-bit buswidth.
+ */
+static uint8_t fsl_ifc_read_byte(struct nand_chip *chip)
+{
+ struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
+ unsigned int offset;
+
+ /*
+ * If there are still bytes in the IFC buffer, then use the
+ * next byte.
+ */
+ if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes) {
+ offset = ifc_nand_ctrl->index++;
+ return ifc_in8(ifc_nand_ctrl->addr + offset);
+ }
+
+ dev_err(priv->dev, "%s: beyond end of buffer\n", __func__);
+ return ERR_BYTE;
+}
+
+/*
+ * Read two bytes from the IFC hardware buffer;
+ * read function for 16-bit buswidth.
+ */
+static uint8_t fsl_ifc_read_byte16(struct nand_chip *chip)
+{
+ struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
+ uint16_t data;
+
+ /*
+ * If there are still bytes in the IFC buffer, then use the
+ * next byte.
+ */
+ if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes) {
+ data = ifc_in16(ifc_nand_ctrl->addr + ifc_nand_ctrl->index);
+ ifc_nand_ctrl->index += 2;
+ return (uint8_t) data;
+ }
+
+ dev_err(priv->dev, "%s: beyond end of buffer\n", __func__);
+ return ERR_BYTE;
+}
+
+/*
+ * Read from the IFC Controller Data Buffer
+ */
+static void fsl_ifc_read_buf(struct nand_chip *chip, u8 *buf, int len)
+{
+ struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
+ int avail;
+
+ if (len < 0) {
+		dev_err(priv->dev, "%s: len %d bytes\n", __func__, len);
+ return;
+ }
+
+ avail = min((unsigned int)len,
+ ifc_nand_ctrl->read_bytes - ifc_nand_ctrl->index);
+ memcpy_fromio(buf, ifc_nand_ctrl->addr + ifc_nand_ctrl->index, avail);
+ ifc_nand_ctrl->index += avail;
+
+ if (len > avail)
+ dev_err(priv->dev,
+ "%s: beyond end of buffer (%d requested, %d available)\n",
+ __func__, len, avail);
+}
+
+/*
+ * This function is called after Program and Erase Operations to
+ * check for success or failure.
+ */
+static int fsl_ifc_wait(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
+ u32 nand_fsr;
+ int status;
+
+ /* Use READ_STATUS command, but wait for the device to be ready */
+ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR0_OP1_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ ifc_out32(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
+ ifc_out32(1, &ifc->ifc_nand.nand_fbcr);
+ set_addr(mtd, 0, 0, 0);
+ ifc_nand_ctrl->read_bytes = 1;
+
+ fsl_ifc_run_command(mtd);
+
+ nand_fsr = ifc_in32(&ifc->ifc_nand.nand_fsr);
+ status = nand_fsr >> 24;
+ /*
+ * The chip always seems to report that it is
+ * write-protected, even when it is not.
+ */
+ return status | NAND_STATUS_WP;
+}
+
+/*
+ * The controller does not check for bitflips in erased pages,
+ * therefore software must check instead.
+ */
+static int check_erased_page(struct nand_chip *chip, u8 *buf)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u8 *ecc = chip->oob_poi;
+ const int ecc_size = chip->ecc.bytes;
+ const int pkt_size = chip->ecc.size;
+ int i, res, bitflips = 0;
+ struct mtd_oob_region oobregion = { };
+
+ mtd_ooblayout_ecc(mtd, 0, &oobregion);
+ ecc += oobregion.offset;
+
+ for (i = 0; i < chip->ecc.steps; ++i) {
+ res = nand_check_erased_ecc_chunk(buf, pkt_size, ecc, ecc_size,
+ NULL, 0,
+ chip->ecc.strength);
+ if (res < 0)
+ mtd->ecc_stats.failed++;
+ else
+ mtd->ecc_stats.corrected += res;
+
+ bitflips = max(res, bitflips);
+ buf += pkt_size;
+ ecc += ecc_size;
+ }
+
+ return bitflips;
+}
+
+static int fsl_ifc_read_page(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl;
+
+ nand_read_page_op(chip, page, 0, buf, mtd->writesize);
+ if (oob_required)
+ fsl_ifc_read_buf(chip, chip->oob_poi, mtd->oobsize);
+
+ if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_ECCER) {
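+		/* The erased-page check needs the ECC bytes from the OOB area */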
+ if (!oob_required)
+ fsl_ifc_read_buf(chip, chip->oob_poi, mtd->oobsize);
+
+ return check_erased_page(chip, buf);
+ }
+
+ if (ctrl->nand_stat != IFC_NAND_EVTER_STAT_OPC)
+ mtd->ecc_stats.failed++;
+
+ return nctrl->max_bitflips;
+}
+
+/* ECC will be calculated automatically, and errors will be detected in
+ * waitfunc.
+ */
+static int fsl_ifc_write_page(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
+ fsl_ifc_write_buf(chip, chip->oob_poi, mtd->oobsize);
+
+ return nand_prog_page_end_op(chip);
+}
+
+static int fsl_ifc_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_global __iomem *ifc_global = ctrl->gregs;
+ u32 csor;
+
+ csor = ifc_in32(&ifc_global->csor_cs[priv->bank].csor);
+
+ /* Must also set CSOR_NAND_ECC_ENC_EN if DEC_EN set */
+ if (csor & CSOR_NAND_ECC_DEC_EN) {
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+ mtd_set_ooblayout(mtd, &fsl_ifc_ooblayout_ops);
+
+ /* Hardware generates ECC per 512 Bytes */
+ chip->ecc.size = 512;
+ if ((csor & CSOR_NAND_ECC_MODE_MASK) == CSOR_NAND_ECC_MODE_4) {
+ chip->ecc.bytes = 8;
+ chip->ecc.strength = 4;
+ } else {
+ chip->ecc.bytes = 16;
+ chip->ecc.strength = 8;
+ }
+ } else {
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+ }
+
+ dev_dbg(priv->dev, "%s: nand->numchips = %d\n", __func__,
+ nanddev_ntargets(&chip->base));
+ dev_dbg(priv->dev, "%s: nand->chipsize = %lld\n", __func__,
+ nanddev_target_size(&chip->base));
+ dev_dbg(priv->dev, "%s: nand->pagemask = %8x\n", __func__,
+ chip->pagemask);
+ dev_dbg(priv->dev, "%s: nand->legacy.chip_delay = %d\n", __func__,
+ chip->legacy.chip_delay);
+ dev_dbg(priv->dev, "%s: nand->badblockpos = %d\n", __func__,
+ chip->badblockpos);
+ dev_dbg(priv->dev, "%s: nand->chip_shift = %d\n", __func__,
+ chip->chip_shift);
+ dev_dbg(priv->dev, "%s: nand->page_shift = %d\n", __func__,
+ chip->page_shift);
+ dev_dbg(priv->dev, "%s: nand->phys_erase_shift = %d\n", __func__,
+ chip->phys_erase_shift);
+ dev_dbg(priv->dev, "%s: nand->ecc.engine_type = %d\n", __func__,
+ chip->ecc.engine_type);
+ dev_dbg(priv->dev, "%s: nand->ecc.steps = %d\n", __func__,
+ chip->ecc.steps);
+ dev_dbg(priv->dev, "%s: nand->ecc.bytes = %d\n", __func__,
+ chip->ecc.bytes);
+ dev_dbg(priv->dev, "%s: nand->ecc.total = %d\n", __func__,
+ chip->ecc.total);
+ dev_dbg(priv->dev, "%s: mtd->ooblayout = %p\n", __func__,
+ mtd->ooblayout);
+ dev_dbg(priv->dev, "%s: mtd->flags = %08x\n", __func__, mtd->flags);
+ dev_dbg(priv->dev, "%s: mtd->size = %lld\n", __func__, mtd->size);
+ dev_dbg(priv->dev, "%s: mtd->erasesize = %d\n", __func__,
+ mtd->erasesize);
+ dev_dbg(priv->dev, "%s: mtd->writesize = %d\n", __func__,
+ mtd->writesize);
+ dev_dbg(priv->dev, "%s: mtd->oobsize = %d\n", __func__,
+ mtd->oobsize);
+
+ return 0;
+}
+
+static const struct nand_controller_ops fsl_ifc_controller_ops = {
+ .attach_chip = fsl_ifc_attach_chip,
+};
+
+static int fsl_ifc_sram_init(struct fsl_ifc_mtd *priv)
+{
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_runtime __iomem *ifc_runtime = ctrl->rregs;
+ struct fsl_ifc_global __iomem *ifc_global = ctrl->gregs;
+ uint32_t csor = 0, csor_8k = 0, csor_ext = 0;
+ uint32_t cs = priv->bank;
+
+ if (ctrl->version < FSL_IFC_VERSION_1_1_0)
+ return 0;
+
+ if (ctrl->version > FSL_IFC_VERSION_1_1_0) {
+ u32 ncfgr, status;
+ int ret;
+
+ /* Trigger auto initialization */
+ ncfgr = ifc_in32(&ifc_runtime->ifc_nand.ncfgr);
+ ifc_out32(ncfgr | IFC_NAND_NCFGR_SRAM_INIT_EN, &ifc_runtime->ifc_nand.ncfgr);
+
+ /* Wait until done */
+ ret = readx_poll_timeout(ifc_in32, &ifc_runtime->ifc_nand.ncfgr,
+ status, !(status & IFC_NAND_NCFGR_SRAM_INIT_EN),
+ 10, IFC_TIMEOUT_MSECS * 1000);
+ if (ret)
+ dev_err(priv->dev, "Failed to initialize SRAM!\n");
+
+ return ret;
+ }
+
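+	/*
+	 * On IFC 1.1.0 exactly, the SRAM must be initialized manually: save
+	 * the CSOR settings, temporarily program the largest page/spare size,
+	 * issue a dummy READID so the whole SRAM buffer is touched, then
+	 * restore the original settings.
+	 */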
+ /* Save CSOR and CSOR_ext */
+ csor = ifc_in32(&ifc_global->csor_cs[cs].csor);
+ csor_ext = ifc_in32(&ifc_global->csor_cs[cs].csor_ext);
+
+	/* change PageSize to 8K and SpareSize to 1K */
+ csor_8k = (csor & ~(CSOR_NAND_PGS_MASK)) | 0x0018C000;
+ ifc_out32(csor_8k, &ifc_global->csor_cs[cs].csor);
+ ifc_out32(0x0000400, &ifc_global->csor_cs[cs].csor_ext);
+
+ /* READID */
+ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT),
+ &ifc_runtime->ifc_nand.nand_fir0);
+ ifc_out32(NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc_runtime->ifc_nand.nand_fcr0);
+ ifc_out32(0x0, &ifc_runtime->ifc_nand.row3);
+
+ ifc_out32(0x0, &ifc_runtime->ifc_nand.nand_fbcr);
+
+ /* Program ROW0/COL0 */
+ ifc_out32(0x0, &ifc_runtime->ifc_nand.row0);
+ ifc_out32(0x0, &ifc_runtime->ifc_nand.col0);
+
+ /* set the chip select for NAND Transaction */
+ ifc_out32(cs << IFC_NAND_CSEL_SHIFT,
+ &ifc_runtime->ifc_nand.nand_csel);
+
+ /* start read seq */
+ ifc_out32(IFC_NAND_SEQ_STRT_FIR_STRT,
+ &ifc_runtime->ifc_nand.nandseq_strt);
+
+ /* wait for command complete flag or timeout */
+ wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat,
+ msecs_to_jiffies(IFC_TIMEOUT_MSECS));
+
+ if (ctrl->nand_stat != IFC_NAND_EVTER_STAT_OPC) {
+		pr_err("fsl-ifc: Failed to initialize SRAM\n");
+ return -ETIMEDOUT;
+ }
+
+ /* Restore CSOR and CSOR_ext */
+ ifc_out32(csor, &ifc_global->csor_cs[cs].csor);
+ ifc_out32(csor_ext, &ifc_global->csor_cs[cs].csor_ext);
+
+ return 0;
+}
+
+static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
+{
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_global __iomem *ifc_global = ctrl->gregs;
+ struct fsl_ifc_runtime __iomem *ifc_runtime = ctrl->rregs;
+ struct nand_chip *chip = &priv->chip;
+ struct mtd_info *mtd = nand_to_mtd(&priv->chip);
+ u32 csor;
+ int ret;
+
+ /* Fill in fsl_ifc_mtd structure */
+ mtd->dev.parent = priv->dev;
+ nand_set_flash_node(chip, priv->dev->of_node);
+
+ /* fill in nand_chip structure */
+ /* set up function call table */
+ chip->legacy.write_buf = fsl_ifc_write_buf;
+ chip->legacy.read_buf = fsl_ifc_read_buf;
+ chip->legacy.select_chip = fsl_ifc_select_chip;
+ chip->legacy.cmdfunc = fsl_ifc_cmdfunc;
+ chip->legacy.waitfunc = fsl_ifc_wait;
+ chip->legacy.set_features = nand_get_set_features_notsupp;
+ chip->legacy.get_features = nand_get_set_features_notsupp;
+
+ chip->bbt_td = &bbt_main_descr;
+ chip->bbt_md = &bbt_mirror_descr;
+
+ ifc_out32(0x0, &ifc_runtime->ifc_nand.ncfgr);
+
+ /* set up nand options */
+ chip->bbt_options = NAND_BBT_USE_FLASH;
+ chip->options = NAND_NO_SUBPAGE_WRITE;
+
+ if (ifc_in32(&ifc_global->cspr_cs[priv->bank].cspr)
+ & CSPR_PORT_SIZE_16) {
+ chip->legacy.read_byte = fsl_ifc_read_byte16;
+ chip->options |= NAND_BUSWIDTH_16;
+ } else {
+ chip->legacy.read_byte = fsl_ifc_read_byte;
+ }
+
+ chip->controller = &ifc_nand_ctrl->controller;
+ nand_set_controller_data(chip, priv);
+
+ chip->ecc.read_page = fsl_ifc_read_page;
+ chip->ecc.write_page = fsl_ifc_write_page;
+
+ csor = ifc_in32(&ifc_global->csor_cs[priv->bank].csor);
+
+ switch (csor & CSOR_NAND_PGS_MASK) {
+ case CSOR_NAND_PGS_512:
+ if (!(chip->options & NAND_BUSWIDTH_16)) {
+ /* Avoid conflict with bad block marker */
+ bbt_main_descr.offs = 0;
+ bbt_mirror_descr.offs = 0;
+ }
+
+ priv->bufnum_mask = 15;
+ break;
+
+ case CSOR_NAND_PGS_2K:
+ priv->bufnum_mask = 3;
+ break;
+
+ case CSOR_NAND_PGS_4K:
+ priv->bufnum_mask = 1;
+ break;
+
+ case CSOR_NAND_PGS_8K:
+ priv->bufnum_mask = 0;
+ break;
+
+ default:
+ dev_err(priv->dev, "bad csor %#x: bad page size\n", csor);
+ return -ENODEV;
+ }
+
+ ret = fsl_ifc_sram_init(priv);
+ if (ret)
+ return ret;
+
+ /*
+	 * IFC version 2.0.0 has 16KB of internal SRAM, compared to 8KB on
+	 * older versions, so the bufnum mask needs to be updated.
+ */
+ if (ctrl->version >= FSL_IFC_VERSION_2_0_0)
+ priv->bufnum_mask = (priv->bufnum_mask * 2) + 1;
+
+ return 0;
+}
+
+static int fsl_ifc_chip_remove(struct fsl_ifc_mtd *priv)
+{
+ struct mtd_info *mtd = nand_to_mtd(&priv->chip);
+
+ kfree(mtd->name);
+
+ if (priv->vbase)
+ iounmap(priv->vbase);
+
+ ifc_nand_ctrl->chips[priv->bank] = NULL;
+
+ return 0;
+}
+
+static int match_bank(struct fsl_ifc_global __iomem *ifc_global, int bank,
+ phys_addr_t addr)
+{
+ u32 cspr = ifc_in32(&ifc_global->cspr_cs[bank].cspr);
+
+ if (!(cspr & CSPR_V))
+ return 0;
+ if ((cspr & CSPR_MSEL) != CSPR_MSEL_NAND)
+ return 0;
+
+ return (cspr & CSPR_BA) == convert_ifc_address(addr);
+}
+
+static DEFINE_MUTEX(fsl_ifc_nand_mutex);
+
+static int fsl_ifc_nand_probe(struct platform_device *dev)
+{
+ struct fsl_ifc_runtime __iomem *ifc;
+ struct fsl_ifc_mtd *priv;
+ struct resource res;
+ static const char *part_probe_types[]
+ = { "cmdlinepart", "RedBoot", "ofpart", NULL };
+ int ret;
+ int bank;
+ struct device_node *node = dev->dev.of_node;
+ struct mtd_info *mtd;
+
+ if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->rregs)
+ return -ENODEV;
+ ifc = fsl_ifc_ctrl_dev->rregs;
+
+ /* get, allocate and map the memory resource */
+ ret = of_address_to_resource(node, 0, &res);
+ if (ret) {
+ dev_err(&dev->dev, "%s: failed to get resource\n", __func__);
+ return ret;
+ }
+
+ /* find which chip select it is connected to */
+ for (bank = 0; bank < fsl_ifc_ctrl_dev->banks; bank++) {
+ if (match_bank(fsl_ifc_ctrl_dev->gregs, bank, res.start))
+ break;
+ }
+
+ if (bank >= fsl_ifc_ctrl_dev->banks) {
+ dev_err(&dev->dev, "%s: address did not match any chip selects\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ priv = devm_kzalloc(&dev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ mutex_lock(&fsl_ifc_nand_mutex);
+ if (!fsl_ifc_ctrl_dev->nand) {
+ ifc_nand_ctrl = kzalloc(sizeof(*ifc_nand_ctrl), GFP_KERNEL);
+ if (!ifc_nand_ctrl) {
+ mutex_unlock(&fsl_ifc_nand_mutex);
+ return -ENOMEM;
+ }
+
+ ifc_nand_ctrl->read_bytes = 0;
+ ifc_nand_ctrl->index = 0;
+ ifc_nand_ctrl->addr = NULL;
+ fsl_ifc_ctrl_dev->nand = ifc_nand_ctrl;
+
+ nand_controller_init(&ifc_nand_ctrl->controller);
+ } else {
+ ifc_nand_ctrl = fsl_ifc_ctrl_dev->nand;
+ }
+ mutex_unlock(&fsl_ifc_nand_mutex);
+
+ ifc_nand_ctrl->chips[bank] = priv;
+ priv->bank = bank;
+ priv->ctrl = fsl_ifc_ctrl_dev;
+ priv->dev = &dev->dev;
+
+ priv->vbase = ioremap(res.start, resource_size(&res));
+ if (!priv->vbase) {
+ dev_err(priv->dev, "%s: failed to map chip region\n", __func__);
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ dev_set_drvdata(priv->dev, priv);
+
+ ifc_out32(IFC_NAND_EVTER_EN_OPC_EN |
+ IFC_NAND_EVTER_EN_FTOER_EN |
+ IFC_NAND_EVTER_EN_WPER_EN,
+ &ifc->ifc_nand.nand_evter_en);
+
+ /* enable NAND Machine Interrupts */
+ ifc_out32(IFC_NAND_EVTER_INTR_OPCIR_EN |
+ IFC_NAND_EVTER_INTR_FTOERIR_EN |
+ IFC_NAND_EVTER_INTR_WPERIR_EN,
+ &ifc->ifc_nand.nand_evter_intr_en);
+
+ mtd = nand_to_mtd(&priv->chip);
+ mtd->name = kasprintf(GFP_KERNEL, "%llx.flash", (u64)res.start);
+ if (!mtd->name) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = fsl_ifc_chip_init(priv);
+ if (ret)
+ goto err;
+
+ priv->chip.controller->ops = &fsl_ifc_controller_ops;
+ ret = nand_scan(&priv->chip, 1);
+ if (ret)
+ goto err;
+
+	/* First look for RedBoot table or partitions on the command
+	 * line; these take precedence over device tree information. */
+ ret = mtd_device_parse_register(mtd, part_probe_types, NULL, NULL, 0);
+ if (ret)
+ goto cleanup_nand;
+
+ dev_info(priv->dev, "IFC NAND device at 0x%llx, bank %d\n",
+ (unsigned long long)res.start, priv->bank);
+
+ return 0;
+
+cleanup_nand:
+ nand_cleanup(&priv->chip);
+err:
+ fsl_ifc_chip_remove(priv);
+
+ return ret;
+}
+
+static int fsl_ifc_nand_remove(struct platform_device *dev)
+{
+ struct fsl_ifc_mtd *priv = dev_get_drvdata(&dev->dev);
+ struct nand_chip *chip = &priv->chip;
+ int ret;
+
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+
+ fsl_ifc_chip_remove(priv);
+
+ mutex_lock(&fsl_ifc_nand_mutex);
+ ifc_nand_ctrl->counter--;
+ if (!ifc_nand_ctrl->counter) {
+ fsl_ifc_ctrl_dev->nand = NULL;
+ kfree(ifc_nand_ctrl);
+ }
+ mutex_unlock(&fsl_ifc_nand_mutex);
+
+ return 0;
+}
+
+static const struct of_device_id fsl_ifc_nand_match[] = {
+ {
+ .compatible = "fsl,ifc-nand",
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, fsl_ifc_nand_match);
+
+static struct platform_driver fsl_ifc_nand_driver = {
+ .driver = {
+ .name = "fsl,ifc-nand",
+ .of_match_table = fsl_ifc_nand_match,
+ },
+ .probe = fsl_ifc_nand_probe,
+ .remove = fsl_ifc_nand_remove,
+};
+
+module_platform_driver(fsl_ifc_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Freescale");
+MODULE_DESCRIPTION("Freescale Integrated Flash Controller MTD NAND driver");
diff --git a/drivers/mtd/nand/raw/fsl_upm.c b/drivers/mtd/nand/raw/fsl_upm.c
new file mode 100644
index 000000000..9f934466d
--- /dev/null
+++ b/drivers/mtd/nand/raw/fsl_upm.c
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Freescale UPM NAND driver.
+ *
+ * Copyright © 2007-2008 MontaVista Software, Inc.
+ *
+ * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/mtd.h>
+#include <linux/of_platform.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <asm/fsl_lbc.h>
+
+struct fsl_upm_nand {
+ struct nand_controller base;
+ struct device *dev;
+ struct nand_chip chip;
+ struct fsl_upm upm;
+ uint8_t upm_addr_offset;
+ uint8_t upm_cmd_offset;
+ void __iomem *io_base;
+ struct gpio_desc *rnb_gpio[NAND_MAX_CHIPS];
+ uint32_t mchip_offsets[NAND_MAX_CHIPS];
+ uint32_t mchip_count;
+ uint32_t mchip_number;
+};
+
+static inline struct fsl_upm_nand *to_fsl_upm_nand(struct mtd_info *mtdinfo)
+{
+ return container_of(mtd_to_nand(mtdinfo), struct fsl_upm_nand,
+ chip);
+}
+
+static int fun_chip_init(struct fsl_upm_nand *fun,
+ const struct device_node *upm_np,
+ const struct resource *io_res)
+{
+ struct mtd_info *mtd = nand_to_mtd(&fun->chip);
+ int ret;
+ struct device_node *flash_np;
+
+ fun->chip.ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+ fun->chip.ecc.algo = NAND_ECC_ALGO_HAMMING;
+ fun->chip.controller = &fun->base;
+ mtd->dev.parent = fun->dev;
+
+ flash_np = of_get_next_child(upm_np, NULL);
+ if (!flash_np)
+ return -ENODEV;
+
+ nand_set_flash_node(&fun->chip, flash_np);
+ mtd->name = devm_kasprintf(fun->dev, GFP_KERNEL, "0x%llx.%pOFn",
+ (u64)io_res->start,
+ flash_np);
+ if (!mtd->name) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = nand_scan(&fun->chip, fun->mchip_count);
+ if (ret)
+ goto err;
+
+ ret = mtd_device_register(mtd, NULL, 0);
+err:
+ of_node_put(flash_np);
+ return ret;
+}
+
+static int func_exec_instr(struct nand_chip *chip,
+ const struct nand_op_instr *instr)
+{
+ struct fsl_upm_nand *fun = to_fsl_upm_nand(nand_to_mtd(chip));
+ u32 mar, reg_offs = fun->mchip_offsets[fun->mchip_number];
+ unsigned int i;
+ const u8 *out;
+ u8 *in;
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ fsl_upm_start_pattern(&fun->upm, fun->upm_cmd_offset);
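+		/* MAR takes the opcode left-justified to the UPM data width */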
+ mar = (instr->ctx.cmd.opcode << (32 - fun->upm.width)) |
+ reg_offs;
+ fsl_upm_run_pattern(&fun->upm, fun->io_base + reg_offs, mar);
+ fsl_upm_end_pattern(&fun->upm);
+ return 0;
+
+ case NAND_OP_ADDR_INSTR:
+ fsl_upm_start_pattern(&fun->upm, fun->upm_addr_offset);
+ for (i = 0; i < instr->ctx.addr.naddrs; i++) {
+ mar = (instr->ctx.addr.addrs[i] << (32 - fun->upm.width)) |
+ reg_offs;
+ fsl_upm_run_pattern(&fun->upm, fun->io_base + reg_offs, mar);
+ }
+ fsl_upm_end_pattern(&fun->upm);
+ return 0;
+
+ case NAND_OP_DATA_IN_INSTR:
+ in = instr->ctx.data.buf.in;
+ for (i = 0; i < instr->ctx.data.len; i++)
+ in[i] = in_8(fun->io_base + reg_offs);
+ return 0;
+
+ case NAND_OP_DATA_OUT_INSTR:
+ out = instr->ctx.data.buf.out;
+ for (i = 0; i < instr->ctx.data.len; i++)
+ out_8(fun->io_base + reg_offs, out[i]);
+ return 0;
+
+ case NAND_OP_WAITRDY_INSTR:
+ if (!fun->rnb_gpio[fun->mchip_number])
+ return nand_soft_waitrdy(chip, instr->ctx.waitrdy.timeout_ms);
+
+ return nand_gpio_waitrdy(chip, fun->rnb_gpio[fun->mchip_number],
+ instr->ctx.waitrdy.timeout_ms);
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int fun_exec_op(struct nand_chip *chip, const struct nand_operation *op,
+ bool check_only)
+{
+ struct fsl_upm_nand *fun = to_fsl_upm_nand(nand_to_mtd(chip));
+ unsigned int i;
+ int ret;
+
+ if (op->cs >= NAND_MAX_CHIPS)
+ return -EINVAL;
+
+ if (check_only)
+ return 0;
+
+ fun->mchip_number = op->cs;
+
+ for (i = 0; i < op->ninstrs; i++) {
+ ret = func_exec_instr(chip, &op->instrs[i]);
+ if (ret)
+ return ret;
+
+ if (op->instrs[i].delay_ns)
+ ndelay(op->instrs[i].delay_ns);
+ }
+
+ return 0;
+}
+
+static const struct nand_controller_ops fun_ops = {
+ .exec_op = fun_exec_op,
+};
+
+static int fun_probe(struct platform_device *ofdev)
+{
+ struct fsl_upm_nand *fun;
+ struct resource *io_res;
+ const __be32 *prop;
+ int ret;
+ int size;
+ int i;
+
+ fun = devm_kzalloc(&ofdev->dev, sizeof(*fun), GFP_KERNEL);
+ if (!fun)
+ return -ENOMEM;
+
+ io_res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
+ fun->io_base = devm_ioremap_resource(&ofdev->dev, io_res);
+ if (IS_ERR(fun->io_base))
+ return PTR_ERR(fun->io_base);
+
+ ret = fsl_upm_find(io_res->start, &fun->upm);
+ if (ret) {
+ dev_err(&ofdev->dev, "can't find UPM\n");
+ return ret;
+ }
+
+ prop = of_get_property(ofdev->dev.of_node, "fsl,upm-addr-offset",
+ &size);
+ if (!prop || size != sizeof(uint32_t)) {
+ dev_err(&ofdev->dev, "can't get UPM address offset\n");
+ return -EINVAL;
+ }
+ fun->upm_addr_offset = *prop;
+
+ prop = of_get_property(ofdev->dev.of_node, "fsl,upm-cmd-offset", &size);
+ if (!prop || size != sizeof(uint32_t)) {
+ dev_err(&ofdev->dev, "can't get UPM command offset\n");
+ return -EINVAL;
+ }
+ fun->upm_cmd_offset = *prop;
+
+ prop = of_get_property(ofdev->dev.of_node,
+ "fsl,upm-addr-line-cs-offsets", &size);
+ if (prop && (size / sizeof(uint32_t)) > 0) {
+ fun->mchip_count = size / sizeof(uint32_t);
+ if (fun->mchip_count >= NAND_MAX_CHIPS) {
+			dev_err(&ofdev->dev, "too many chips\n");
+ return -EINVAL;
+ }
+ for (i = 0; i < fun->mchip_count; i++)
+ fun->mchip_offsets[i] = be32_to_cpu(prop[i]);
+ } else {
+ fun->mchip_count = 1;
+ }
+
+ for (i = 0; i < fun->mchip_count; i++) {
+ fun->rnb_gpio[i] = devm_gpiod_get_index_optional(&ofdev->dev,
+ NULL, i,
+ GPIOD_IN);
+ if (IS_ERR(fun->rnb_gpio[i])) {
+ dev_err(&ofdev->dev, "RNB gpio #%d is invalid\n", i);
+ return PTR_ERR(fun->rnb_gpio[i]);
+ }
+ }
+
+ nand_controller_init(&fun->base);
+ fun->base.ops = &fun_ops;
+ fun->dev = &ofdev->dev;
+
+ ret = fun_chip_init(fun, ofdev->dev.of_node, io_res);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(&ofdev->dev, fun);
+
+ return 0;
+}
+
+static int fun_remove(struct platform_device *ofdev)
+{
+ struct fsl_upm_nand *fun = dev_get_drvdata(&ofdev->dev);
+ struct nand_chip *chip = &fun->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ ret = mtd_device_unregister(mtd);
+ WARN_ON(ret);
+ nand_cleanup(chip);
+
+ return 0;
+}
+
+static const struct of_device_id of_fun_match[] = {
+ { .compatible = "fsl,upm-nand" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_fun_match);
+
+static struct platform_driver of_fun_driver = {
+ .driver = {
+ .name = "fsl,upm-nand",
+ .of_match_table = of_fun_match,
+ },
+ .probe = fun_probe,
+ .remove = fun_remove,
+};
+
+module_platform_driver(of_fun_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Anton Vorontsov <avorontsov@ru.mvista.com>");
+MODULE_DESCRIPTION("Driver for NAND chips working through Freescale "
+ "LocalBus User-Programmable Machine");
diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c
new file mode 100644
index 000000000..3da66e95e
--- /dev/null
+++ b/drivers/mtd/nand/raw/fsmc_nand.c
@@ -0,0 +1,1232 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ST Microelectronics
+ * Flexible Static Memory Controller (FSMC)
+ * Driver for NAND portions
+ *
+ * Copyright © 2010 ST Microelectronics
+ * Vipin Kumar <vipin.kumar@st.com>
+ * Ashish Priyadarshi
+ *
+ * Based on drivers/mtd/nand/nomadik_nand.c (removed in v3.8)
+ * Copyright © 2007 STMicroelectronics Pvt. Ltd.
+ * Copyright © 2009 Alessandro Rubini
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/resource.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/mtd/partitions.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/amba/bus.h>
+#include <mtd/mtd-abi.h>
+
+/* fsmc controller registers for NOR flash */
+#define CTRL 0x0
+ /* ctrl register definitions */
+ #define BANK_ENABLE BIT(0)
+ #define MUXED BIT(1)
+ #define NOR_DEV (2 << 2)
+ #define WIDTH_16 BIT(4)
+ #define RSTPWRDWN BIT(6)
+ #define WPROT BIT(7)
+ #define WRT_ENABLE BIT(12)
+ #define WAIT_ENB BIT(13)
+
+#define CTRL_TIM 0x4
+ /* ctrl_tim register definitions */
+
+#define FSMC_NOR_BANK_SZ 0x8
+#define FSMC_NOR_REG_SIZE 0x40
+
+#define FSMC_NOR_REG(base, bank, reg) ((base) + \
+ (FSMC_NOR_BANK_SZ * (bank)) + \
+ (reg))
+
+/* fsmc controller registers for NAND flash */
+#define FSMC_PC 0x00
+ /* pc register definitions */
+ #define FSMC_RESET BIT(0)
+ #define FSMC_WAITON BIT(1)
+ #define FSMC_ENABLE BIT(2)
+ #define FSMC_DEVTYPE_NAND BIT(3)
+ #define FSMC_DEVWID_16 BIT(4)
+ #define FSMC_ECCEN BIT(6)
+ #define FSMC_ECCPLEN_256 BIT(7)
+ #define FSMC_TCLR_SHIFT (9)
+ #define FSMC_TCLR_MASK (0xF)
+ #define FSMC_TAR_SHIFT (13)
+ #define FSMC_TAR_MASK (0xF)
+#define STS 0x04
+ /* sts register definitions */
+ #define FSMC_CODE_RDY BIT(15)
+#define COMM 0x08
+ /* comm register definitions */
+ #define FSMC_TSET_SHIFT 0
+ #define FSMC_TSET_MASK 0xFF
+ #define FSMC_TWAIT_SHIFT 8
+ #define FSMC_TWAIT_MASK 0xFF
+ #define FSMC_THOLD_SHIFT 16
+ #define FSMC_THOLD_MASK 0xFF
+ #define FSMC_THIZ_SHIFT 24
+ #define FSMC_THIZ_MASK 0xFF
+#define ATTRIB 0x0C
+#define IOATA 0x10
+#define ECC1 0x14
+#define ECC2 0x18
+#define ECC3 0x1C
+#define FSMC_NAND_BANK_SZ 0x20
+
+#define FSMC_BUSY_WAIT_TIMEOUT (1 * HZ)
+
+/*
+ * According to SPEAr300 Reference Manual (RM0082)
+ * TOUDEL = 7ns (Output delay from the flip-flops to the board)
+ * TINDEL = 5ns (Input delay from the board to the flipflop)
+ */
+#define TOUTDEL 7000
+#define TINDEL 5000
+
+struct fsmc_nand_timings {
+ u8 tclr;
+ u8 tar;
+ u8 thiz;
+ u8 thold;
+ u8 twait;
+ u8 tset;
+};
+
+enum access_mode {
+ USE_DMA_ACCESS = 1,
+ USE_WORD_ACCESS,
+};
+
+/**
+ * struct fsmc_nand_data - structure for FSMC NAND device state
+ *
+ * @base: Inherit from the nand_controller struct
+ * @pid: Part ID in the AMBA PrimeCell format
+ * @nand: Chip related info for a NAND flash.
+ *
+ * @bank: Bank number for probed device.
+ * @dev: Parent device
+ * @mode: Access mode
+ * @clk: Clock structure for FSMC.
+ *
+ * @read_dma_chan: DMA channel for read access
+ * @write_dma_chan: DMA channel for write access to NAND
+ * @dma_access_complete: Completion structure
+ *
+ * @dev_timings: NAND timings
+ *
+ * @data_pa: NAND Physical port for Data.
+ * @data_va: NAND port for Data.
+ * @cmd_va: NAND port for Command.
+ * @addr_va: NAND port for Address.
+ * @regs_va: Registers base address for a given bank.
+ */
+struct fsmc_nand_data {
+ struct nand_controller base;
+ u32 pid;
+ struct nand_chip nand;
+
+ unsigned int bank;
+ struct device *dev;
+ enum access_mode mode;
+ struct clk *clk;
+
+ /* DMA related objects */
+ struct dma_chan *read_dma_chan;
+ struct dma_chan *write_dma_chan;
+ struct completion dma_access_complete;
+
+ struct fsmc_nand_timings *dev_timings;
+
+ dma_addr_t data_pa;
+ void __iomem *data_va;
+ void __iomem *cmd_va;
+ void __iomem *addr_va;
+ void __iomem *regs_va;
+};
+
+static int fsmc_ecc1_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section >= chip->ecc.steps)
+ return -ERANGE;
+
+ oobregion->offset = (section * 16) + 2;
+ oobregion->length = 3;
+
+ return 0;
+}
+
+static int fsmc_ecc1_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section >= chip->ecc.steps)
+ return -ERANGE;
+
+ oobregion->offset = (section * 16) + 8;
+
+ if (section < chip->ecc.steps - 1)
+ oobregion->length = 8;
+ else
+ oobregion->length = mtd->oobsize - oobregion->offset;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops fsmc_ecc1_ooblayout_ops = {
+ .ecc = fsmc_ecc1_ooblayout_ecc,
+ .free = fsmc_ecc1_ooblayout_free,
+};
+
+/*
+ * ECC placement definitions in oobfree type format.
+ * There are 13 bytes of ECC for every 512 byte block, and they have to be
+ * read consecutively, immediately after the 512 byte data block, for the
+ * hardware to generate the error bit offsets within the 512 bytes of data.
+ */
+static int fsmc_ecc4_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section >= chip->ecc.steps)
+ return -ERANGE;
+
+ oobregion->length = chip->ecc.bytes;
+
+ if (!section && mtd->writesize <= 512)
+ oobregion->offset = 0;
+ else
+ oobregion->offset = (section * 16) + 2;
+
+ return 0;
+}
+
+static int fsmc_ecc4_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section >= chip->ecc.steps)
+ return -ERANGE;
+
+ oobregion->offset = (section * 16) + 15;
+
+ if (section < chip->ecc.steps - 1)
+ oobregion->length = 3;
+ else
+ oobregion->length = mtd->oobsize - oobregion->offset;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops fsmc_ecc4_ooblayout_ops = {
+ .ecc = fsmc_ecc4_ooblayout_ecc,
+ .free = fsmc_ecc4_ooblayout_free,
+};
+
+static inline struct fsmc_nand_data *nand_to_fsmc(struct nand_chip *chip)
+{
+ return container_of(chip, struct fsmc_nand_data, nand);
+}
+
+/*
+ * fsmc_nand_setup - FSMC (Flexible Static Memory Controller) init routine
+ *
+ * This routine initializes timing parameters related to NAND memory access in
+ * FSMC registers
+ */
+static void fsmc_nand_setup(struct fsmc_nand_data *host,
+ struct fsmc_nand_timings *tims)
+{
+ u32 value = FSMC_DEVTYPE_NAND | FSMC_ENABLE | FSMC_WAITON;
+ u32 tclr, tar, thiz, thold, twait, tset;
+
+ tclr = (tims->tclr & FSMC_TCLR_MASK) << FSMC_TCLR_SHIFT;
+ tar = (tims->tar & FSMC_TAR_MASK) << FSMC_TAR_SHIFT;
+ thiz = (tims->thiz & FSMC_THIZ_MASK) << FSMC_THIZ_SHIFT;
+ thold = (tims->thold & FSMC_THOLD_MASK) << FSMC_THOLD_SHIFT;
+ twait = (tims->twait & FSMC_TWAIT_MASK) << FSMC_TWAIT_SHIFT;
+ tset = (tims->tset & FSMC_TSET_MASK) << FSMC_TSET_SHIFT;
+
+ if (host->nand.options & NAND_BUSWIDTH_16)
+ value |= FSMC_DEVWID_16;
+
+ writel_relaxed(value | tclr | tar, host->regs_va + FSMC_PC);
+ writel_relaxed(thiz | thold | twait | tset, host->regs_va + COMM);
+ writel_relaxed(thiz | thold | twait | tset, host->regs_va + ATTRIB);
+}
+
+static int fsmc_calc_timings(struct fsmc_nand_data *host,
+ const struct nand_sdr_timings *sdrt,
+ struct fsmc_nand_timings *tims)
+{
+ unsigned long hclk = clk_get_rate(host->clk);
+ unsigned long hclkn = NSEC_PER_SEC / hclk;
+ u32 thiz, thold, twait, tset, twait_min;
+
+ if (sdrt->tRC_min < 30000)
+ return -EOPNOTSUPP;
+
+ tims->tar = DIV_ROUND_UP(sdrt->tAR_min / 1000, hclkn) - 1;
+ if (tims->tar > FSMC_TAR_MASK)
+ tims->tar = FSMC_TAR_MASK;
+ tims->tclr = DIV_ROUND_UP(sdrt->tCLR_min / 1000, hclkn) - 1;
+ if (tims->tclr > FSMC_TCLR_MASK)
+ tims->tclr = FSMC_TCLR_MASK;
+
+ thiz = sdrt->tCS_min - sdrt->tWP_min;
+ tims->thiz = DIV_ROUND_UP(thiz / 1000, hclkn);
+
+ thold = sdrt->tDH_min;
+ if (thold < sdrt->tCH_min)
+ thold = sdrt->tCH_min;
+ if (thold < sdrt->tCLH_min)
+ thold = sdrt->tCLH_min;
+ if (thold < sdrt->tWH_min)
+ thold = sdrt->tWH_min;
+ if (thold < sdrt->tALH_min)
+ thold = sdrt->tALH_min;
+ if (thold < sdrt->tREH_min)
+ thold = sdrt->tREH_min;
+ tims->thold = DIV_ROUND_UP(thold / 1000, hclkn);
+ if (tims->thold == 0)
+ tims->thold = 1;
+ else if (tims->thold > FSMC_THOLD_MASK)
+ tims->thold = FSMC_THOLD_MASK;
+
+ tset = max(sdrt->tCS_min - sdrt->tWP_min,
+ sdrt->tCEA_max - sdrt->tREA_max);
+ tims->tset = DIV_ROUND_UP(tset / 1000, hclkn) - 1;
+ if (tims->tset == 0)
+ tims->tset = 1;
+ else if (tims->tset > FSMC_TSET_MASK)
+ tims->tset = FSMC_TSET_MASK;
+
+ /*
+ * According to SPEAr300 Reference Manual (RM0082) which gives more
+ * information related to FSMC timings than the SPEAr600 one (RM0305),
+ * twait >= tCEA - (tset * TCLK) + TOUTDEL + TINDEL
+ */
+ twait_min = sdrt->tCEA_max - ((tims->tset + 1) * hclkn * 1000)
+ + TOUTDEL + TINDEL;
+ twait = max3(sdrt->tRP_min, sdrt->tWP_min, twait_min);
+
+ tims->twait = DIV_ROUND_UP(twait / 1000, hclkn) - 1;
+ if (tims->twait == 0)
+ tims->twait = 1;
+ else if (tims->twait > FSMC_TWAIT_MASK)
+ tims->twait = FSMC_TWAIT_MASK;
+
+ return 0;
+}
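+
+/*
+ * Worked example (hypothetical clocking, not from a datasheet): with
+ * hclk = 166 MHz, hclkn = 6 ns (integer division). For tCLR_min = 20000 ps
+ * the code above computes DIV_ROUND_UP(20, 6) - 1 = 3; the register fields
+ * appear to encode "cycles minus one", so TCLR stretches to 4 HCLK cycles.
+ */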
+
+static int fsmc_setup_interface(struct nand_chip *nand, int csline,
+ const struct nand_interface_config *conf)
+{
+ struct fsmc_nand_data *host = nand_to_fsmc(nand);
+ struct fsmc_nand_timings tims;
+ const struct nand_sdr_timings *sdrt;
+ int ret;
+
+ sdrt = nand_get_sdr_timings(conf);
+ if (IS_ERR(sdrt))
+ return PTR_ERR(sdrt);
+
+ ret = fsmc_calc_timings(host, sdrt, &tims);
+ if (ret)
+ return ret;
+
+ if (csline == NAND_DATA_IFACE_CHECK_ONLY)
+ return 0;
+
+ fsmc_nand_setup(host, &tims);
+
+ return 0;
+}
+
+/*
+ * fsmc_enable_hwecc - Enables Hardware ECC through FSMC registers
+ */
+static void fsmc_enable_hwecc(struct nand_chip *chip, int mode)
+{
+ struct fsmc_nand_data *host = nand_to_fsmc(chip);
+
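+ /*
+ * Clear FSMC_ECCPLEN_256 (likely selecting the 512 byte ECC step used
+ * throughout this driver), then toggle ECCEN: disabling and re-enabling
+ * the engine restarts the ECC computation for the transfer that follows.
+ */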
+ writel_relaxed(readl(host->regs_va + FSMC_PC) & ~FSMC_ECCPLEN_256,
+ host->regs_va + FSMC_PC);
+ writel_relaxed(readl(host->regs_va + FSMC_PC) & ~FSMC_ECCEN,
+ host->regs_va + FSMC_PC);
+ writel_relaxed(readl(host->regs_va + FSMC_PC) | FSMC_ECCEN,
+ host->regs_va + FSMC_PC);
+}
+
+/*
+ * fsmc_read_hwecc_ecc4 - Hardware ECC calculator for ecc4 option supported by
+ * FSMC. ECC is 13 bytes for 512 bytes of data (supports error correction of
+ * up to 8 bits)
+ */
+static int fsmc_read_hwecc_ecc4(struct nand_chip *chip, const u8 *data,
+ u8 *ecc)
+{
+ struct fsmc_nand_data *host = nand_to_fsmc(chip);
+ u32 ecc_tmp;
+ unsigned long deadline = jiffies + FSMC_BUSY_WAIT_TIMEOUT;
+
+ do {
+ if (readl_relaxed(host->regs_va + STS) & FSMC_CODE_RDY)
+ break;
+
+ cond_resched();
+ } while (!time_after_eq(jiffies, deadline));
+
+ if (time_after_eq(jiffies, deadline)) {
+ dev_err(host->dev, "calculate ecc timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ ecc_tmp = readl_relaxed(host->regs_va + ECC1);
+ ecc[0] = ecc_tmp;
+ ecc[1] = ecc_tmp >> 8;
+ ecc[2] = ecc_tmp >> 16;
+ ecc[3] = ecc_tmp >> 24;
+
+ ecc_tmp = readl_relaxed(host->regs_va + ECC2);
+ ecc[4] = ecc_tmp;
+ ecc[5] = ecc_tmp >> 8;
+ ecc[6] = ecc_tmp >> 16;
+ ecc[7] = ecc_tmp >> 24;
+
+ ecc_tmp = readl_relaxed(host->regs_va + ECC3);
+ ecc[8] = ecc_tmp;
+ ecc[9] = ecc_tmp >> 8;
+ ecc[10] = ecc_tmp >> 16;
+ ecc[11] = ecc_tmp >> 24;
+
+ ecc_tmp = readl_relaxed(host->regs_va + STS);
+ ecc[12] = ecc_tmp >> 16;
+
+ return 0;
+}
+
+/*
+ * fsmc_read_hwecc_ecc1 - Hardware ECC calculator for ecc1 option supported by
+ * FSMC. ECC is 3 bytes for 512 bytes of data (supports error correction of
+ * up to 1 bit)
+ */
+static int fsmc_read_hwecc_ecc1(struct nand_chip *chip, const u8 *data,
+ u8 *ecc)
+{
+ struct fsmc_nand_data *host = nand_to_fsmc(chip);
+ u32 ecc_tmp;
+
+ ecc_tmp = readl_relaxed(host->regs_va + ECC1);
+ ecc[0] = ecc_tmp;
+ ecc[1] = ecc_tmp >> 8;
+ ecc[2] = ecc_tmp >> 16;
+
+ return 0;
+}
+
+/* Count the number of 0 bits in buff, up to a maximum of max_bits */
+static int count_written_bits(u8 *buff, int size, int max_bits)
+{
+ int k, written_bits = 0;
+
+ for (k = 0; k < size; k++) {
+ written_bits += hweight8(~buff[k]);
+ if (written_bits > max_bits)
+ break;
+ }
+
+ return written_bits;
+}
+
+static void dma_complete(void *param)
+{
+ struct fsmc_nand_data *host = param;
+
+ complete(&host->dma_access_complete);
+}
+
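+/*
+ * dma_xfer - one DMA memcpy between a kernel buffer and the NAND data port
+ * @host: FSMC NAND controller
+ * @buffer: kernel buffer to map for DMA
+ * @len: number of bytes to transfer
+ * @direction: DMA_TO_DEVICE for writes, DMA_FROM_DEVICE for reads
+ *
+ * Maps the buffer, submits a memcpy descriptor on the matching channel and
+ * waits up to 3 s for the completion callback before unmapping.
+ */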
+static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len,
+ enum dma_data_direction direction)
+{
+ struct dma_chan *chan;
+ struct dma_device *dma_dev;
+ struct dma_async_tx_descriptor *tx;
+ dma_addr_t dma_dst, dma_src, dma_addr;
+ dma_cookie_t cookie;
+ unsigned long flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+ int ret;
+ unsigned long time_left;
+
+ if (direction == DMA_TO_DEVICE)
+ chan = host->write_dma_chan;
+ else if (direction == DMA_FROM_DEVICE)
+ chan = host->read_dma_chan;
+ else
+ return -EINVAL;
+
+ dma_dev = chan->device;
+ dma_addr = dma_map_single(dma_dev->dev, buffer, len, direction);
+
+ if (direction == DMA_TO_DEVICE) {
+ dma_src = dma_addr;
+ dma_dst = host->data_pa;
+ } else {
+ dma_src = host->data_pa;
+ dma_dst = dma_addr;
+ }
+
+ tx = dma_dev->device_prep_dma_memcpy(chan, dma_dst, dma_src,
+ len, flags);
+ if (!tx) {
+ dev_err(host->dev, "device_prep_dma_memcpy error\n");
+ ret = -EIO;
+ goto unmap_dma;
+ }
+
+ tx->callback = dma_complete;
+ tx->callback_param = host;
+ cookie = tx->tx_submit(tx);
+
+ ret = dma_submit_error(cookie);
+ if (ret) {
+ dev_err(host->dev, "dma_submit_error %d\n", cookie);
+ goto unmap_dma;
+ }
+
+ dma_async_issue_pending(chan);
+
+ time_left =
+ wait_for_completion_timeout(&host->dma_access_complete,
+ msecs_to_jiffies(3000));
+ if (time_left == 0) {
+ dmaengine_terminate_all(chan);
+ dev_err(host->dev, "wait_for_completion_timeout\n");
+ ret = -ETIMEDOUT;
+ goto unmap_dma;
+ }
+
+ ret = 0;
+
+unmap_dma:
+ dma_unmap_single(dma_dev->dev, dma_addr, len, direction);
+
+ return ret;
+}
+
+/*
+ * fsmc_write_buf - write buffer to chip
+ * @host: FSMC NAND controller
+ * @buf: data buffer
+ * @len: number of bytes to write
+ */
+static void fsmc_write_buf(struct fsmc_nand_data *host, const u8 *buf,
+ int len)
+{
+ int i;
+
+ if (IS_ALIGNED((uintptr_t)buf, sizeof(u32)) &&
+ IS_ALIGNED(len, sizeof(u32))) {
+ u32 *p = (u32 *)buf;
+
+ len = len >> 2;
+ for (i = 0; i < len; i++)
+ writel_relaxed(p[i], host->data_va);
+ } else {
+ for (i = 0; i < len; i++)
+ writeb_relaxed(buf[i], host->data_va);
+ }
+}
+
+/*
+ * fsmc_read_buf - read chip data into buffer
+ * @host: FSMC NAND controller
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ */
+static void fsmc_read_buf(struct fsmc_nand_data *host, u8 *buf, int len)
+{
+ int i;
+
+ if (IS_ALIGNED((uintptr_t)buf, sizeof(u32)) &&
+ IS_ALIGNED(len, sizeof(u32))) {
+ u32 *p = (u32 *)buf;
+
+ len = len >> 2;
+ for (i = 0; i < len; i++)
+ p[i] = readl_relaxed(host->data_va);
+ } else {
+ for (i = 0; i < len; i++)
+ buf[i] = readb_relaxed(host->data_va);
+ }
+}
+
+/*
+ * fsmc_read_buf_dma - read chip data into buffer
+ * @host: FSMC NAND controller
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ */
+static void fsmc_read_buf_dma(struct fsmc_nand_data *host, u8 *buf,
+ int len)
+{
+ dma_xfer(host, buf, len, DMA_FROM_DEVICE);
+}
+
+/*
+ * fsmc_write_buf_dma - write buffer to chip
+ * @host: FSMC NAND controller
+ * @buf: data buffer
+ * @len: number of bytes to write
+ */
+static void fsmc_write_buf_dma(struct fsmc_nand_data *host, const u8 *buf,
+ int len)
+{
+ dma_xfer(host, (void *)buf, len, DMA_TO_DEVICE);
+}
+
+/*
+ * fsmc_exec_op - hook called by the core to execute NAND operations
+ *
+ * This controller is simple enough that it does not need the parser
+ * provided by the core; instead, every situation is handled here.
+ */
+static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op,
+ bool check_only)
+{
+ struct fsmc_nand_data *host = nand_to_fsmc(chip);
+ const struct nand_op_instr *instr = NULL;
+ int ret = 0;
+ unsigned int op_id;
+ int i;
+
+ if (check_only)
+ return 0;
+
+ pr_debug("Executing operation [%d instructions]:\n", op->ninstrs);
+
+ for (op_id = 0; op_id < op->ninstrs; op_id++) {
+ instr = &op->instrs[op_id];
+
+ nand_op_trace(" ", instr);
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ writeb_relaxed(instr->ctx.cmd.opcode, host->cmd_va);
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ for (i = 0; i < instr->ctx.addr.naddrs; i++)
+ writeb_relaxed(instr->ctx.addr.addrs[i],
+ host->addr_va);
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ if (host->mode == USE_DMA_ACCESS)
+ fsmc_read_buf_dma(host, instr->ctx.data.buf.in,
+ instr->ctx.data.len);
+ else
+ fsmc_read_buf(host, instr->ctx.data.buf.in,
+ instr->ctx.data.len);
+ break;
+
+ case NAND_OP_DATA_OUT_INSTR:
+ if (host->mode == USE_DMA_ACCESS)
+ fsmc_write_buf_dma(host,
+ instr->ctx.data.buf.out,
+ instr->ctx.data.len);
+ else
+ fsmc_write_buf(host, instr->ctx.data.buf.out,
+ instr->ctx.data.len);
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ ret = nand_soft_waitrdy(chip,
+ instr->ctx.waitrdy.timeout_ms);
+ break;
+ }
+
+ if (instr->delay_ns)
+ ndelay(instr->delay_ns);
+ }
+
+ return ret;
+}
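+
+/*
+ * For reference: a typical large-page read issued by the core arrives here
+ * as CMD(0x00), ADDR(column + row), CMD(0x30), WAITRDY and a DATA_IN
+ * instruction, each handled by one switch arm above.
+ */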
+
+/*
+ * fsmc_read_page_hwecc
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller expects OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * This routine is needed for FSMC version 8 as reading from the NAND chip
+ * has to be performed in a strict sequence as follows:
+ * data (512 bytes) -> ecc (13 bytes)
+ * After this read, the fsmc hardware generates and reports the error data
+ * bits (up to a max of 8 bits)
+ */
+static int fsmc_read_page_hwecc(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int i, j, s, stat, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ u8 *p = buf;
+ u8 *ecc_calc = chip->ecc.calc_buf;
+ u8 *ecc_code = chip->ecc.code_buf;
+ int off, len, ret, group = 0;
+ /*
+ * ecc_oob is intentionally declared as u16. On 16-bit devices, we
+ * end up reading 14 bytes (7 words) from the OOB area. The local
+ * array maintains word alignment.
+ */
+ u16 ecc_oob[7];
+ u8 *oob = (u8 *)&ecc_oob[0];
+ unsigned int max_bitflips = 0;
+
+ for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, p += eccsize) {
+ nand_read_page_op(chip, page, s * eccsize, NULL, 0);
+ chip->ecc.hwctl(chip, NAND_ECC_READ);
+ ret = nand_read_data_op(chip, p, eccsize, false, false);
+ if (ret)
+ return ret;
+
+ for (j = 0; j < eccbytes;) {
+ struct mtd_oob_region oobregion;
+
+ ret = mtd_ooblayout_ecc(mtd, group++, &oobregion);
+ if (ret)
+ return ret;
+
+ off = oobregion.offset;
+ len = oobregion.length;
+
+ /*
+ * length is intentionally rounded up to a multiple of 2 so
+ * that at least 13 bytes are read even on 16-bit NAND
+ * devices
+ */
+ if (chip->options & NAND_BUSWIDTH_16)
+ len = roundup(len, 2);
+
+ nand_read_oob_op(chip, page, off, oob + j, len);
+ j += len;
+ }
+
+ memcpy(&ecc_code[i], oob, chip->ecc.bytes);
+ chip->ecc.calculate(chip, p, &ecc_calc[i]);
+
+ stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+ }
+
+ return max_bitflips;
+}
+
+/*
+ * fsmc_bch8_correct_data
+ * @chip: nand chip info structure
+ * @dat: buffer of read data
+ * @read_ecc: ecc read from device spare area
+ * @calc_ecc: ecc calculated from read data
+ *
+ * calc_ecc is 104 bits of information, holding a maximum of 8 error
+ * offsets of 13 bits each for 512 bytes of read data.
+ */
+static int fsmc_bch8_correct_data(struct nand_chip *chip, u8 *dat,
+ u8 *read_ecc, u8 *calc_ecc)
+{
+ struct fsmc_nand_data *host = nand_to_fsmc(chip);
+ u32 err_idx[8];
+ u32 num_err, i;
+ u32 ecc1, ecc2, ecc3, ecc4;
+
+ num_err = (readl_relaxed(host->regs_va + STS) >> 10) & 0xF;
+
+ /* no bit flipping */
+ if (likely(num_err == 0))
+ return 0;
+
+ /* too many errors */
+ if (unlikely(num_err > 8)) {
+ /*
+ * This is a temporary erase check. A newly erased page read
+ * would result in an ecc error because the oob data is also
+ * erased to FF and the calculated ecc for an FF data is not
+ * FF..FF.
+ * This is a workaround to skip performing correction in case
+ * data is FF..FF
+ *
+ * Logic:
+ * For every page, each bit written as 0 is counted until these
+ * number of bits are greater than 8 (the maximum correction
+ * capability of FSMC for each 512 + 13 bytes)
+ */
+
+ int bits_ecc = count_written_bits(read_ecc, chip->ecc.bytes, 8);
+ int bits_data = count_written_bits(dat, chip->ecc.size, 8);
+
+ if ((bits_ecc + bits_data) <= 8) {
+ if (bits_data)
+ memset(dat, 0xff, chip->ecc.size);
+ return bits_data;
+ }
+
+ return -EBADMSG;
+ }
+
+ /*
+ * ------------------- calc_ecc[] bit wise -----------|--13 bits--|
+ * |---idx[7]--|--.....-----|---idx[2]--||---idx[1]--||---idx[0]--|
+ *
+ * calc_ecc is 104 bits of information, holding a maximum of 8 error
+ * offsets of 13 bits each. The ECC registers are read out below and
+ * the error offset indexes are unpacked into the err_idx array.
+ */
+ ecc1 = readl_relaxed(host->regs_va + ECC1);
+ ecc2 = readl_relaxed(host->regs_va + ECC2);
+ ecc3 = readl_relaxed(host->regs_va + ECC3);
+ ecc4 = readl_relaxed(host->regs_va + STS);
+
+ err_idx[0] = (ecc1 >> 0) & 0x1FFF;
+ err_idx[1] = (ecc1 >> 13) & 0x1FFF;
+ err_idx[2] = (((ecc2 >> 0) & 0x7F) << 6) | ((ecc1 >> 26) & 0x3F);
+ err_idx[3] = (ecc2 >> 7) & 0x1FFF;
+ err_idx[4] = (((ecc3 >> 0) & 0x1) << 12) | ((ecc2 >> 20) & 0xFFF);
+ err_idx[5] = (ecc3 >> 1) & 0x1FFF;
+ err_idx[6] = (ecc3 >> 14) & 0x1FFF;
+ err_idx[7] = (((ecc4 >> 16) & 0xFF) << 5) | ((ecc3 >> 27) & 0x1F);
+
+ i = 0;
+ while (num_err--) {
+ err_idx[i] ^= 3;
+
+ if (err_idx[i] < chip->ecc.size * 8) {
+ int err = err_idx[i];
+
+ dat[err >> 3] ^= BIT(err & 7);
+ i++;
+ }
+ }
+ return i;
+}
+
+static bool filter(struct dma_chan *chan, void *slave)
+{
+ chan->private = slave;
+ return true;
+}
+
+static int fsmc_nand_probe_config_dt(struct platform_device *pdev,
+ struct fsmc_nand_data *host,
+ struct nand_chip *nand)
+{
+ struct device_node *np = pdev->dev.of_node;
+ u32 val;
+ int ret;
+
+ nand->options = 0;
+
+ if (!of_property_read_u32(np, "bank-width", &val)) {
+ if (val == 2) {
+ nand->options |= NAND_BUSWIDTH_16;
+ } else if (val != 1) {
+ dev_err(&pdev->dev, "invalid bank-width %u\n", val);
+ return -EINVAL;
+ }
+ }
+
+ if (of_get_property(np, "nand-skip-bbtscan", NULL))
+ nand->options |= NAND_SKIP_BBTSCAN;
+
+ host->dev_timings = devm_kzalloc(&pdev->dev,
+ sizeof(*host->dev_timings),
+ GFP_KERNEL);
+ if (!host->dev_timings)
+ return -ENOMEM;
+
+ ret = of_property_read_u8_array(np, "timings", (u8 *)host->dev_timings,
+ sizeof(*host->dev_timings));
+ if (ret)
+ host->dev_timings = NULL;
+
+ /* Set default NAND bank to 0 */
+ host->bank = 0;
+ if (!of_property_read_u32(np, "bank", &val)) {
+ if (val > 3) {
+ dev_err(&pdev->dev, "invalid bank %u\n", val);
+ return -EINVAL;
+ }
+ host->bank = val;
+ }
+ return 0;
+}
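+
+/*
+ * A hypothetical DT fragment matched by the parsing above (all values are
+ * illustrative only; "timings" follows struct fsmc_nand_timings order:
+ * tclr, tar, thiz, thold, twait, tset):
+ *
+ *	bank-width = <1>;
+ *	nand-skip-bbtscan;
+ *	timings = /bits/ 8 <0 0 0 2 3 0>;
+ *	bank = <0>;
+ */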
+
+static int fsmc_nand_attach_chip(struct nand_chip *nand)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct fsmc_nand_data *host = nand_to_fsmc(nand);
+
+ if (nand->ecc.engine_type == NAND_ECC_ENGINE_TYPE_INVALID)
+ nand->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+
+ if (!nand->ecc.size)
+ nand->ecc.size = 512;
+
+ if (AMBA_REV_BITS(host->pid) >= 8) {
+ nand->ecc.read_page = fsmc_read_page_hwecc;
+ nand->ecc.calculate = fsmc_read_hwecc_ecc4;
+ nand->ecc.correct = fsmc_bch8_correct_data;
+ nand->ecc.bytes = 13;
+ nand->ecc.strength = 8;
+ }
+
+ if (AMBA_REV_BITS(host->pid) >= 8) {
+ switch (mtd->oobsize) {
+ case 16:
+ case 64:
+ case 128:
+ case 224:
+ case 256:
+ break;
+ default:
+ dev_warn(host->dev,
+ "No oob scheme defined for oobsize %d\n",
+ mtd->oobsize);
+ return -EINVAL;
+ }
+
+ mtd_set_ooblayout(mtd, &fsmc_ecc4_ooblayout_ops);
+
+ return 0;
+ }
+
+ switch (nand->ecc.engine_type) {
+ case NAND_ECC_ENGINE_TYPE_ON_HOST:
+ dev_info(host->dev, "Using 1-bit HW ECC scheme\n");
+ nand->ecc.calculate = fsmc_read_hwecc_ecc1;
+ nand->ecc.correct = nand_correct_data;
+ nand->ecc.hwctl = fsmc_enable_hwecc;
+ nand->ecc.bytes = 3;
+ nand->ecc.strength = 1;
+ nand->ecc.options |= NAND_ECC_SOFT_HAMMING_SM_ORDER;
+ break;
+
+ case NAND_ECC_ENGINE_TYPE_SOFT:
+ if (nand->ecc.algo == NAND_ECC_ALGO_BCH) {
+ dev_info(host->dev,
+ "Using 4-bit SW BCH ECC scheme\n");
+ break;
+ }
+ break;
+
+ case NAND_ECC_ENGINE_TYPE_ON_DIE:
+ break;
+
+ default:
+ dev_err(host->dev, "Unsupported ECC mode!\n");
+ return -ENOTSUPP;
+ }
+
+ /*
+ * Don't set layout for BCH4 SW ECC. This will be
+ * generated later by nand_bch_init().
+ */
+ if (nand->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST) {
+ switch (mtd->oobsize) {
+ case 16:
+ case 64:
+ case 128:
+ mtd_set_ooblayout(mtd,
+ &fsmc_ecc1_ooblayout_ops);
+ break;
+ default:
+ dev_warn(host->dev,
+ "No oob scheme defined for oobsize %d\n",
+ mtd->oobsize);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static const struct nand_controller_ops fsmc_nand_controller_ops = {
+ .attach_chip = fsmc_nand_attach_chip,
+ .exec_op = fsmc_exec_op,
+ .setup_interface = fsmc_setup_interface,
+};
+
+/**
+ * fsmc_nand_disable() - Disables the NAND bank
+ * @host: The instance to disable
+ */
+static void fsmc_nand_disable(struct fsmc_nand_data *host)
+{
+ u32 val;
+
+ val = readl(host->regs_va + FSMC_PC);
+ val &= ~FSMC_ENABLE;
+ writel(val, host->regs_va + FSMC_PC);
+}
+
+/*
+ * fsmc_nand_probe - Probe function
+ * @pdev: platform device structure
+ */
+static int __init fsmc_nand_probe(struct platform_device *pdev)
+{
+ struct fsmc_nand_data *host;
+ struct mtd_info *mtd;
+ struct nand_chip *nand;
+ struct resource *res;
+ void __iomem *base;
+ dma_cap_mask_t mask;
+ int ret = 0;
+ u32 pid;
+ int i;
+
+ /* Allocate memory for the device structure (and zero it) */
+ host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
+
+ nand = &host->nand;
+
+ ret = fsmc_nand_probe_config_dt(pdev, host, nand);
+ if (ret)
+ return ret;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data");
+ host->data_va = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(host->data_va))
+ return PTR_ERR(host->data_va);
+
+ host->data_pa = (dma_addr_t)res->start;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_addr");
+ host->addr_va = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(host->addr_va))
+ return PTR_ERR(host->addr_va);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_cmd");
+ host->cmd_va = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(host->cmd_va))
+ return PTR_ERR(host->cmd_va);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fsmc_regs");
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ host->regs_va = base + FSMC_NOR_REG_SIZE +
+ (host->bank * FSMC_NAND_BANK_SZ);
+
+ host->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(host->clk)) {
+ dev_err(&pdev->dev, "failed to fetch block clock\n");
+ return PTR_ERR(host->clk);
+ }
+
+ ret = clk_prepare_enable(host->clk);
+ if (ret)
+ return ret;
+
+ /*
+ * This device ID is actually a common AMBA ID as used on the
+ * AMBA PrimeCell bus. However it is not a PrimeCell.
+ */
+ for (pid = 0, i = 0; i < 4; i++)
+ pid |= (readl(base + resource_size(res) - 0x20 + 4 * i) &
+ 255) << (i * 8);
+
+ host->pid = pid;
+
+ dev_info(&pdev->dev,
+ "FSMC device partno %03x, manufacturer %02x, revision %02x, config %02x\n",
+ AMBA_PART_BITS(pid), AMBA_MANF_BITS(pid),
+ AMBA_REV_BITS(pid), AMBA_CONFIG_BITS(pid));
+
+ host->dev = &pdev->dev;
+
+ if (host->mode == USE_DMA_ACCESS)
+ init_completion(&host->dma_access_complete);
+
+ /* Link all private pointers */
+ mtd = nand_to_mtd(&host->nand);
+ nand_set_flash_node(nand, pdev->dev.of_node);
+
+ mtd->dev.parent = &pdev->dev;
+
+ nand->badblockbits = 7;
+
+ if (host->mode == USE_DMA_ACCESS) {
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_MEMCPY, mask);
+ host->read_dma_chan = dma_request_channel(mask, filter, NULL);
+ if (!host->read_dma_chan) {
+ dev_err(&pdev->dev, "Unable to get read dma channel\n");
+ ret = -ENODEV;
+ goto disable_clk;
+ }
+ host->write_dma_chan = dma_request_channel(mask, filter, NULL);
+ if (!host->write_dma_chan) {
+ dev_err(&pdev->dev, "Unable to get write dma channel\n");
+ ret = -ENODEV;
+ goto release_dma_read_chan;
+ }
+ }
+
+ if (host->dev_timings) {
+ fsmc_nand_setup(host, host->dev_timings);
+ nand->options |= NAND_KEEP_TIMINGS;
+ }
+
+ nand_controller_init(&host->base);
+ host->base.ops = &fsmc_nand_controller_ops;
+ nand->controller = &host->base;
+
+ /*
+ * Scan to find existence of the device
+ */
+ ret = nand_scan(nand, 1);
+ if (ret)
+ goto release_dma_write_chan;
+
+ mtd->name = "nand";
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret)
+ goto cleanup_nand;
+
+ platform_set_drvdata(pdev, host);
+ dev_info(&pdev->dev, "FSMC NAND driver registration successful\n");
+
+ return 0;
+
+cleanup_nand:
+ nand_cleanup(nand);
+release_dma_write_chan:
+ if (host->mode == USE_DMA_ACCESS)
+ dma_release_channel(host->write_dma_chan);
+release_dma_read_chan:
+ if (host->mode == USE_DMA_ACCESS)
+ dma_release_channel(host->read_dma_chan);
+disable_clk:
+ fsmc_nand_disable(host);
+ clk_disable_unprepare(host->clk);
+
+ return ret;
+}
+
+/*
+ * Clean up routine
+ */
+static int fsmc_nand_remove(struct platform_device *pdev)
+{
+ struct fsmc_nand_data *host = platform_get_drvdata(pdev);
+
+ if (host) {
+ struct nand_chip *chip = &host->nand;
+ int ret;
+
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ fsmc_nand_disable(host);
+
+ if (host->mode == USE_DMA_ACCESS) {
+ dma_release_channel(host->write_dma_chan);
+ dma_release_channel(host->read_dma_chan);
+ }
+ clk_disable_unprepare(host->clk);
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int fsmc_nand_suspend(struct device *dev)
+{
+ struct fsmc_nand_data *host = dev_get_drvdata(dev);
+
+ if (host)
+ clk_disable_unprepare(host->clk);
+
+ return 0;
+}
+
+static int fsmc_nand_resume(struct device *dev)
+{
+ struct fsmc_nand_data *host = dev_get_drvdata(dev);
+ int ret;
+
+ if (host) {
+ ret = clk_prepare_enable(host->clk);
+ if (ret) {
+ dev_err(dev, "failed to enable clk\n");
+ return ret;
+ }
+ if (host->dev_timings)
+ fsmc_nand_setup(host, host->dev_timings);
+ nand_reset(&host->nand, 0);
+ }
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(fsmc_nand_pm_ops, fsmc_nand_suspend, fsmc_nand_resume);
+
+static const struct of_device_id fsmc_nand_id_table[] = {
+ { .compatible = "st,spear600-fsmc-nand" },
+ { .compatible = "stericsson,fsmc-nand" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, fsmc_nand_id_table);
+
+static struct platform_driver fsmc_nand_driver = {
+ .remove = fsmc_nand_remove,
+ .driver = {
+ .name = "fsmc-nand",
+ .of_match_table = fsmc_nand_id_table,
+ .pm = &fsmc_nand_pm_ops,
+ },
+};
+
+module_platform_driver_probe(fsmc_nand_driver, fsmc_nand_probe);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Vipin Kumar <vipin.kumar@st.com>, Ashish Priyadarshi");
+MODULE_DESCRIPTION("NAND driver for SPEAr Platforms");
diff --git a/drivers/mtd/nand/raw/gpio.c b/drivers/mtd/nand/raw/gpio.c
new file mode 100644
index 000000000..fdf073d2e
--- /dev/null
+++ b/drivers/mtd/nand/raw/gpio.c
@@ -0,0 +1,409 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Updated, and converted to generic GPIO based driver by Russell King.
+ *
+ * Written by Ben Dooks <ben@simtec.co.uk>
+ * Based on 2.4 version by Mark Whittaker
+ *
+ * © 2004 Simtec Electronics
+ *
+ * Device driver for NAND flash that uses a memory mapped interface to
+ * read/write the NAND commands and data, and GPIO pins for control signals
+ * (the DT binding refers to this as "GPIO assisted NAND flash")
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/nand-gpio.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/delay.h>
+
+struct gpiomtd {
+ struct nand_controller base;
+ void __iomem *io;
+ void __iomem *io_sync;
+ struct nand_chip nand_chip;
+ struct gpio_nand_platdata plat;
+ struct gpio_desc *nce; /* Optional chip enable */
+ struct gpio_desc *cle;
+ struct gpio_desc *ale;
+ struct gpio_desc *rdy;
+ struct gpio_desc *nwp; /* Optional write protection */
+};
+
+static inline struct gpiomtd *gpio_nand_getpriv(struct mtd_info *mtd)
+{
+ return container_of(mtd_to_nand(mtd), struct gpiomtd, nand_chip);
+}
+
+
+#ifdef CONFIG_ARM
+/* gpio_nand_dosync()
+ *
+ * Make sure the GPIO state changes occur in order with writes to the NAND
+ * memory region.
+ * Needed on PXA due to bus-reordering within the SoC itself (see the
+ * section on I/O ordering in the PXA manual, section 2.3, p. 35).
+ */
+static void gpio_nand_dosync(struct gpiomtd *gpiomtd)
+{
+ unsigned long tmp;
+
+ if (gpiomtd->io_sync) {
+ /*
+ * Linux memory barriers don't cater for what's required here.
+ * What's required is what's here - a read from a separate
+ * region with a dependency on that read.
+ */
+ tmp = readl(gpiomtd->io_sync);
+ asm volatile("mov %1, %0\n" : "=r" (tmp) : "r" (tmp));
+ }
+}
+#else
+static inline void gpio_nand_dosync(struct gpiomtd *gpiomtd) {}
+#endif
+
+static int gpio_nand_exec_instr(struct nand_chip *chip,
+ const struct nand_op_instr *instr)
+{
+ struct gpiomtd *gpiomtd = gpio_nand_getpriv(nand_to_mtd(chip));
+ unsigned int i;
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ gpio_nand_dosync(gpiomtd);
+ gpiod_set_value(gpiomtd->cle, 1);
+ gpio_nand_dosync(gpiomtd);
+ writeb(instr->ctx.cmd.opcode, gpiomtd->io);
+ gpio_nand_dosync(gpiomtd);
+ gpiod_set_value(gpiomtd->cle, 0);
+ return 0;
+
+ case NAND_OP_ADDR_INSTR:
+ gpio_nand_dosync(gpiomtd);
+ gpiod_set_value(gpiomtd->ale, 1);
+ gpio_nand_dosync(gpiomtd);
+ for (i = 0; i < instr->ctx.addr.naddrs; i++)
+ writeb(instr->ctx.addr.addrs[i], gpiomtd->io);
+ gpio_nand_dosync(gpiomtd);
+ gpiod_set_value(gpiomtd->ale, 0);
+ return 0;
+
+ case NAND_OP_DATA_IN_INSTR:
+ gpio_nand_dosync(gpiomtd);
+ if ((chip->options & NAND_BUSWIDTH_16) &&
+ !instr->ctx.data.force_8bit)
+ ioread16_rep(gpiomtd->io, instr->ctx.data.buf.in,
+ instr->ctx.data.len / 2);
+ else
+ ioread8_rep(gpiomtd->io, instr->ctx.data.buf.in,
+ instr->ctx.data.len);
+ return 0;
+
+ case NAND_OP_DATA_OUT_INSTR:
+ gpio_nand_dosync(gpiomtd);
+ if ((chip->options & NAND_BUSWIDTH_16) &&
+ !instr->ctx.data.force_8bit)
+ iowrite16_rep(gpiomtd->io, instr->ctx.data.buf.out,
+ instr->ctx.data.len / 2);
+ else
+ iowrite8_rep(gpiomtd->io, instr->ctx.data.buf.out,
+ instr->ctx.data.len);
+ return 0;
+
+ case NAND_OP_WAITRDY_INSTR:
+ if (!gpiomtd->rdy)
+ return nand_soft_waitrdy(chip, instr->ctx.waitrdy.timeout_ms);
+
+ return nand_gpio_waitrdy(chip, gpiomtd->rdy,
+ instr->ctx.waitrdy.timeout_ms);
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int gpio_nand_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ struct gpiomtd *gpiomtd = gpio_nand_getpriv(nand_to_mtd(chip));
+ unsigned int i;
+ int ret = 0;
+
+ if (check_only)
+ return 0;
+
+ gpio_nand_dosync(gpiomtd);
+ gpiod_set_value(gpiomtd->nce, 0);
+ for (i = 0; i < op->ninstrs; i++) {
+ ret = gpio_nand_exec_instr(chip, &op->instrs[i]);
+ if (ret)
+ break;
+
+ if (op->instrs[i].delay_ns)
+ ndelay(op->instrs[i].delay_ns);
+ }
+ gpio_nand_dosync(gpiomtd);
+ gpiod_set_value(gpiomtd->nce, 1);
+
+ return ret;
+}
+
+static int gpio_nand_attach_chip(struct nand_chip *chip)
+{
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
+ chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN)
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+
+ return 0;
+}
+
+static const struct nand_controller_ops gpio_nand_ops = {
+ .exec_op = gpio_nand_exec_op,
+ .attach_chip = gpio_nand_attach_chip,
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id gpio_nand_id_table[] = {
+ { .compatible = "gpio-control-nand" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, gpio_nand_id_table);
+
+static int gpio_nand_get_config_of(const struct device *dev,
+ struct gpio_nand_platdata *plat)
+{
+ u32 val;
+
+ if (!dev->of_node)
+ return -ENODEV;
+
+ if (!of_property_read_u32(dev->of_node, "bank-width", &val)) {
+ if (val == 2) {
+ plat->options |= NAND_BUSWIDTH_16;
+ } else if (val != 1) {
+ dev_err(dev, "invalid bank-width %u\n", val);
+ return -EINVAL;
+ }
+ }
+
+ if (!of_property_read_u32(dev->of_node, "chip-delay", &val))
+ plat->chip_delay = val;
+
+ return 0;
+}
+
+static struct resource *gpio_nand_get_io_sync_of(struct platform_device *pdev)
+{
+ struct resource *r;
+ u64 addr;
+
+ if (of_property_read_u64(pdev->dev.of_node,
+ "gpio-control-nand,io-sync-reg", &addr))
+ return NULL;
+
+ r = devm_kzalloc(&pdev->dev, sizeof(*r), GFP_KERNEL);
+ if (!r)
+ return NULL;
+
+ r->start = addr;
+ r->end = r->start + 0x3;
+ r->flags = IORESOURCE_MEM;
+
+ return r;
+}
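+
+/*
+ * A hypothetical DT fragment consumed by the lookup above (the register
+ * address is illustrative only; two cells form the 64-bit value):
+ *
+ *	gpio-control-nand,io-sync-reg = <0x0 0x40000000>;
+ */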
+#else /* CONFIG_OF */
+static inline int gpio_nand_get_config_of(const struct device *dev,
+ struct gpio_nand_platdata *plat)
+{
+ return -ENOSYS;
+}
+
+static inline struct resource *
+gpio_nand_get_io_sync_of(struct platform_device *pdev)
+{
+ return NULL;
+}
+#endif /* CONFIG_OF */
+
+static inline int gpio_nand_get_config(const struct device *dev,
+ struct gpio_nand_platdata *plat)
+{
+ int ret = gpio_nand_get_config_of(dev, plat);
+
+ if (!ret)
+ return ret;
+
+ if (dev_get_platdata(dev)) {
+ memcpy(plat, dev_get_platdata(dev), sizeof(*plat));
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline struct resource *
+gpio_nand_get_io_sync(struct platform_device *pdev)
+{
+ struct resource *r = gpio_nand_get_io_sync_of(pdev);
+
+ if (r)
+ return r;
+
+ return platform_get_resource(pdev, IORESOURCE_MEM, 1);
+}
+
+static int gpio_nand_remove(struct platform_device *pdev)
+{
+ struct gpiomtd *gpiomtd = platform_get_drvdata(pdev);
+ struct nand_chip *chip = &gpiomtd->nand_chip;
+ int ret;
+
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+
+ /* Enable write protection and disable the chip */
+ if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp))
+ gpiod_set_value(gpiomtd->nwp, 0);
+ if (gpiomtd->nce && !IS_ERR(gpiomtd->nce))
+ gpiod_set_value(gpiomtd->nce, 0);
+
+ return 0;
+}
+
+static int gpio_nand_probe(struct platform_device *pdev)
+{
+ struct gpiomtd *gpiomtd;
+ struct nand_chip *chip;
+ struct mtd_info *mtd;
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+ int ret = 0;
+
+ if (!dev->of_node && !dev_get_platdata(dev))
+ return -EINVAL;
+
+ gpiomtd = devm_kzalloc(dev, sizeof(*gpiomtd), GFP_KERNEL);
+ if (!gpiomtd)
+ return -ENOMEM;
+
+ chip = &gpiomtd->nand_chip;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ gpiomtd->io = devm_ioremap_resource(dev, res);
+ if (IS_ERR(gpiomtd->io))
+ return PTR_ERR(gpiomtd->io);
+
+ res = gpio_nand_get_io_sync(pdev);
+ if (res) {
+ gpiomtd->io_sync = devm_ioremap_resource(dev, res);
+ if (IS_ERR(gpiomtd->io_sync))
+ return PTR_ERR(gpiomtd->io_sync);
+ }
+
+ ret = gpio_nand_get_config(dev, &gpiomtd->plat);
+ if (ret)
+ return ret;
+
+ /* Just enable the chip */
+ gpiomtd->nce = devm_gpiod_get_optional(dev, "nce", GPIOD_OUT_HIGH);
+ if (IS_ERR(gpiomtd->nce))
+ return PTR_ERR(gpiomtd->nce);
+
+ /* We disable write protection once we know probe() will succeed */
+ gpiomtd->nwp = devm_gpiod_get_optional(dev, "nwp", GPIOD_OUT_LOW);
+ if (IS_ERR(gpiomtd->nwp)) {
+ ret = PTR_ERR(gpiomtd->nwp);
+ goto out_ce;
+ }
+
+ gpiomtd->ale = devm_gpiod_get(dev, "ale", GPIOD_OUT_LOW);
+ if (IS_ERR(gpiomtd->ale)) {
+ ret = PTR_ERR(gpiomtd->ale);
+ goto out_ce;
+ }
+
+ gpiomtd->cle = devm_gpiod_get(dev, "cle", GPIOD_OUT_LOW);
+ if (IS_ERR(gpiomtd->cle)) {
+ ret = PTR_ERR(gpiomtd->cle);
+ goto out_ce;
+ }
+
+ gpiomtd->rdy = devm_gpiod_get_optional(dev, "rdy", GPIOD_IN);
+ if (IS_ERR(gpiomtd->rdy)) {
+ ret = PTR_ERR(gpiomtd->rdy);
+ goto out_ce;
+ }
+
+ nand_controller_init(&gpiomtd->base);
+ gpiomtd->base.ops = &gpio_nand_ops;
+
+ nand_set_flash_node(chip, pdev->dev.of_node);
+ chip->options = gpiomtd->plat.options;
+ chip->controller = &gpiomtd->base;
+
+ mtd = nand_to_mtd(chip);
+ mtd->dev.parent = dev;
+
+ platform_set_drvdata(pdev, gpiomtd);
+
+ /* Disable write protection, if wired up */
+ if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp))
+ gpiod_direction_output(gpiomtd->nwp, 1);
+
+ /*
+ * This driver assumes that the default ECC engine should be TYPE_SOFT.
+ * Set ->engine_type before registering the NAND devices in order to
+ * provide a driver-specific default value.
+ */
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+
+ ret = nand_scan(chip, 1);
+ if (ret)
+ goto err_wp;
+
+ if (gpiomtd->plat.adjust_parts)
+ gpiomtd->plat.adjust_parts(&gpiomtd->plat, mtd->size);
+
+ ret = mtd_device_register(mtd, gpiomtd->plat.parts,
+ gpiomtd->plat.num_parts);
+ if (!ret)
+ return 0;
+
+err_wp:
+ if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp))
+ gpiod_set_value(gpiomtd->nwp, 0);
+out_ce:
+ if (gpiomtd->nce && !IS_ERR(gpiomtd->nce))
+ gpiod_set_value(gpiomtd->nce, 0);
+
+ return ret;
+}
+
+static struct platform_driver gpio_nand_driver = {
+ .probe = gpio_nand_probe,
+ .remove = gpio_nand_remove,
+ .driver = {
+ .name = "gpio-nand",
+ .of_match_table = of_match_ptr(gpio_nand_id_table),
+ },
+};
+
+module_platform_driver(gpio_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
+MODULE_DESCRIPTION("GPIO NAND Driver");
diff --git a/drivers/mtd/nand/raw/gpmi-nand/Makefile b/drivers/mtd/nand/raw/gpmi-nand/Makefile
new file mode 100644
index 000000000..9bd81a31e
--- /dev/null
+++ b/drivers/mtd/nand/raw/gpmi-nand/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi_nand.o
+gpmi_nand-objs += gpmi-nand.o
diff --git a/drivers/mtd/nand/raw/gpmi-nand/bch-regs.h b/drivers/mtd/nand/raw/gpmi-nand/bch-regs.h
new file mode 100644
index 000000000..a22b8a506
--- /dev/null
+++ b/drivers/mtd/nand/raw/gpmi-nand/bch-regs.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Freescale GPMI NAND Flash Driver
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright 2008 Embedded Alley Solutions, Inc.
+ */
+#ifndef __GPMI_NAND_BCH_REGS_H
+#define __GPMI_NAND_BCH_REGS_H
+
+#define HW_BCH_CTRL 0x00000000
+#define HW_BCH_CTRL_SET 0x00000004
+#define HW_BCH_CTRL_CLR 0x00000008
+#define HW_BCH_CTRL_TOG 0x0000000c
+
+#define BM_BCH_CTRL_COMPLETE_IRQ_EN (1 << 8)
+#define BM_BCH_CTRL_COMPLETE_IRQ (1 << 0)
+
+#define HW_BCH_STATUS0 0x00000010
+#define HW_BCH_MODE 0x00000020
+#define HW_BCH_ENCODEPTR 0x00000030
+#define HW_BCH_DATAPTR 0x00000040
+#define HW_BCH_METAPTR 0x00000050
+#define HW_BCH_LAYOUTSELECT 0x00000070
+
+#define HW_BCH_FLASH0LAYOUT0 0x00000080
+
+#define BP_BCH_FLASH0LAYOUT0_NBLOCKS 24
+#define BM_BCH_FLASH0LAYOUT0_NBLOCKS (0xff << BP_BCH_FLASH0LAYOUT0_NBLOCKS)
+#define BF_BCH_FLASH0LAYOUT0_NBLOCKS(v) \
+ (((v) << BP_BCH_FLASH0LAYOUT0_NBLOCKS) & BM_BCH_FLASH0LAYOUT0_NBLOCKS)
+
+#define BP_BCH_FLASH0LAYOUT0_META_SIZE 16
+#define BM_BCH_FLASH0LAYOUT0_META_SIZE (0xff << BP_BCH_FLASH0LAYOUT0_META_SIZE)
+#define BF_BCH_FLASH0LAYOUT0_META_SIZE(v) \
+ (((v) << BP_BCH_FLASH0LAYOUT0_META_SIZE)\
+ & BM_BCH_FLASH0LAYOUT0_META_SIZE)
+
+#define BP_BCH_FLASH0LAYOUT0_ECC0 12
+#define BM_BCH_FLASH0LAYOUT0_ECC0 (0xf << BP_BCH_FLASH0LAYOUT0_ECC0)
+#define MX6Q_BP_BCH_FLASH0LAYOUT0_ECC0 11
+#define MX6Q_BM_BCH_FLASH0LAYOUT0_ECC0 (0x1f << MX6Q_BP_BCH_FLASH0LAYOUT0_ECC0)
+#define BF_BCH_FLASH0LAYOUT0_ECC0(v, x) \
+ (GPMI_IS_MX6(x) \
+ ? (((v) << MX6Q_BP_BCH_FLASH0LAYOUT0_ECC0) \
+ & MX6Q_BM_BCH_FLASH0LAYOUT0_ECC0) \
+ : (((v) << BP_BCH_FLASH0LAYOUT0_ECC0) \
+ & BM_BCH_FLASH0LAYOUT0_ECC0) \
+ )
+
+#define MX6Q_BP_BCH_FLASH0LAYOUT0_GF_13_14 10
+#define MX6Q_BM_BCH_FLASH0LAYOUT0_GF_13_14 \
+ (0x1 << MX6Q_BP_BCH_FLASH0LAYOUT0_GF_13_14)
+#define BF_BCH_FLASH0LAYOUT0_GF(v, x) \
+ ((GPMI_IS_MX6(x) && ((v) == 14)) \
+ ? (((1) << MX6Q_BP_BCH_FLASH0LAYOUT0_GF_13_14) \
+ & MX6Q_BM_BCH_FLASH0LAYOUT0_GF_13_14) \
+ : 0 \
+ )
+
+#define BP_BCH_FLASH0LAYOUT0_DATA0_SIZE 0
+#define BM_BCH_FLASH0LAYOUT0_DATA0_SIZE \
+ (0xfff << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE)
+#define MX6Q_BM_BCH_FLASH0LAYOUT0_DATA0_SIZE \
+ (0x3ff << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE)
+#define BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(v, x) \
+ (GPMI_IS_MX6(x) \
+ ? (((v) >> 2) & MX6Q_BM_BCH_FLASH0LAYOUT0_DATA0_SIZE) \
+ : ((v) & BM_BCH_FLASH0LAYOUT0_DATA0_SIZE) \
+ )
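+
+/*
+ * Note: on MX6 the data block size appears to be programmed in 32-bit
+ * words (hence the v >> 2 above and the narrower 0x3ff mask), while
+ * earlier SoCs take it in bytes.
+ */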
+
+#define HW_BCH_FLASH0LAYOUT1 0x00000090
+
+#define BP_BCH_FLASH0LAYOUT1_PAGE_SIZE 16
+#define BM_BCH_FLASH0LAYOUT1_PAGE_SIZE \
+ (0xffff << BP_BCH_FLASH0LAYOUT1_PAGE_SIZE)
+#define BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(v) \
+ (((v) << BP_BCH_FLASH0LAYOUT1_PAGE_SIZE) \
+ & BM_BCH_FLASH0LAYOUT1_PAGE_SIZE)
+
+#define BP_BCH_FLASH0LAYOUT1_ECCN 12
+#define BM_BCH_FLASH0LAYOUT1_ECCN (0xf << BP_BCH_FLASH0LAYOUT1_ECCN)
+#define MX6Q_BP_BCH_FLASH0LAYOUT1_ECCN 11
+#define MX6Q_BM_BCH_FLASH0LAYOUT1_ECCN (0x1f << MX6Q_BP_BCH_FLASH0LAYOUT1_ECCN)
+#define BF_BCH_FLASH0LAYOUT1_ECCN(v, x) \
+ (GPMI_IS_MX6(x) \
+ ? (((v) << MX6Q_BP_BCH_FLASH0LAYOUT1_ECCN) \
+ & MX6Q_BM_BCH_FLASH0LAYOUT1_ECCN) \
+ : (((v) << BP_BCH_FLASH0LAYOUT1_ECCN) \
+ & BM_BCH_FLASH0LAYOUT1_ECCN) \
+ )
+
+#define MX6Q_BP_BCH_FLASH0LAYOUT1_GF_13_14 10
+#define MX6Q_BM_BCH_FLASH0LAYOUT1_GF_13_14 \
+ (0x1 << MX6Q_BP_BCH_FLASH0LAYOUT1_GF_13_14)
+#define BF_BCH_FLASH0LAYOUT1_GF(v, x) \
+ ((GPMI_IS_MX6(x) && ((v) == 14)) \
+ ? (((1) << MX6Q_BP_BCH_FLASH0LAYOUT1_GF_13_14) \
+ & MX6Q_BM_BCH_FLASH0LAYOUT1_GF_13_14) \
+ : 0 \
+ )
+
+#define BP_BCH_FLASH0LAYOUT1_DATAN_SIZE 0
+#define BM_BCH_FLASH0LAYOUT1_DATAN_SIZE \
+ (0xfff << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE)
+#define MX6Q_BM_BCH_FLASH0LAYOUT1_DATAN_SIZE \
+ (0x3ff << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE)
+#define BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(v, x) \
+ (GPMI_IS_MX6(x) \
+ ? (((v) >> 2) & MX6Q_BM_BCH_FLASH0LAYOUT1_DATAN_SIZE) \
+ : ((v) & BM_BCH_FLASH0LAYOUT1_DATAN_SIZE) \
+ )
+
+#define HW_BCH_VERSION 0x00000160
+#endif
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
new file mode 100644
index 000000000..200d3ab34
--- /dev/null
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
@@ -0,0 +1,2667 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Freescale GPMI NAND Flash Driver
+ *
+ * Copyright (C) 2010-2015 Freescale Semiconductor, Inc.
+ * Copyright (C) 2008 Embedded Alley Solutions, Inc.
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/sched/task_stack.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/mtd/partitions.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/dma/mxs-dma.h>
+#include "gpmi-nand.h"
+#include "gpmi-regs.h"
+#include "bch-regs.h"
+
+/* Resource names for the GPMI NAND driver. */
+#define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME "gpmi-nand"
+#define GPMI_NAND_BCH_REGS_ADDR_RES_NAME "bch"
+#define GPMI_NAND_BCH_INTERRUPT_RES_NAME "bch"
+
+/* Converts time to clock cycles */
+#define TO_CYCLES(duration, period) DIV_ROUND_UP_ULL(duration, period)
+
+#define MXS_SET_ADDR 0x4
+#define MXS_CLR_ADDR 0x8
+/*
+ * Clear the bit and poll it cleared. This is usually called with
+ * a reset address and mask being either SFTRST (bit 31) or CLKGATE
+ * (bit 30).
+ */
+static int clear_poll_bit(void __iomem *addr, u32 mask)
+{
+ int timeout = 0x400;
+
+ /* clear the bit */
+ writel(mask, addr + MXS_CLR_ADDR);
+
+ /*
+ * SFTRST needs 3 GPMI clocks to settle, the reference manual
+ * recommends waiting 1us.
+ */
+ udelay(1);
+
+ /* poll the bit becoming clear */
+ while ((readl(addr) & mask) && --timeout)
+ /* nothing */;
+
+ return !timeout;
+}
+
+#define MODULE_CLKGATE (1 << 30)
+#define MODULE_SFTRST (1 << 31)
+/*
+ * The current mxs_reset_block() will do two things:
+ * [1] enable the module.
+ * [2] reset the module.
+ *
+ * In most cases that is fine.
+ * But on MX23 there is a hardware bug in the BCH block (see erratum #2847):
+ * if you soft reset the BCH block, it becomes unusable until the next hard
+ * reset. This case occurs in NAND boot mode: when the board boots from
+ * NAND, the chip's ROM initializes the BCH block itself, so if the driver
+ * resets the BCH again, the BCH stops working and you will see a DMA
+ * timeout. The bug has been fixed in later chips, such as the MX28.
+ *
+ * To avoid this bug, mxs_reset_block() is reimplemented here with an
+ * additional `just_enable` parameter that skips the reset step.
+ */
+static int gpmi_reset_block(void __iomem *reset_addr, bool just_enable)
+{
+ int ret;
+ int timeout = 0x400;
+
+ /* clear and poll SFTRST */
+ ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
+ if (unlikely(ret))
+ goto error;
+
+ /* clear CLKGATE */
+ writel(MODULE_CLKGATE, reset_addr + MXS_CLR_ADDR);
+
+ if (!just_enable) {
+ /* set SFTRST to reset the block */
+ writel(MODULE_SFTRST, reset_addr + MXS_SET_ADDR);
+ udelay(1);
+
+ /* poll CLKGATE becoming set */
+ while ((!(readl(reset_addr) & MODULE_CLKGATE)) && --timeout)
+ /* nothing */;
+ if (unlikely(!timeout))
+ goto error;
+ }
+
+ /* clear and poll SFTRST */
+ ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
+ if (unlikely(ret))
+ goto error;
+
+ /* clear and poll CLKGATE */
+ ret = clear_poll_bit(reset_addr, MODULE_CLKGATE);
+ if (unlikely(ret))
+ goto error;
+
+ return 0;
+
+error:
+ pr_err("%s(%p): module reset timeout\n", __func__, reset_addr);
+ return -ETIMEDOUT;
+}
+
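+/*
+ * Enable (v = true) or disable (v = false) every populated clock in
+ * this->resources.clock[], unwinding already-enabled clocks on failure.
+ */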
+static int __gpmi_enable_clk(struct gpmi_nand_data *this, bool v)
+{
+ struct clk *clk;
+ int ret;
+ int i;
+
+ for (i = 0; i < GPMI_CLK_MAX; i++) {
+ clk = this->resources.clock[i];
+ if (!clk)
+ break;
+
+ if (v) {
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ goto err_clk;
+ } else {
+ clk_disable_unprepare(clk);
+ }
+ }
+ return 0;
+
+err_clk:
+ for (; i > 0; i--)
+ clk_disable_unprepare(this->resources.clock[i - 1]);
+ return ret;
+}
+
+static int gpmi_init(struct gpmi_nand_data *this)
+{
+ struct resources *r = &this->resources;
+ int ret;
+
+ ret = pm_runtime_get_sync(this->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(this->dev);
+ return ret;
+ }
+
+ ret = gpmi_reset_block(r->gpmi_regs, false);
+ if (ret)
+ goto err_out;
+
+ /*
+ * Reset BCH here, too. We got failures otherwise :(
+ * See later BCH reset for explanation of MX23 and MX28 handling
+ */
+ ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MXS(this));
+ if (ret)
+ goto err_out;
+
+ /* Choose NAND mode. */
+ writel(BM_GPMI_CTRL1_GPMI_MODE, r->gpmi_regs + HW_GPMI_CTRL1_CLR);
+
+ /* Set the IRQ polarity. */
+ writel(BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY,
+ r->gpmi_regs + HW_GPMI_CTRL1_SET);
+
+ /* Disable Write-Protection. */
+ writel(BM_GPMI_CTRL1_DEV_RESET, r->gpmi_regs + HW_GPMI_CTRL1_SET);
+
+ /* Select BCH ECC. */
+ writel(BM_GPMI_CTRL1_BCH_MODE, r->gpmi_regs + HW_GPMI_CTRL1_SET);
+
+ /*
+ * Decouple the chip select from dma channel. We use dma0 for all
+ * the chips.
+ */
+ writel(BM_GPMI_CTRL1_DECOUPLE_CS, r->gpmi_regs + HW_GPMI_CTRL1_SET);
+
+err_out:
+ pm_runtime_mark_last_busy(this->dev);
+ pm_runtime_put_autosuspend(this->dev);
+ return ret;
+}
+
+/* This function is useful for debugging; it is called only when a bug occurs. */
+static void gpmi_dump_info(struct gpmi_nand_data *this)
+{
+ struct resources *r = &this->resources;
+ struct bch_geometry *geo = &this->bch_geometry;
+ u32 reg;
+ int i;
+
+ dev_err(this->dev, "Show GPMI registers :\n");
+ for (i = 0; i <= HW_GPMI_DEBUG / 0x10 + 1; i++) {
+ reg = readl(r->gpmi_regs + i * 0x10);
+ dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
+ }
+
+ /* start to print out the BCH info */
+ dev_err(this->dev, "Show BCH registers :\n");
+ for (i = 0; i <= HW_BCH_VERSION / 0x10 + 1; i++) {
+ reg = readl(r->bch_regs + i * 0x10);
+ dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
+ }
+ dev_err(this->dev, "BCH Geometry :\n"
+ "GF length : %u\n"
+ "ECC Strength : %u\n"
+ "Page Size in Bytes : %u\n"
+ "Metadata Size in Bytes : %u\n"
+ "ECC Chunk Size in Bytes: %u\n"
+ "ECC Chunk Count : %u\n"
+ "Payload Size in Bytes : %u\n"
+ "Auxiliary Size in Bytes: %u\n"
+ "Auxiliary Status Offset: %u\n"
+ "Block Mark Byte Offset : %u\n"
+ "Block Mark Bit Offset : %u\n",
+ geo->gf_len,
+ geo->ecc_strength,
+ geo->page_size,
+ geo->metadata_size,
+ geo->ecc_chunk_size,
+ geo->ecc_chunk_count,
+ geo->payload_size,
+ geo->auxiliary_size,
+ geo->auxiliary_status_offset,
+ geo->block_mark_byte_offset,
+ geo->block_mark_bit_offset);
+}
+
+static inline bool gpmi_check_ecc(struct gpmi_nand_data *this)
+{
+ struct bch_geometry *geo = &this->bch_geometry;
+
+ /* Do the sanity check. */
+ if (GPMI_IS_MXS(this)) {
+ /* The mx23/mx28 only support GF13. */
+ if (geo->gf_len == 14)
+ return false;
+ }
+ return geo->ecc_strength <= this->devdata->bch_max_ecc_strength;
+}
+
+/*
+ * If we can get the ECC information from the NAND chip, we do not
+ * need to calculate it ourselves.
+ *
+ * We may have available oob space in this case.
+ */
+static int set_geometry_by_ecc_info(struct gpmi_nand_data *this,
+ unsigned int ecc_strength,
+ unsigned int ecc_step)
+{
+ struct bch_geometry *geo = &this->bch_geometry;
+ struct nand_chip *chip = &this->nand;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned int block_mark_bit_offset;
+
+ switch (ecc_step) {
+ case SZ_512:
+ geo->gf_len = 13;
+ break;
+ case SZ_1K:
+ geo->gf_len = 14;
+ break;
+ default:
+ dev_err(this->dev,
+ "unsupported nand chip. ecc bits : %d, ecc size : %d\n",
+ nanddev_get_ecc_requirements(&chip->base)->strength,
+ nanddev_get_ecc_requirements(&chip->base)->step_size);
+ return -EINVAL;
+ }
+ geo->ecc_chunk_size = ecc_step;
+ geo->ecc_strength = round_up(ecc_strength, 2);
+ if (!gpmi_check_ecc(this))
+ return -EINVAL;
+
+ /* Keep C >= O (ecc chunk size >= oob size) */
+ if (geo->ecc_chunk_size < mtd->oobsize) {
+ dev_err(this->dev,
+ "unsupported nand chip. ecc size: %d, oob size : %d\n",
+ ecc_step, mtd->oobsize);
+ return -EINVAL;
+ }
+
+ /* The default value, see comment in the legacy_set_geometry(). */
+ geo->metadata_size = 10;
+
+ geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size;
+
+ /*
+ * Now, the NAND chip with 2K page(data chunk is 512byte) shows below:
+ *
+ * | P |
+ * |<----------------------------------------------------->|
+ * | |
+ * | (Block Mark) |
+ * | P' | | | |
+ * |<-------------------------------------------->| D | | O' |
+ * | |<---->| |<--->|
+ * V V V V V
+ * +---+----------+-+----------+-+----------+-+----------+-+-----+
+ * | M | data |E| data |E| data |E| data |E| |
+ * +---+----------+-+----------+-+----------+-+----------+-+-----+
+ * ^ ^
+ * | O |
+ * |<------------>|
+ * | |
+ *
+ * P : the page size for BCH module.
+ * E : The ECC strength.
+ * G : the length of Galois Field.
+ * N : the chunk count per page.
+ * M : the metadata size per page.
+ * C : the ecc chunk size, aka the "data" above.
+ * P': the nand chip's page size.
+ * O : the nand chip's oob size.
+ * O': the free oob.
+ *
+ * The formula for P is :
+ *
+ * E * G * N
+ * P = ------------ + P' + M
+ * 8
+ *
+ * The position of block mark moves forward in the ECC-based view
+ * of page, and the delta is:
+ *
+ * E * G * (N - 1)
+ * D = (---------------- + M)
+ * 8
+ *
+ * Please see the comment in legacy_set_geometry().
+ * With the condition C >= O , we still can get same result.
+ * So the bit position of the physical block mark within the ECC-based
+ * view of the page is :
+ * (P' - D) * 8
+ */
+ geo->page_size = mtd->writesize + geo->metadata_size +
+ (geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) / 8;
+
+ geo->payload_size = mtd->writesize;
+
+ geo->auxiliary_status_offset = ALIGN(geo->metadata_size, 4);
+ geo->auxiliary_size = ALIGN(geo->metadata_size, 4)
+ + ALIGN(geo->ecc_chunk_count, 4);
+
+ if (!this->swap_block_mark)
+ return 0;
+
+ /* For bit swap. */
+ block_mark_bit_offset = mtd->writesize * 8 -
+ (geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
+ + geo->metadata_size * 8);
+
+ geo->block_mark_byte_offset = block_mark_bit_offset / 8;
+ geo->block_mark_bit_offset = block_mark_bit_offset % 8;
+ return 0;
+}
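+
+/*
+ * Worked example for the formulas above (hypothetical chip): 2 KiB page,
+ * ecc_step = 512 (so G = 13), ecc_strength E = 8, N = 4 chunks, M = 10:
+ *
+ *	P = 2048 + 10 + (8 * 13 * 4) / 8 = 2110 bytes
+ *	D = (8 * 13 * 3) / 8 + 10 = 49 bytes
+ *
+ * so the block mark sits at bit offset (2048 - 49) * 8 = 15992 in the
+ * ECC-based view, i.e. byte 1999, bit 0.
+ */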
+
+/*
+ * Calculate the ECC strength by hand:
+ * E : The ECC strength.
+ * G : the length of Galois Field.
+ * N : the chunk count per page.
+ * O : the oobsize of the NAND chip.
+ * M : the metadata size per page.
+ *
+ * The formula is :
+ * E * G * N
+ * ------------ <= (O - M)
+ * 8
+ *
+ * So, we get E by:
+ * (O - M) * 8
+ * E <= -------------
+ * G * N
+ */
+static inline int get_ecc_strength(struct gpmi_nand_data *this)
+{
+ struct bch_geometry *geo = &this->bch_geometry;
+ struct mtd_info *mtd = nand_to_mtd(&this->nand);
+ int ecc_strength;
+
+ ecc_strength = ((mtd->oobsize - geo->metadata_size) * 8)
+ / (geo->gf_len * geo->ecc_chunk_count);
+
+ /* We need the nearest lower even number. */
+ return round_down(ecc_strength, 2);
+}
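+
+/*
+ * Worked example (hypothetical chip): 2 KiB page, 64 byte OOB, M = 10,
+ * G = 13, N = 4:
+ *
+ *	E <= (64 - 10) * 8 / (13 * 4) = 432 / 52 -> 8 (integer division)
+ *
+ * round_down(8, 2) keeps E = 8.
+ */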
+
+static int legacy_set_geometry(struct gpmi_nand_data *this)
+{
+ struct bch_geometry *geo = &this->bch_geometry;
+ struct mtd_info *mtd = nand_to_mtd(&this->nand);
+ unsigned int metadata_size;
+ unsigned int status_size;
+ unsigned int block_mark_bit_offset;
+
+	/*
+	 * The size of the metadata can be changed, though we set it to 10
+	 * bytes now. It can't be too large, because we have to leave enough
+	 * space for the BCH parity data.
+	 */
+ geo->metadata_size = 10;
+
+ /* The default for the length of Galois Field. */
+ geo->gf_len = 13;
+
+ /* The default for chunk size. */
+ geo->ecc_chunk_size = 512;
+ while (geo->ecc_chunk_size < mtd->oobsize) {
+ geo->ecc_chunk_size *= 2; /* keep C >= O */
+ geo->gf_len = 14;
+ }
+
+ geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size;
+
+ /* We use the same ECC strength for all chunks. */
+ geo->ecc_strength = get_ecc_strength(this);
+ if (!gpmi_check_ecc(this)) {
+ dev_err(this->dev,
+			"ecc strength: %d cannot be supported by the controller (%d)\n"
+			"try to use the minimum ecc strength that the NAND chip requires\n",
+ geo->ecc_strength,
+ this->devdata->bch_max_ecc_strength);
+ return -EINVAL;
+ }
+
+ geo->page_size = mtd->writesize + geo->metadata_size +
+ (geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) / 8;
+ geo->payload_size = mtd->writesize;
+
+ /*
+ * The auxiliary buffer contains the metadata and the ECC status. The
+ * metadata is padded to the nearest 32-bit boundary. The ECC status
+ * contains one byte for every ECC chunk, and is also padded to the
+ * nearest 32-bit boundary.
+ */
+ metadata_size = ALIGN(geo->metadata_size, 4);
+ status_size = ALIGN(geo->ecc_chunk_count, 4);
+
+ geo->auxiliary_size = metadata_size + status_size;
+ geo->auxiliary_status_offset = metadata_size;
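+
+	/*
+	 * For instance (illustrative numbers only): with 10 bytes of
+	 * metadata and 4 ECC chunks, metadata_size = ALIGN(10, 4) = 12 and
+	 * status_size = ALIGN(4, 4) = 4, so the auxiliary buffer is 16
+	 * bytes with the ECC status starting at offset 12.
+	 */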
+
+ if (!this->swap_block_mark)
+ return 0;
+
+ /*
+ * We need to compute the byte and bit offsets of
+ * the physical block mark within the ECC-based view of the page.
+ *
+	 * The layout of a NAND chip with a 2K page is shown below:
+ * (Block Mark)
+ * | |
+ * | D |
+ * |<---->|
+ * V V
+ * +---+----------+-+----------+-+----------+-+----------+-+
+ * | M | data |E| data |E| data |E| data |E|
+ * +---+----------+-+----------+-+----------+-+----------+-+
+ *
+	 * The position of the block mark moves forward in the ECC-based view
+	 * of the page, and the delta is:
+ *
+ * E * G * (N - 1)
+ * D = (---------------- + M)
+ * 8
+ *
+	 * Combining the formula for computing the ECC strength with the
+	 * condition C >= O (where C is the ecc chunk size),
+	 *
+	 * it's easy to deduce the following result:
+ *
+ * E * G (O - M) C - M C - M
+ * ----------- <= ------- <= -------- < ---------
+ * 8 N N (N - 1)
+ *
+ * So, we get:
+ *
+ * E * G * (N - 1)
+ * D = (---------------- + M) < C
+ * 8
+ *
+	 * The above inequality means that the position of the block mark
+	 * within the ECC-based view of the page is still inside a data chunk,
+	 * and NOT in the ECC bits of the chunk.
+ *
+ * Use the following to compute the bit position of the
+ * physical block mark within the ECC-based view of the page:
+	 *     (mtd->writesize - D) * 8
+ *
+ * --Huang Shijie
+ */
+ block_mark_bit_offset = mtd->writesize * 8 -
+ (geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
+ + geo->metadata_size * 8);
+
+ geo->block_mark_byte_offset = block_mark_bit_offset / 8;
+ geo->block_mark_bit_offset = block_mark_bit_offset % 8;
+ return 0;
+}
+
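+/*
+ * Pick the BCH geometry. An ECC strength/size already set on the chip
+ * wins. Otherwise, use the chip's minimum ECC requirement if the device
+ * tree requests "fsl,use-minimum-ecc", and also if the legacy
+ * (maximum-strength) geometry computation fails.
+ */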
+static int common_nfc_set_geometry(struct gpmi_nand_data *this)
+{
+ struct nand_chip *chip = &this->nand;
+ const struct nand_ecc_props *requirements =
+ nanddev_get_ecc_requirements(&chip->base);
+
+ if (chip->ecc.strength > 0 && chip->ecc.size > 0)
+ return set_geometry_by_ecc_info(this, chip->ecc.strength,
+ chip->ecc.size);
+
+ if ((of_property_read_bool(this->dev->of_node, "fsl,use-minimum-ecc"))
+ || legacy_set_geometry(this)) {
+ if (!(requirements->strength > 0 && requirements->step_size > 0))
+ return -EINVAL;
+
+ return set_geometry_by_ecc_info(this,
+ requirements->strength,
+ requirements->step_size);
+ }
+
+ return 0;
+}
+
+/* Configures the geometry for BCH. */
+static int bch_set_geometry(struct gpmi_nand_data *this)
+{
+ struct resources *r = &this->resources;
+ int ret;
+
+ ret = common_nfc_set_geometry(this);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_get_sync(this->dev);
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(this->dev);
+ return ret;
+ }
+
+ /*
+	 * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on
+	 * this chip, otherwise it will lock up. So we skip resetting BCH on
+	 * both the MX23 and the MX28.
+ */
+ ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MXS(this));
+ if (ret)
+ goto err_out;
+
+ /* Set *all* chip selects to use layout 0. */
+ writel(0, r->bch_regs + HW_BCH_LAYOUTSELECT);
+
+ ret = 0;
+err_out:
+ pm_runtime_mark_last_busy(this->dev);
+ pm_runtime_put_autosuspend(this->dev);
+
+ return ret;
+}
+
+/*
+ * <1> First, we should know what the GPMI clock means.
+ *     The GPMI-clock is the internal clock in the gpmi nand controller.
+ *     If you set the gpmi nand controller to 100MHz, the GPMI-clock's
+ *     period is 10ns. Mark the GPMI-clock's period as GPMI-clock-period.
+ *
+ * <2> Second, we should know the frequency on the nand chip pins.
+ * The frequency on the nand chip pins is derived from the GPMI-clock.
+ * We can get it from the following equation:
+ *
+ * F = G / (DS + DH)
+ *
+ * F : the frequency on the nand chip pins.
+ * G : the GPMI clock, such as 100MHz.
+ * DS : GPMI_HW_GPMI_TIMING0:DATA_SETUP
+ * DH : GPMI_HW_GPMI_TIMING0:DATA_HOLD
+ *
+ * <3> Third, when the frequency on the nand chip pins is above 33MHz,
+ *     the nand EDO (Extended Data Out) timing can be applied.
+ *     The GPMI implements a feedback read strobe to sample the read data.
+ *     The feedback read strobe can be delayed to support the nand EDO
+ *     timing, where the read strobe may deassert before the read data is
+ *     valid, and the read data remains valid for some time after the read
+ *     strobe.
+ *
+ * The following figure illustrates some aspects of a NAND Flash read:
+ *
+ * |<---tREA---->|
+ * | |
+ * | | |
+ * |<--tRP-->| |
+ * | | |
+ * __ ___|__________________________________
+ * RDN \________/ |
+ * |
+ * /---------\
+ * Read Data --------------< >---------
+ * \---------/
+ * | |
+ * |<-D->|
+ * FeedbackRDN ________ ____________
+ * \___________/
+ *
+ * D stands for delay, set in the HW_GPMI_CTRL1:RDN_DELAY.
+ *
+ *
+ * <4> Now, we begin to describe how to compute the right RDN_DELAY.
+ *
+ * 4.1) From the aspect of the nand chip pins:
+ * Delay = (tREA + C - tRP) {1}
+ *
+ * tREA : the maximum read access time.
+ *            C    : a constant to adjust the delay; the default is 4000ps.
+ * tRP : the read pulse width, which is exactly:
+ * tRP = (GPMI-clock-period) * DATA_SETUP
+ *
+ * 4.2) From the aspect of the GPMI nand controller:
+ * Delay = RDN_DELAY * 0.125 * RP {2}
+ *
+ * RP : the DLL reference period.
+ *            if (GPMI-clock-period > DLL_THRESHOLD)
+ *                    RP = GPMI-clock-period / 2;
+ *            else
+ *                    RP = GPMI-clock-period;
+ *
+ *         Set HW_GPMI_CTRL1:HALF_PERIOD if the GPMI-clock-period is
+ *         greater than DLL_THRESHOLD. On other SoCs the DLL_THRESHOLD
+ *         is 16000ps, but on the mx6q we use 12000ps.
+ *
+ * 4.3) Since {1} equals {2}, we get:
+ *
+ * (tREA + 4000 - tRP) * 8
+ * RDN_DELAY = ----------------------- {3}
+ * RP
+ */
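+
+/*
+ * A worked example of {3}, with illustrative numbers only: at a 100MHz
+ * GPMI clock the GPMI-clock-period is 10000ps, which is below the mx6q
+ * DLL_THRESHOLD of 12000ps, so RP = 10000ps. With DATA_SETUP = 1 cycle,
+ * tRP = 10000ps, and for a hypothetical chip with tREA = 16000ps:
+ *
+ *     RDN_DELAY = (16000 + 4000 - 10000) * 8 / 10000 = 8
+ */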
+static void gpmi_nfc_compute_timings(struct gpmi_nand_data *this,
+ const struct nand_sdr_timings *sdr)
+{
+ struct gpmi_nfc_hardware_timing *hw = &this->hw;
+ struct resources *r = &this->resources;
+ unsigned int dll_threshold_ps = this->devdata->max_chain_delay;
+ unsigned int period_ps, reference_period_ps;
+ unsigned int data_setup_cycles, data_hold_cycles, addr_setup_cycles;
+ unsigned int tRP_ps;
+ bool use_half_period;
+ int sample_delay_ps, sample_delay_factor;
+ unsigned int busy_timeout_cycles;
+ u8 wrn_dly_sel;
+ u64 busy_timeout_ps;
+
+ if (sdr->tRC_min >= 30000) {
+ /* ONFI non-EDO modes [0-3] */
+ hw->clk_rate = 22000000;
+ wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_4_TO_8NS;
+ } else if (sdr->tRC_min >= 25000) {
+ /* ONFI EDO mode 4 */
+ hw->clk_rate = 80000000;
+ wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
+ } else {
+ /* ONFI EDO mode 5 */
+ hw->clk_rate = 100000000;
+ wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
+ }
+
+ hw->clk_rate = clk_round_rate(r->clock[0], hw->clk_rate);
+
+ /* SDR core timings are given in picoseconds */
+ period_ps = div_u64((u64)NSEC_PER_SEC * 1000, hw->clk_rate);
+
+ addr_setup_cycles = TO_CYCLES(sdr->tALS_min, period_ps);
+ data_setup_cycles = TO_CYCLES(sdr->tDS_min, period_ps);
+ data_hold_cycles = TO_CYCLES(sdr->tDH_min, period_ps);
+ busy_timeout_ps = max(sdr->tBERS_max, sdr->tPROG_max);
+ busy_timeout_cycles = TO_CYCLES(busy_timeout_ps, period_ps);
+
+ hw->timing0 = BF_GPMI_TIMING0_ADDRESS_SETUP(addr_setup_cycles) |
+ BF_GPMI_TIMING0_DATA_HOLD(data_hold_cycles) |
+ BF_GPMI_TIMING0_DATA_SETUP(data_setup_cycles);
+ hw->timing1 = BF_GPMI_TIMING1_BUSY_TIMEOUT(busy_timeout_cycles * 4096);
+
+ /*
+ * Derive NFC ideal delay from {3}:
+ *
+ * (tREA + 4000 - tRP) * 8
+ * RDN_DELAY = -----------------------
+ * RP
+ */
+ if (period_ps > dll_threshold_ps) {
+ use_half_period = true;
+ reference_period_ps = period_ps / 2;
+ } else {
+ use_half_period = false;
+ reference_period_ps = period_ps;
+ }
+
+ tRP_ps = data_setup_cycles * period_ps;
+ sample_delay_ps = (sdr->tREA_max + 4000 - tRP_ps) * 8;
+ if (sample_delay_ps > 0)
+ sample_delay_factor = sample_delay_ps / reference_period_ps;
+ else
+ sample_delay_factor = 0;
+
+ hw->ctrl1n = BF_GPMI_CTRL1_WRN_DLY_SEL(wrn_dly_sel);
+ if (sample_delay_factor)
+ hw->ctrl1n |= BF_GPMI_CTRL1_RDN_DELAY(sample_delay_factor) |
+ BM_GPMI_CTRL1_DLL_ENABLE |
+ (use_half_period ? BM_GPMI_CTRL1_HALF_PERIOD : 0);
+}
+
+static int gpmi_nfc_apply_timings(struct gpmi_nand_data *this)
+{
+ struct gpmi_nfc_hardware_timing *hw = &this->hw;
+ struct resources *r = &this->resources;
+ void __iomem *gpmi_regs = r->gpmi_regs;
+ unsigned int dll_wait_time_us;
+ int ret;
+
+	/*
+	 * Clock dividers do NOT guarantee a clean clock signal on their
+	 * outputs while the divide factor is being changed on i.MX6Q/UL/SX.
+	 * On i.MX7/8, all clock dividers do provide this guarantee.
+	 */
+ if (GPMI_IS_MX6Q(this) || GPMI_IS_MX6SX(this))
+ clk_disable_unprepare(r->clock[0]);
+
+ ret = clk_set_rate(r->clock[0], hw->clk_rate);
+ if (ret) {
+ dev_err(this->dev, "cannot set clock rate to %lu Hz: %d\n", hw->clk_rate, ret);
+ return ret;
+ }
+
+ if (GPMI_IS_MX6Q(this) || GPMI_IS_MX6SX(this)) {
+ ret = clk_prepare_enable(r->clock[0]);
+ if (ret)
+ return ret;
+ }
+
+ writel(hw->timing0, gpmi_regs + HW_GPMI_TIMING0);
+ writel(hw->timing1, gpmi_regs + HW_GPMI_TIMING1);
+
+ /*
+	 * Clear several CTRL1 fields: the DLL must be disabled when setting
+	 * RDN_DELAY or HALF_PERIOD.
+ */
+ writel(BM_GPMI_CTRL1_CLEAR_MASK, gpmi_regs + HW_GPMI_CTRL1_CLR);
+ writel(hw->ctrl1n, gpmi_regs + HW_GPMI_CTRL1_SET);
+
+ /* Wait 64 clock cycles before using the GPMI after enabling the DLL */
+ dll_wait_time_us = USEC_PER_SEC / hw->clk_rate * 64;
+ if (!dll_wait_time_us)
+ dll_wait_time_us = 1;
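+	/*
+	 * (The integer division can truncate to 0: at 100MHz, USEC_PER_SEC /
+	 * clk_rate = 1000000 / 100000000 = 0, so 64 cycles (640ns) compute
+	 * as 0us; the clamp above keeps the wait at a minimum of 1us.)
+	 */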
+
+ /* Wait for the DLL to settle. */
+ udelay(dll_wait_time_us);
+
+ return 0;
+}
+
+static int gpmi_setup_interface(struct nand_chip *chip, int chipnr,
+ const struct nand_interface_config *conf)
+{
+ struct gpmi_nand_data *this = nand_get_controller_data(chip);
+ const struct nand_sdr_timings *sdr;
+
+ /* Retrieve required NAND timings */
+ sdr = nand_get_sdr_timings(conf);
+ if (IS_ERR(sdr))
+ return PTR_ERR(sdr);
+
+ /* Only MX6 GPMI controller can reach EDO timings */
+ if (sdr->tRC_min <= 25000 && !GPMI_IS_MX6(this))
+ return -ENOTSUPP;
+
+ /* Stop here if this call was just a check */
+ if (chipnr < 0)
+ return 0;
+
+ /* Do the actual derivation of the controller timings */
+ gpmi_nfc_compute_timings(this, sdr);
+
+ this->hw.must_apply_timings = true;
+
+ return 0;
+}
+
+/* Clears a BCH interrupt. */
+static void gpmi_clear_bch(struct gpmi_nand_data *this)
+{
+ struct resources *r = &this->resources;
+ writel(BM_BCH_CTRL_COMPLETE_IRQ, r->bch_regs + HW_BCH_CTRL_CLR);
+}
+
+static struct dma_chan *get_dma_chan(struct gpmi_nand_data *this)
+{
+	/* We use DMA channel 0 to access all the nand chips. */
+ return this->dma_chans[0];
+}
+
+/* This will be called after the DMA operation is finished. */
+static void dma_irq_callback(void *param)
+{
+ struct gpmi_nand_data *this = param;
+ struct completion *dma_c = &this->dma_done;
+
+ complete(dma_c);
+}
+
+static irqreturn_t bch_irq(int irq, void *cookie)
+{
+ struct gpmi_nand_data *this = cookie;
+
+ gpmi_clear_bch(this);
+ complete(&this->bch_done);
+ return IRQ_HANDLED;
+}
+
+static int gpmi_raw_len_to_len(struct gpmi_nand_data *this, int raw_len)
+{
+ /*
+	 * raw_len is the length to read/write, including the bch data, that
+	 * we are passed in exec_op. Calculate the data length from it.
+ */
+ if (this->bch)
+ return ALIGN_DOWN(raw_len, this->bch_geometry.ecc_chunk_size);
+ else
+ return raw_len;
+}
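+
+/*
+ * For example (illustrative geometry): with 512-byte ECC chunks, a BCH
+ * raw_len of 2110 (metadata plus four chunks of data and parity) maps
+ * back to ALIGN_DOWN(2110, 512) = 2048 bytes of payload data.
+ */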
+
+/* Can we use the upper layer's buffer directly for DMA? */
+static bool prepare_data_dma(struct gpmi_nand_data *this, const void *buf,
+ int raw_len, struct scatterlist *sgl,
+ enum dma_data_direction dr)
+{
+ int ret;
+ int len = gpmi_raw_len_to_len(this, raw_len);
+
+ /* first try to map the upper buffer directly */
+ if (virt_addr_valid(buf) && !object_is_on_stack(buf)) {
+ sg_init_one(sgl, buf, len);
+ ret = dma_map_sg(this->dev, sgl, 1, dr);
+ if (ret == 0)
+ goto map_fail;
+
+ return true;
+ }
+
+map_fail:
+ /* We have to use our own DMA buffer. */
+ sg_init_one(sgl, this->data_buffer_dma, len);
+
+ if (dr == DMA_TO_DEVICE && buf != this->data_buffer_dma)
+ memcpy(this->data_buffer_dma, buf, len);
+
+ dma_map_sg(this->dev, sgl, 1, dr);
+
+ return false;
+}
+
+/* Add our own bbt descriptor */
+static uint8_t scan_ff_pattern[] = { 0xff };
+static struct nand_bbt_descr gpmi_bbt_descr = {
+ .options = 0,
+ .offs = 0,
+ .len = 1,
+ .pattern = scan_ff_pattern
+};
+
+/*
+ * We may change the layout if we can get the ECC info from the datasheet;
+ * otherwise we will use all of the (page + OOB) area.
+ */
+static int gpmi_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct gpmi_nand_data *this = nand_get_controller_data(chip);
+ struct bch_geometry *geo = &this->bch_geometry;
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = 0;
+ oobregion->length = geo->page_size - mtd->writesize;
+
+ return 0;
+}
+
+static int gpmi_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct gpmi_nand_data *this = nand_get_controller_data(chip);
+ struct bch_geometry *geo = &this->bch_geometry;
+
+ if (section)
+ return -ERANGE;
+
+	/* The oob space actually available to us. */
+ if (geo->page_size < mtd->writesize + mtd->oobsize) {
+ oobregion->offset = geo->page_size - mtd->writesize;
+ oobregion->length = mtd->oobsize - oobregion->offset;
+ }
+
+ return 0;
+}
+
+static const char * const gpmi_clks_for_mx2x[] = {
+ "gpmi_io",
+};
+
+static const struct mtd_ooblayout_ops gpmi_ooblayout_ops = {
+ .ecc = gpmi_ooblayout_ecc,
+ .free = gpmi_ooblayout_free,
+};
+
+static const struct gpmi_devdata gpmi_devdata_imx23 = {
+ .type = IS_MX23,
+ .bch_max_ecc_strength = 20,
+ .max_chain_delay = 16000,
+ .clks = gpmi_clks_for_mx2x,
+ .clks_count = ARRAY_SIZE(gpmi_clks_for_mx2x),
+};
+
+static const struct gpmi_devdata gpmi_devdata_imx28 = {
+ .type = IS_MX28,
+ .bch_max_ecc_strength = 20,
+ .max_chain_delay = 16000,
+ .clks = gpmi_clks_for_mx2x,
+ .clks_count = ARRAY_SIZE(gpmi_clks_for_mx2x),
+};
+
+static const char * const gpmi_clks_for_mx6[] = {
+ "gpmi_io", "gpmi_apb", "gpmi_bch", "gpmi_bch_apb", "per1_bch",
+};
+
+static const struct gpmi_devdata gpmi_devdata_imx6q = {
+ .type = IS_MX6Q,
+ .bch_max_ecc_strength = 40,
+ .max_chain_delay = 12000,
+ .clks = gpmi_clks_for_mx6,
+ .clks_count = ARRAY_SIZE(gpmi_clks_for_mx6),
+};
+
+static const struct gpmi_devdata gpmi_devdata_imx6sx = {
+ .type = IS_MX6SX,
+ .bch_max_ecc_strength = 62,
+ .max_chain_delay = 12000,
+ .clks = gpmi_clks_for_mx6,
+ .clks_count = ARRAY_SIZE(gpmi_clks_for_mx6),
+};
+
+static const char * const gpmi_clks_for_mx7d[] = {
+ "gpmi_io", "gpmi_bch_apb",
+};
+
+static const struct gpmi_devdata gpmi_devdata_imx7d = {
+ .type = IS_MX7D,
+ .bch_max_ecc_strength = 62,
+ .max_chain_delay = 12000,
+ .clks = gpmi_clks_for_mx7d,
+ .clks_count = ARRAY_SIZE(gpmi_clks_for_mx7d),
+};
+
+static int acquire_register_block(struct gpmi_nand_data *this,
+ const char *res_name)
+{
+ struct platform_device *pdev = this->pdev;
+ struct resources *res = &this->resources;
+ struct resource *r;
+ void __iomem *p;
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name);
+ p = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
+
+ if (!strcmp(res_name, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME))
+ res->gpmi_regs = p;
+ else if (!strcmp(res_name, GPMI_NAND_BCH_REGS_ADDR_RES_NAME))
+ res->bch_regs = p;
+ else
+		dev_err(this->dev, "unknown resource name: %s\n", res_name);
+
+ return 0;
+}
+
+static int acquire_bch_irq(struct gpmi_nand_data *this, irq_handler_t irq_h)
+{
+ struct platform_device *pdev = this->pdev;
+ const char *res_name = GPMI_NAND_BCH_INTERRUPT_RES_NAME;
+ struct resource *r;
+ int err;
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, res_name);
+ if (!r) {
+ dev_err(this->dev, "Can't get resource for %s\n", res_name);
+ return -ENODEV;
+ }
+
+ err = devm_request_irq(this->dev, r->start, irq_h, 0, res_name, this);
+ if (err)
+ dev_err(this->dev, "error requesting BCH IRQ\n");
+
+ return err;
+}
+
+static void release_dma_channels(struct gpmi_nand_data *this)
+{
+ unsigned int i;
+ for (i = 0; i < DMA_CHANS; i++)
+ if (this->dma_chans[i]) {
+ dma_release_channel(this->dma_chans[i]);
+ this->dma_chans[i] = NULL;
+ }
+}
+
+static int acquire_dma_channels(struct gpmi_nand_data *this)
+{
+ struct platform_device *pdev = this->pdev;
+ struct dma_chan *dma_chan;
+ int ret = 0;
+
+ /* request dma channel */
+ dma_chan = dma_request_chan(&pdev->dev, "rx-tx");
+ if (IS_ERR(dma_chan)) {
+ ret = dev_err_probe(this->dev, PTR_ERR(dma_chan),
+ "DMA channel request failed\n");
+ release_dma_channels(this);
+ } else {
+ this->dma_chans[0] = dma_chan;
+ }
+
+ return ret;
+}
+
+static int gpmi_get_clks(struct gpmi_nand_data *this)
+{
+ struct resources *r = &this->resources;
+ struct clk *clk;
+ int err, i;
+
+ for (i = 0; i < this->devdata->clks_count; i++) {
+ clk = devm_clk_get(this->dev, this->devdata->clks[i]);
+ if (IS_ERR(clk)) {
+ err = PTR_ERR(clk);
+ goto err_clock;
+ }
+
+ r->clock[i] = clk;
+ }
+
+ return 0;
+
+err_clock:
+	dev_dbg(this->dev, "failed to find the clocks.\n");
+ return err;
+}
+
+static int acquire_resources(struct gpmi_nand_data *this)
+{
+ int ret;
+
+ ret = acquire_register_block(this, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME);
+ if (ret)
+ goto exit_regs;
+
+ ret = acquire_register_block(this, GPMI_NAND_BCH_REGS_ADDR_RES_NAME);
+ if (ret)
+ goto exit_regs;
+
+ ret = acquire_bch_irq(this, bch_irq);
+ if (ret)
+ goto exit_regs;
+
+ ret = acquire_dma_channels(this);
+ if (ret)
+ goto exit_regs;
+
+ ret = gpmi_get_clks(this);
+ if (ret)
+ goto exit_clock;
+ return 0;
+
+exit_clock:
+ release_dma_channels(this);
+exit_regs:
+ return ret;
+}
+
+static void release_resources(struct gpmi_nand_data *this)
+{
+ release_dma_channels(this);
+}
+
+static void gpmi_free_dma_buffer(struct gpmi_nand_data *this)
+{
+ struct device *dev = this->dev;
+ struct bch_geometry *geo = &this->bch_geometry;
+
+ if (this->auxiliary_virt && virt_addr_valid(this->auxiliary_virt))
+ dma_free_coherent(dev, geo->auxiliary_size,
+ this->auxiliary_virt,
+ this->auxiliary_phys);
+ kfree(this->data_buffer_dma);
+ kfree(this->raw_buffer);
+
+ this->data_buffer_dma = NULL;
+ this->raw_buffer = NULL;
+}
+
+/* Allocate the DMA buffers */
+static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this)
+{
+ struct bch_geometry *geo = &this->bch_geometry;
+ struct device *dev = this->dev;
+ struct mtd_info *mtd = nand_to_mtd(&this->nand);
+
+ /*
+ * [2] Allocate a read/write data buffer.
+	 * gpmi_alloc_dma_buffer() can be called twice: we allocate a
+	 * PAGE_SIZE length buffer if it is called before the NAND
+	 * identification, and a buffer of the real NAND page size when it
+	 * is called afterwards.
+ */
+ this->data_buffer_dma = kzalloc(mtd->writesize ?: PAGE_SIZE,
+ GFP_DMA | GFP_KERNEL);
+ if (this->data_buffer_dma == NULL)
+ goto error_alloc;
+
+ this->auxiliary_virt = dma_alloc_coherent(dev, geo->auxiliary_size,
+ &this->auxiliary_phys, GFP_DMA);
+ if (!this->auxiliary_virt)
+ goto error_alloc;
+
+ this->raw_buffer = kzalloc((mtd->writesize ?: PAGE_SIZE) + mtd->oobsize, GFP_KERNEL);
+ if (!this->raw_buffer)
+ goto error_alloc;
+
+ return 0;
+
+error_alloc:
+ gpmi_free_dma_buffer(this);
+ return -ENOMEM;
+}
+
+/*
+ * Handles block mark swapping.
+ * It can be called to swap the block mark, or to swap it back,
+ * because the two operations are the same.
+ */
+static void block_mark_swapping(struct gpmi_nand_data *this,
+ void *payload, void *auxiliary)
+{
+ struct bch_geometry *nfc_geo = &this->bch_geometry;
+ unsigned char *p;
+ unsigned char *a;
+ unsigned int bit;
+ unsigned char mask;
+ unsigned char from_data;
+ unsigned char from_oob;
+
+ if (!this->swap_block_mark)
+ return;
+
+ /*
+ * If control arrives here, we're swapping. Make some convenience
+ * variables.
+ */
+ bit = nfc_geo->block_mark_bit_offset;
+ p = payload + nfc_geo->block_mark_byte_offset;
+ a = auxiliary;
+
+ /*
+ * Get the byte from the data area that overlays the block mark. Since
+ * the ECC engine applies its own view to the bits in the page, the
+ * physical block mark won't (in general) appear on a byte boundary in
+ * the data.
+ */
+ from_data = (p[0] >> bit) | (p[1] << (8 - bit));
+
+ /* Get the byte from the OOB. */
+ from_oob = a[0];
+
+ /* Swap them. */
+ a[0] = from_data;
+
+ mask = (0x1 << bit) - 1;
+ p[0] = (p[0] & mask) | (from_oob << bit);
+
+ mask = ~0 << bit;
+ p[1] = (p[1] & mask) | (from_oob >> (8 - bit));
+}
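+
+/*
+ * For example, with an illustrative block_mark_bit_offset of 3, the byte
+ * overlaying the block mark is reassembled from two adjacent payload
+ * bytes as (p[0] >> 3) | (p[1] << 5), and the OOB byte is scattered back
+ * the same way: its low 5 bits into the top of p[0], its high 3 bits
+ * into the bottom of p[1].
+ */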
+
+static int gpmi_count_bitflips(struct nand_chip *chip, void *buf, int first,
+ int last, int meta)
+{
+ struct gpmi_nand_data *this = nand_get_controller_data(chip);
+ struct bch_geometry *nfc_geo = &this->bch_geometry;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int i;
+ unsigned char *status;
+ unsigned int max_bitflips = 0;
+
+ /* Loop over status bytes, accumulating ECC status. */
+ status = this->auxiliary_virt + ALIGN(meta, 4);
+
+ for (i = first; i < last; i++, status++) {
+ if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED))
+ continue;
+
+ if (*status == STATUS_UNCORRECTABLE) {
+ int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
+ u8 *eccbuf = this->raw_buffer;
+ int offset, bitoffset;
+ int eccbytes;
+ int flips;
+
+ /* Read ECC bytes into our internal raw_buffer */
+ offset = nfc_geo->metadata_size * 8;
+ offset += ((8 * nfc_geo->ecc_chunk_size) + eccbits) * (i + 1);
+ offset -= eccbits;
+ bitoffset = offset % 8;
+ eccbytes = DIV_ROUND_UP(offset + eccbits, 8);
+ offset /= 8;
+ eccbytes -= offset;
+ nand_change_read_column_op(chip, offset, eccbuf,
+ eccbytes, false);
+
+ /*
+ * ECC data are not byte aligned and we may have
+ * in-band data in the first and last byte of
+ * eccbuf. Set non-eccbits to one so that
+ * nand_check_erased_ecc_chunk() does not count them
+ * as bitflips.
+ */
+ if (bitoffset)
+ eccbuf[0] |= GENMASK(bitoffset - 1, 0);
+
+ bitoffset = (bitoffset + eccbits) % 8;
+ if (bitoffset)
+ eccbuf[eccbytes - 1] |= GENMASK(7, bitoffset);
+
+ /*
+ * The ECC hardware has an uncorrectable ECC status
+ * code in case we have bitflips in an erased page. As
+ * nothing was written into this subpage the ECC is
+ * obviously wrong and we can not trust it. We assume
+ * at this point that we are reading an erased page and
+			 * try to correct the bitflips in the buffer, up to
+			 * ecc_strength bitflips. If this is a page with random
+			 * data, we exceed this number of bitflips and get an
+			 * ECC failure. Otherwise we use the corrected buffer.
+ */
+ if (i == 0) {
+ /* The first block includes metadata */
+ flips = nand_check_erased_ecc_chunk(
+ buf + i * nfc_geo->ecc_chunk_size,
+ nfc_geo->ecc_chunk_size,
+ eccbuf, eccbytes,
+ this->auxiliary_virt,
+ nfc_geo->metadata_size,
+ nfc_geo->ecc_strength);
+ } else {
+ flips = nand_check_erased_ecc_chunk(
+ buf + i * nfc_geo->ecc_chunk_size,
+ nfc_geo->ecc_chunk_size,
+ eccbuf, eccbytes,
+ NULL, 0,
+ nfc_geo->ecc_strength);
+ }
+
+ if (flips > 0) {
+ max_bitflips = max_t(unsigned int, max_bitflips,
+ flips);
+ mtd->ecc_stats.corrected += flips;
+ continue;
+ }
+
+ mtd->ecc_stats.failed++;
+ continue;
+ }
+
+ mtd->ecc_stats.corrected += *status;
+ max_bitflips = max_t(unsigned int, max_bitflips, *status);
+ }
+
+ return max_bitflips;
+}
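+
+/*
+ * To illustrate the raw ECC byte math above (illustrative geometry:
+ * 10-byte metadata, 512-byte chunks, G = 13, E = 8, so eccbits = 104):
+ * for chunk i = 0, offset = 80 + (4096 + 104) - 104 = 4176 bits, which
+ * is byte aligned (bitoffset = 0), so eccbytes =
+ * DIV_ROUND_UP(4176 + 104, 8) - 4176 / 8 = 535 - 522 = 13 bytes are
+ * read back for nand_check_erased_ecc_chunk().
+ */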
+
+static void gpmi_bch_layout_std(struct gpmi_nand_data *this)
+{
+ struct bch_geometry *geo = &this->bch_geometry;
+ unsigned int ecc_strength = geo->ecc_strength >> 1;
+ unsigned int gf_len = geo->gf_len;
+ unsigned int block_size = geo->ecc_chunk_size;
+
+ this->bch_flashlayout0 =
+ BF_BCH_FLASH0LAYOUT0_NBLOCKS(geo->ecc_chunk_count - 1) |
+ BF_BCH_FLASH0LAYOUT0_META_SIZE(geo->metadata_size) |
+ BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this) |
+ BF_BCH_FLASH0LAYOUT0_GF(gf_len, this) |
+ BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size, this);
+
+ this->bch_flashlayout1 =
+ BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(geo->page_size) |
+ BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this) |
+ BF_BCH_FLASH0LAYOUT1_GF(gf_len, this) |
+ BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size, this);
+}
+
+static int gpmi_ecc_read_page(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct gpmi_nand_data *this = nand_get_controller_data(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct bch_geometry *geo = &this->bch_geometry;
+ unsigned int max_bitflips;
+ int ret;
+
+ gpmi_bch_layout_std(this);
+ this->bch = true;
+
+ ret = nand_read_page_op(chip, page, 0, buf, geo->page_size);
+ if (ret)
+ return ret;
+
+ max_bitflips = gpmi_count_bitflips(chip, buf, 0,
+ geo->ecc_chunk_count,
+ geo->auxiliary_status_offset);
+
+ /* handle the block mark swapping */
+ block_mark_swapping(this, buf, this->auxiliary_virt);
+
+ if (oob_required) {
+ /*
+ * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob()
+ * for details about our policy for delivering the OOB.
+ *
+ * We fill the caller's buffer with set bits, and then copy the
+		 * block mark to the caller's buffer. Note that, if block mark
+ * swapping was necessary, it has already been done, so we can
+ * rely on the first byte of the auxiliary buffer to contain
+ * the block mark.
+ */
+ memset(chip->oob_poi, ~0, mtd->oobsize);
+ chip->oob_poi[0] = ((uint8_t *)this->auxiliary_virt)[0];
+ }
+
+ return max_bitflips;
+}
+
+/* Fake a virtual small page for the subpage read */
+static int gpmi_ecc_read_subpage(struct nand_chip *chip, uint32_t offs,
+ uint32_t len, uint8_t *buf, int page)
+{
+ struct gpmi_nand_data *this = nand_get_controller_data(chip);
+ struct bch_geometry *geo = &this->bch_geometry;
+ int size = chip->ecc.size; /* ECC chunk size */
+ int meta, n, page_size;
+ unsigned int max_bitflips;
+ unsigned int ecc_strength;
+ int first, last, marker_pos;
+ int ecc_parity_size;
+ int col = 0;
+ int ret;
+
+ /* The size of ECC parity */
+ ecc_parity_size = geo->gf_len * geo->ecc_strength / 8;
+
+ /* Align it with the chunk size */
+ first = offs / size;
+ last = (offs + len - 1) / size;
+
+ if (this->swap_block_mark) {
+ /*
+ * Find the chunk which contains the Block Marker.
+ * If this chunk is in the range of [first, last],
+ * we have to read out the whole page.
+		 * Why? Because we swapped the data at the position of the Block
+		 * Marker with the metadata, which is bound to chunk 0.
+ */
+ marker_pos = geo->block_mark_byte_offset / size;
+ if (last >= marker_pos && first <= marker_pos) {
+ dev_dbg(this->dev,
+ "page:%d, first:%d, last:%d, marker at:%d\n",
+ page, first, last, marker_pos);
+ return gpmi_ecc_read_page(chip, buf, 0, page);
+ }
+ }
+
+ meta = geo->metadata_size;
+ if (first) {
+ col = meta + (size + ecc_parity_size) * first;
+ meta = 0;
+ buf = buf + first * size;
+ }
+
+ n = last - first + 1;
+ page_size = meta + (size + ecc_parity_size) * n;
+ ecc_strength = geo->ecc_strength >> 1;
+
+ this->bch_flashlayout0 = BF_BCH_FLASH0LAYOUT0_NBLOCKS(n - 1) |
+ BF_BCH_FLASH0LAYOUT0_META_SIZE(meta) |
+ BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this) |
+ BF_BCH_FLASH0LAYOUT0_GF(geo->gf_len, this) |
+ BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(geo->ecc_chunk_size, this);
+
+ this->bch_flashlayout1 = BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size) |
+ BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this) |
+ BF_BCH_FLASH0LAYOUT1_GF(geo->gf_len, this) |
+ BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(geo->ecc_chunk_size, this);
+
+ this->bch = true;
+
+ ret = nand_read_page_op(chip, page, col, buf, page_size);
+ if (ret)
+ return ret;
+
+ dev_dbg(this->dev, "page:%d(%d:%d)%d, chunk:(%d:%d), BCH PG size:%d\n",
+ page, offs, len, col, first, n, page_size);
+
+ max_bitflips = gpmi_count_bitflips(chip, buf, first, last, meta);
+
+ return max_bitflips;
+}
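+
+/*
+ * Subpage example (same illustrative geometry: 512-byte chunks, 13
+ * parity bytes per chunk, 10-byte metadata): reading only chunks 1 and
+ * 2 of a 2K page starts at col = 10 + (512 + 13) * 1 = 535, drops the
+ * metadata (meta = 0, buf advances by one chunk) and programs the BCH
+ * for a virtual page of page_size = (512 + 13) * 2 = 1050 bytes.
+ */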
+
+static int gpmi_ecc_write_page(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct gpmi_nand_data *this = nand_get_controller_data(chip);
+ struct bch_geometry *nfc_geo = &this->bch_geometry;
+ int ret;
+
+ dev_dbg(this->dev, "ecc write page.\n");
+
+ gpmi_bch_layout_std(this);
+ this->bch = true;
+
+ memcpy(this->auxiliary_virt, chip->oob_poi, nfc_geo->auxiliary_size);
+
+ if (this->swap_block_mark) {
+ /*
+ * When doing bad block marker swapping we must always copy the
+ * input buffer as we can't modify the const buffer.
+ */
+ memcpy(this->data_buffer_dma, buf, mtd->writesize);
+ buf = this->data_buffer_dma;
+ block_mark_swapping(this, this->data_buffer_dma,
+ this->auxiliary_virt);
+ }
+
+ ret = nand_prog_page_op(chip, page, 0, buf, nfc_geo->page_size);
+
+ return ret;
+}
+
+/*
+ * There are several places in this driver where we have to handle the OOB and
+ * block marks. This is the function where things are the most complicated, so
+ * this is where we try to explain it all. All the other places refer back to
+ * here.
+ *
+ * These are the rules, in order of decreasing importance:
+ *
+ * 1) Nothing the caller does can be allowed to imperil the block mark.
+ *
+ * 2) In read operations, the first byte of the OOB we return must reflect the
+ * true state of the block mark, no matter where that block mark appears in
+ * the physical page.
+ *
+ * 3) ECC-based read operations return an OOB full of set bits (since we never
+ * allow ECC-based writes to the OOB, it doesn't matter what ECC-based reads
+ * return).
+ *
+ * 4) "Raw" read operations return a direct view of the physical bytes in the
+ * page, using the conventional definition of which bytes are data and which
+ * are OOB. This gives the caller a way to see the actual, physical bytes
+ * in the page, without the distortions applied by our ECC engine.
+ *
+ *
+ * What we do for this specific read operation depends on two questions:
+ *
+ * 1) Are we doing a "raw" read, or an ECC-based read?
+ *
+ * 2) Are we using block mark swapping or transcription?
+ *
+ * There are four cases, illustrated by the following Karnaugh map:
+ *
+ * | Raw | ECC-based |
+ * -------------+-------------------------+-------------------------+
+ * | Read the conventional | |
+ * | OOB at the end of the | |
+ * Swapping | page and return it. It | |
+ * | contains exactly what | |
+ * | we want. | Read the block mark and |
+ * -------------+-------------------------+ return it in a buffer |
+ * | Read the conventional | full of set bits. |
+ * | OOB at the end of the | |
+ * | page and also the block | |
+ * Transcribing | mark in the metadata. | |
+ * | Copy the block mark | |
+ * | into the first byte of | |
+ * | the OOB. | |
+ * -------------+-------------------------+-------------------------+
+ *
+ * Note that we break rule #4 in the Transcribing/Raw case because we're not
+ * giving an accurate view of the actual, physical bytes in the page (we're
+ * overwriting the block mark). That's OK because it's more important to follow
+ * rule #2.
+ *
+ * It turns out that knowing whether we want an "ECC-based" or "raw" read is not
+ * easy. When reading a page, for example, the NAND Flash MTD code calls our
+ * ecc.read_page or ecc.read_page_raw function. Thus, the fact that MTD wants an
+ * ECC-based or raw view of the page is implicit in which function it calls
+ * (there is a similar pair of ECC-based/raw functions for writing).
+ */
+static int gpmi_ecc_read_oob(struct nand_chip *chip, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct gpmi_nand_data *this = nand_get_controller_data(chip);
+ int ret;
+
+ /* clear the OOB buffer */
+ memset(chip->oob_poi, ~0, mtd->oobsize);
+
+ /* Read out the conventional OOB. */
+ ret = nand_read_page_op(chip, page, mtd->writesize, chip->oob_poi,
+ mtd->oobsize);
+ if (ret)
+ return ret;
+
+ /*
+ * Now, we want to make sure the block mark is correct. In the
+ * non-transcribing case (!GPMI_IS_MX23()), we already have it.
+ * Otherwise, we need to explicitly read it.
+ */
+ if (GPMI_IS_MX23(this)) {
+ /* Read the block mark into the first byte of the OOB buffer. */
+ ret = nand_read_page_op(chip, page, 0, chip->oob_poi, 1);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int gpmi_ecc_write_oob(struct nand_chip *chip, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct mtd_oob_region of = { };
+
+	/* Do we have an available oob area? */
+ mtd_ooblayout_free(mtd, 0, &of);
+ if (!of.length)
+ return -EPERM;
+
+ if (!nand_is_slc(chip))
+ return -EPERM;
+
+ return nand_prog_page_op(chip, page, mtd->writesize + of.offset,
+ chip->oob_poi + of.offset, of.length);
+}
+
+/*
+ * This function reads a NAND page without involving the ECC engine (no HW
+ * ECC correction).
+ * The tricky part in the GPMI/BCH controller is that it stores ECC bits
+ * inline (interleaved with payload DATA) and does not align data chunks
+ * on byte boundaries.
+ * We thus need to take care when moving the payload data and ECC bits
+ * stored in the page into the provided buffers, which is why we're using
+ * nand_extract_bits().
+ *
+ * See set_geometry_by_ecc_info inline comments to have a full description
+ * of the layout used by the GPMI controller.
+ */
+static int gpmi_ecc_read_page_raw(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct gpmi_nand_data *this = nand_get_controller_data(chip);
+ struct bch_geometry *nfc_geo = &this->bch_geometry;
+ int eccsize = nfc_geo->ecc_chunk_size;
+ int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
+ u8 *tmp_buf = this->raw_buffer;
+ size_t src_bit_off;
+ size_t oob_bit_off;
+ size_t oob_byte_off;
+ uint8_t *oob = chip->oob_poi;
+ int step;
+ int ret;
+
+ ret = nand_read_page_op(chip, page, 0, tmp_buf,
+ mtd->writesize + mtd->oobsize);
+ if (ret)
+ return ret;
+
+ /*
+ * If required, swap the bad block marker and the data stored in the
+ * metadata section, so that we don't wrongly consider a block as bad.
+ *
+ * See the layout description for a detailed explanation on why this
+ * is needed.
+ */
+ if (this->swap_block_mark)
+ swap(tmp_buf[0], tmp_buf[mtd->writesize]);
+
+ /*
+ * Copy the metadata section into the oob buffer (this section is
+ * guaranteed to be aligned on a byte boundary).
+ */
+ if (oob_required)
+ memcpy(oob, tmp_buf, nfc_geo->metadata_size);
+
+ oob_bit_off = nfc_geo->metadata_size * 8;
+ src_bit_off = oob_bit_off;
+
+ /* Extract interleaved payload data and ECC bits */
+ for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
+ if (buf)
+ nand_extract_bits(buf, step * eccsize * 8, tmp_buf,
+ src_bit_off, eccsize * 8);
+ src_bit_off += eccsize * 8;
+
+		/* Pad the last ECC block so it ends on a byte boundary */
+ if (step == nfc_geo->ecc_chunk_count - 1 &&
+ (oob_bit_off + eccbits) % 8)
+ eccbits += 8 - ((oob_bit_off + eccbits) % 8);
+
+ if (oob_required)
+ nand_extract_bits(oob, oob_bit_off, tmp_buf,
+ src_bit_off, eccbits);
+
+ src_bit_off += eccbits;
+ oob_bit_off += eccbits;
+ }
+
+ if (oob_required) {
+ oob_byte_off = oob_bit_off / 8;
+
+ if (oob_byte_off < mtd->oobsize)
+ memcpy(oob + oob_byte_off,
+ tmp_buf + mtd->writesize + oob_byte_off,
+ mtd->oobsize - oob_byte_off);
+ }
+
+ return 0;
+}
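+
+/*
+ * With the illustrative geometry used above (10-byte metadata, 512-byte
+ * chunks, 104 ECC bits per chunk), the raw page is walked as: metadata
+ * in bits 0..79, chunk 0 data in bits 80..4175, its ECC in bits
+ * 4176..4279, chunk 1 data in bits 4280..8375, and so on; the last
+ * chunk's ECC is padded to a byte boundary only when needed (here 104
+ * bits is already a whole number of bytes, so no padding occurs).
+ */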
+
+/*
+ * This function writes a NAND page without involving the ECC engine (no HW
+ * ECC generation).
+ * The tricky part in the GPMI/BCH controller is that it stores ECC bits
+ * inline (interleaved with payload DATA) and does not align data chunks
+ * on byte boundaries.
+ * We thus need to take care to move the OOB area to the right place in
+ * the final page, which is why we're using nand_extract_bits().
+ *
+ * See set_geometry_by_ecc_info inline comments to have a full description
+ * of the layout used by the GPMI controller.
+ */
+static int gpmi_ecc_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct gpmi_nand_data *this = nand_get_controller_data(chip);
+ struct bch_geometry *nfc_geo = &this->bch_geometry;
+ int eccsize = nfc_geo->ecc_chunk_size;
+ int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
+ u8 *tmp_buf = this->raw_buffer;
+ uint8_t *oob = chip->oob_poi;
+ size_t dst_bit_off;
+ size_t oob_bit_off;
+ size_t oob_byte_off;
+ int step;
+
+ /*
+	 * Initialize all bits to 1 in case we don't have a buffer for the
+	 * payload or oob data, so that unspecified bits of data are left
+	 * in their initial (erased) state.
+ */
+ if (!buf || !oob_required)
+ memset(tmp_buf, 0xff, mtd->writesize + mtd->oobsize);
+
+ /*
+ * First copy the metadata section (stored in oob buffer) at the
+ * beginning of the page, as imposed by the GPMI layout.
+ */
+ memcpy(tmp_buf, oob, nfc_geo->metadata_size);
+ oob_bit_off = nfc_geo->metadata_size * 8;
+ dst_bit_off = oob_bit_off;
+
+ /* Interleave payload data and ECC bits */
+ for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
+ if (buf)
+ nand_extract_bits(tmp_buf, dst_bit_off, buf,
+ step * eccsize * 8, eccsize * 8);
+ dst_bit_off += eccsize * 8;
+
+		/* Pad the last ECC block so it ends on a byte boundary */
+ if (step == nfc_geo->ecc_chunk_count - 1 &&
+ (oob_bit_off + eccbits) % 8)
+ eccbits += 8 - ((oob_bit_off + eccbits) % 8);
+
+ if (oob_required)
+ nand_extract_bits(tmp_buf, dst_bit_off, oob,
+ oob_bit_off, eccbits);
+
+ dst_bit_off += eccbits;
+ oob_bit_off += eccbits;
+ }
+
+ oob_byte_off = oob_bit_off / 8;
+
+ if (oob_required && oob_byte_off < mtd->oobsize)
+ memcpy(tmp_buf + mtd->writesize + oob_byte_off,
+ oob + oob_byte_off, mtd->oobsize - oob_byte_off);
+
+ /*
+ * If required, swap the bad block marker and the first byte of the
+ * metadata section, so that we don't modify the bad block marker.
+ *
+ * See the layout description for a detailed explanation on why this
+ * is needed.
+ */
+ if (this->swap_block_mark)
+ swap(tmp_buf[0], tmp_buf[mtd->writesize]);
+
+ return nand_prog_page_op(chip, page, 0, tmp_buf,
+ mtd->writesize + mtd->oobsize);
+}
+
+static int gpmi_ecc_read_oob_raw(struct nand_chip *chip, int page)
+{
+ return gpmi_ecc_read_page_raw(chip, NULL, 1, page);
+}
+
+static int gpmi_ecc_write_oob_raw(struct nand_chip *chip, int page)
+{
+ return gpmi_ecc_write_page_raw(chip, NULL, 1, page);
+}
+
+static int gpmi_block_markbad(struct nand_chip *chip, loff_t ofs)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct gpmi_nand_data *this = nand_get_controller_data(chip);
+ int ret = 0;
+ uint8_t *block_mark;
+ int column, page, chipnr;
+
+ chipnr = (int)(ofs >> chip->chip_shift);
+ nand_select_target(chip, chipnr);
+
+ column = !GPMI_IS_MX23(this) ? mtd->writesize : 0;
+
+ /* Write the block mark. */
+ block_mark = this->data_buffer_dma;
+ block_mark[0] = 0; /* bad block marker */
+
+ /* Shift to get page */
+ page = (int)(ofs >> chip->page_shift);
+
+ ret = nand_prog_page_op(chip, page, column, block_mark, 1);
+
+ nand_deselect_target(chip);
+
+ return ret;
+}
+
+static int nand_boot_set_geometry(struct gpmi_nand_data *this)
+{
+ struct boot_rom_geometry *geometry = &this->rom_geometry;
+
+ /*
+ * Set the boot block stride size.
+ *
+ * In principle, we should be reading this from the OTP bits, since
+ * that's where the ROM is going to get it. In fact, we don't have any
+ * way to read the OTP bits, so we go with the default and hope for the
+ * best.
+ */
+ geometry->stride_size_in_pages = 64;
+
+ /*
+ * Set the search area stride exponent.
+ *
+ * In principle, we should be reading this from the OTP bits, since
+ * that's where the ROM is going to get it. In fact, we don't have any
+ * way to read the OTP bits, so we go with the default and hope for the
+ * best.
+ */
+ geometry->search_area_stride_exponent = 2;
+ return 0;
+}
+
+static const char *fingerprint = "STMP";
+static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
+{
+ struct boot_rom_geometry *rom_geo = &this->rom_geometry;
+ struct device *dev = this->dev;
+ struct nand_chip *chip = &this->nand;
+ unsigned int search_area_size_in_strides;
+ unsigned int stride;
+ unsigned int page;
+ u8 *buffer = nand_get_data_buf(chip);
+ int found_an_ncb_fingerprint = false;
+ int ret;
+
+ /* Compute the number of strides in a search area. */
+ search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
+
+ nand_select_target(chip, 0);
+
+ /*
+ * Loop through the first search area, looking for the NCB fingerprint.
+ */
+ dev_dbg(dev, "Scanning for an NCB fingerprint...\n");
+
+ for (stride = 0; stride < search_area_size_in_strides; stride++) {
+ /* Compute the page addresses. */
+ page = stride * rom_geo->stride_size_in_pages;
+
+ dev_dbg(dev, "Looking for a fingerprint in page 0x%x\n", page);
+
+ /*
+ * Read the NCB fingerprint. The fingerprint is four bytes long
+		 * and starts at byte offset 12 of the page.
+ */
+ ret = nand_read_page_op(chip, page, 12, buffer,
+ strlen(fingerprint));
+ if (ret)
+ continue;
+
+ /* Look for the fingerprint. */
+ if (!memcmp(buffer, fingerprint, strlen(fingerprint))) {
+ found_an_ncb_fingerprint = true;
+ break;
+ }
+
+ }
+
+ nand_deselect_target(chip);
+
+ if (found_an_ncb_fingerprint)
+ dev_dbg(dev, "\tFound a fingerprint\n");
+ else
+ dev_dbg(dev, "\tNo fingerprint found\n");
+ return found_an_ncb_fingerprint;
+}
+
+/* Writes a transcription stamp. */
+static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
+{
+ struct device *dev = this->dev;
+ struct boot_rom_geometry *rom_geo = &this->rom_geometry;
+ struct nand_chip *chip = &this->nand;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned int block_size_in_pages;
+ unsigned int search_area_size_in_strides;
+ unsigned int search_area_size_in_pages;
+ unsigned int search_area_size_in_blocks;
+ unsigned int block;
+ unsigned int stride;
+ unsigned int page;
+ u8 *buffer = nand_get_data_buf(chip);
+ int status;
+
+ /* Compute the search area geometry. */
+ block_size_in_pages = mtd->erasesize / mtd->writesize;
+ search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
+ search_area_size_in_pages = search_area_size_in_strides *
+ rom_geo->stride_size_in_pages;
+ search_area_size_in_blocks =
+ (search_area_size_in_pages + (block_size_in_pages - 1)) /
+ block_size_in_pages;
+
+ dev_dbg(dev, "Search Area Geometry :\n");
+ dev_dbg(dev, "\tin Blocks : %u\n", search_area_size_in_blocks);
+ dev_dbg(dev, "\tin Strides: %u\n", search_area_size_in_strides);
+ dev_dbg(dev, "\tin Pages : %u\n", search_area_size_in_pages);
+
+ nand_select_target(chip, 0);
+
+ /* Loop over blocks in the first search area, erasing them. */
+ dev_dbg(dev, "Erasing the search area...\n");
+
+ for (block = 0; block < search_area_size_in_blocks; block++) {
+ /* Erase this block. */
+ dev_dbg(dev, "\tErasing block 0x%x\n", block);
+ status = nand_erase_op(chip, block);
+ if (status)
+ dev_err(dev, "[%s] Erase failed.\n", __func__);
+ }
+
+ /* Write the NCB fingerprint into the page buffer. */
+ memset(buffer, ~0, mtd->writesize);
+ memcpy(buffer + 12, fingerprint, strlen(fingerprint));
+
+ /* Loop through the first search area, writing NCB fingerprints. */
+ dev_dbg(dev, "Writing NCB fingerprints...\n");
+ for (stride = 0; stride < search_area_size_in_strides; stride++) {
+ /* Compute the page addresses. */
+ page = stride * rom_geo->stride_size_in_pages;
+
+ /* Write the first page of the current stride. */
+ dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page);
+
+ status = chip->ecc.write_page_raw(chip, buffer, 0, page);
+ if (status)
+ dev_err(dev, "[%s] Write failed.\n", __func__);
+ }
+
+ nand_deselect_target(chip);
+
+ return 0;
+}
+
+static int mx23_boot_init(struct gpmi_nand_data *this)
+{
+ struct device *dev = this->dev;
+ struct nand_chip *chip = &this->nand;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned int block_count;
+ unsigned int block;
+ int chipnr;
+ int page;
+ loff_t byte;
+ uint8_t block_mark;
+ int ret = 0;
+
+ /*
+ * If control arrives here, we can't use block mark swapping, which
+ * means we're forced to use transcription. First, scan for the
+ * transcription stamp. If we find it, then we don't have to do
+ * anything -- the block marks are already transcribed.
+ */
+ if (mx23_check_transcription_stamp(this))
+ return 0;
+
+ /*
+	 * If control arrives here, we couldn't find a transcription stamp,
+	 * so we presume the block marks are in the conventional location.
+ */
+ dev_dbg(dev, "Transcribing bad block marks...\n");
+
+ /* Compute the number of blocks in the entire medium. */
+ block_count = nanddev_eraseblocks_per_target(&chip->base);
+
+ /*
+ * Loop over all the blocks in the medium, transcribing block marks as
+ * we go.
+ */
+ for (block = 0; block < block_count; block++) {
+ /*
+ * Compute the chip, page and byte addresses for this block's
+ * conventional mark.
+ */
+ chipnr = block >> (chip->chip_shift - chip->phys_erase_shift);
+ page = block << (chip->phys_erase_shift - chip->page_shift);
+ byte = block << chip->phys_erase_shift;
+
+ /* Send the command to read the conventional block mark. */
+ nand_select_target(chip, chipnr);
+ ret = nand_read_page_op(chip, page, mtd->writesize, &block_mark,
+ 1);
+ nand_deselect_target(chip);
+
+ if (ret)
+ continue;
+
+ /*
+ * Check if the block is marked bad. If so, we need to mark it
+ * again, but this time the result will be a mark in the
+ * location where we transcribe block marks.
+ */
+ if (block_mark != 0xff) {
+ dev_dbg(dev, "Transcribing mark in block %u\n", block);
+ ret = chip->legacy.block_markbad(chip, byte);
+ if (ret)
+ dev_err(dev,
+ "Failed to mark block bad with ret %d\n",
+ ret);
+ }
+ }
+
+ /* Write the stamp that indicates we've transcribed the block marks. */
+ mx23_write_transcription_stamp(this);
+ return 0;
+}
+
+static int nand_boot_init(struct gpmi_nand_data *this)
+{
+ nand_boot_set_geometry(this);
+
+	/* This is the ROM arch-specific initialization before the BBT scanning. */
+ if (GPMI_IS_MX23(this))
+ return mx23_boot_init(this);
+ return 0;
+}
+
+static int gpmi_set_geometry(struct gpmi_nand_data *this)
+{
+ int ret;
+
+ /* Free the temporary DMA memory for reading ID. */
+ gpmi_free_dma_buffer(this);
+
+ /* Set up the NFC geometry which is used by BCH. */
+ ret = bch_set_geometry(this);
+ if (ret) {
+ dev_err(this->dev, "Error setting BCH geometry : %d\n", ret);
+ return ret;
+ }
+
+	/* Allocate the new DMA buffers according to the pagesize and oobsize */
+ return gpmi_alloc_dma_buffer(this);
+}
+
+static int gpmi_init_last(struct gpmi_nand_data *this)
+{
+ struct nand_chip *chip = &this->nand;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ struct bch_geometry *bch_geo = &this->bch_geometry;
+ int ret;
+
+ /* Set up the medium geometry */
+ ret = gpmi_set_geometry(this);
+ if (ret)
+ return ret;
+
+ /* Init the nand_ecc_ctrl{} */
+ ecc->read_page = gpmi_ecc_read_page;
+ ecc->write_page = gpmi_ecc_write_page;
+ ecc->read_oob = gpmi_ecc_read_oob;
+ ecc->write_oob = gpmi_ecc_write_oob;
+ ecc->read_page_raw = gpmi_ecc_read_page_raw;
+ ecc->write_page_raw = gpmi_ecc_write_page_raw;
+ ecc->read_oob_raw = gpmi_ecc_read_oob_raw;
+ ecc->write_oob_raw = gpmi_ecc_write_oob_raw;
+ ecc->engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+ ecc->size = bch_geo->ecc_chunk_size;
+ ecc->strength = bch_geo->ecc_strength;
+ mtd_set_ooblayout(mtd, &gpmi_ooblayout_ops);
+
+ /*
+ * We only enable the subpage read when:
+ * (1) the chip is imx6, and
+ * (2) the size of the ECC parity is byte aligned.
+ */
+ if (GPMI_IS_MX6(this) &&
+ ((bch_geo->gf_len * bch_geo->ecc_strength) % 8) == 0) {
+ ecc->read_subpage = gpmi_ecc_read_subpage;
+ chip->options |= NAND_SUBPAGE_READ;
+ }
+
+ return 0;
+}
+
+static int gpmi_nand_attach_chip(struct nand_chip *chip)
+{
+ struct gpmi_nand_data *this = nand_get_controller_data(chip);
+ int ret;
+
+ if (chip->bbt_options & NAND_BBT_USE_FLASH) {
+ chip->bbt_options |= NAND_BBT_NO_OOB;
+
+ if (of_property_read_bool(this->dev->of_node,
+ "fsl,no-blockmark-swap"))
+ this->swap_block_mark = false;
+ }
+ dev_dbg(this->dev, "Blockmark swapping %sabled\n",
+ this->swap_block_mark ? "en" : "dis");
+
+ ret = gpmi_init_last(this);
+ if (ret)
+ return ret;
+
+ chip->options |= NAND_SKIP_BBTSCAN;
+
+ return 0;
+}
+
+static struct gpmi_transfer *get_next_transfer(struct gpmi_nand_data *this)
+{
+ struct gpmi_transfer *transfer = &this->transfers[this->ntransfers];
+
+ this->ntransfers++;
+
+ if (this->ntransfers == GPMI_MAX_TRANSFERS)
+ return NULL;
+
+ return transfer;
+}
+
+static struct dma_async_tx_descriptor *gpmi_chain_command(
+ struct gpmi_nand_data *this, u8 cmd, const u8 *addr, int naddr)
+{
+ struct dma_chan *channel = get_dma_chan(this);
+ struct dma_async_tx_descriptor *desc;
+ struct gpmi_transfer *transfer;
+ int chip = this->nand.cur_cs;
+ u32 pio[3];
+
+ /* [1] send out the PIO words */
+ pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE)
+ | BM_GPMI_CTRL0_WORD_LENGTH
+ | BF_GPMI_CTRL0_CS(chip, this)
+ | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
+ | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_CLE)
+ | BM_GPMI_CTRL0_ADDRESS_INCREMENT
+ | BF_GPMI_CTRL0_XFER_COUNT(naddr + 1);
+ pio[1] = 0;
+ pio[2] = 0;
+ desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio),
+ DMA_TRANS_NONE, 0);
+ if (!desc)
+ return NULL;
+
+ transfer = get_next_transfer(this);
+ if (!transfer)
+ return NULL;
+
+ transfer->cmdbuf[0] = cmd;
+ if (naddr)
+ memcpy(&transfer->cmdbuf[1], addr, naddr);
+
+ sg_init_one(&transfer->sgl, transfer->cmdbuf, naddr + 1);
+ dma_map_sg(this->dev, &transfer->sgl, 1, DMA_TO_DEVICE);
+
+ transfer->direction = DMA_TO_DEVICE;
+
+ desc = dmaengine_prep_slave_sg(channel, &transfer->sgl, 1, DMA_MEM_TO_DEV,
+ MXS_DMA_CTRL_WAIT4END);
+ return desc;
+}
+
+static struct dma_async_tx_descriptor *gpmi_chain_wait_ready(
+ struct gpmi_nand_data *this)
+{
+ struct dma_chan *channel = get_dma_chan(this);
+ u32 pio[2];
+
+ pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY)
+ | BM_GPMI_CTRL0_WORD_LENGTH
+ | BF_GPMI_CTRL0_CS(this->nand.cur_cs, this)
+ | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
+ | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
+ | BF_GPMI_CTRL0_XFER_COUNT(0);
+ pio[1] = 0;
+
+ return mxs_dmaengine_prep_pio(channel, pio, 2, DMA_TRANS_NONE,
+ MXS_DMA_CTRL_WAIT4END | MXS_DMA_CTRL_WAIT4RDY);
+}
+
+static struct dma_async_tx_descriptor *gpmi_chain_data_read(
+ struct gpmi_nand_data *this, void *buf, int raw_len, bool *direct)
+{
+ struct dma_async_tx_descriptor *desc;
+ struct dma_chan *channel = get_dma_chan(this);
+ struct gpmi_transfer *transfer;
+ u32 pio[6] = {};
+
+ transfer = get_next_transfer(this);
+ if (!transfer)
+ return NULL;
+
+ transfer->direction = DMA_FROM_DEVICE;
+
+ *direct = prepare_data_dma(this, buf, raw_len, &transfer->sgl,
+ DMA_FROM_DEVICE);
+
+ pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__READ)
+ | BM_GPMI_CTRL0_WORD_LENGTH
+ | BF_GPMI_CTRL0_CS(this->nand.cur_cs, this)
+ | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
+ | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
+ | BF_GPMI_CTRL0_XFER_COUNT(raw_len);
+
+ if (this->bch) {
+ pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC
+ | BF_GPMI_ECCCTRL_ECC_CMD(BV_GPMI_ECCCTRL_ECC_CMD__BCH_DECODE)
+ | BF_GPMI_ECCCTRL_BUFFER_MASK(BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE
+ | BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY);
+ pio[3] = raw_len;
+ pio[4] = transfer->sgl.dma_address;
+ pio[5] = this->auxiliary_phys;
+ }
+
+ desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio),
+ DMA_TRANS_NONE, 0);
+ if (!desc)
+ return NULL;
+
+ if (!this->bch)
+ desc = dmaengine_prep_slave_sg(channel, &transfer->sgl, 1,
+ DMA_DEV_TO_MEM,
+ MXS_DMA_CTRL_WAIT4END);
+
+ return desc;
+}
+
+static struct dma_async_tx_descriptor *gpmi_chain_data_write(
+ struct gpmi_nand_data *this, const void *buf, int raw_len)
+{
+ struct dma_chan *channel = get_dma_chan(this);
+ struct dma_async_tx_descriptor *desc;
+ struct gpmi_transfer *transfer;
+ u32 pio[6] = {};
+
+ transfer = get_next_transfer(this);
+ if (!transfer)
+ return NULL;
+
+ transfer->direction = DMA_TO_DEVICE;
+
+ prepare_data_dma(this, buf, raw_len, &transfer->sgl, DMA_TO_DEVICE);
+
+ pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE)
+ | BM_GPMI_CTRL0_WORD_LENGTH
+ | BF_GPMI_CTRL0_CS(this->nand.cur_cs, this)
+ | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
+ | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
+ | BF_GPMI_CTRL0_XFER_COUNT(raw_len);
+
+ if (this->bch) {
+ pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC
+ | BF_GPMI_ECCCTRL_ECC_CMD(BV_GPMI_ECCCTRL_ECC_CMD__BCH_ENCODE)
+ | BF_GPMI_ECCCTRL_BUFFER_MASK(BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE |
+ BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY);
+ pio[3] = raw_len;
+ pio[4] = transfer->sgl.dma_address;
+ pio[5] = this->auxiliary_phys;
+ }
+
+ desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio),
+ DMA_TRANS_NONE,
+ (this->bch ? MXS_DMA_CTRL_WAIT4END : 0));
+ if (!desc)
+ return NULL;
+
+ if (!this->bch)
+ desc = dmaengine_prep_slave_sg(channel, &transfer->sgl, 1,
+ DMA_MEM_TO_DEV,
+ MXS_DMA_CTRL_WAIT4END);
+
+ return desc;
+}
+
+static int gpmi_nfc_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ const struct nand_op_instr *instr;
+ struct gpmi_nand_data *this = nand_get_controller_data(chip);
+ struct dma_async_tx_descriptor *desc = NULL;
+ int i, ret, buf_len = 0, nbufs = 0;
+ u8 cmd = 0;
+ void *buf_read = NULL;
+ const void *buf_write = NULL;
+ bool direct = false;
+ struct completion *dma_completion, *bch_completion;
+ unsigned long to;
+
+ if (check_only)
+ return 0;
+
+ this->ntransfers = 0;
+ for (i = 0; i < GPMI_MAX_TRANSFERS; i++)
+ this->transfers[i].direction = DMA_NONE;
+
+ ret = pm_runtime_get_sync(this->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(this->dev);
+ return ret;
+ }
+
+ /*
+	 * This driver currently supports only one NAND chip, and all its
+	 * dies share the same configuration. So once timings have been
+	 * applied on the controller side, they will not change anymore.
+	 * When the time comes, the check on must_apply_timings will have
+	 * to be dropped.
+ */
+ if (this->hw.must_apply_timings) {
+ this->hw.must_apply_timings = false;
+ ret = gpmi_nfc_apply_timings(this);
+ if (ret)
+ goto out_pm;
+ }
+
+ dev_dbg(this->dev, "%s: %d instructions\n", __func__, op->ninstrs);
+
+ for (i = 0; i < op->ninstrs; i++) {
+ instr = &op->instrs[i];
+
+ nand_op_trace(" ", instr);
+
+ switch (instr->type) {
+ case NAND_OP_WAITRDY_INSTR:
+ desc = gpmi_chain_wait_ready(this);
+ break;
+ case NAND_OP_CMD_INSTR:
+ cmd = instr->ctx.cmd.opcode;
+
+ /*
+			 * If this command has an address cycle, chain it
+			 * together with the address cycle.
+ */
+ if (i + 1 != op->ninstrs &&
+ op->instrs[i + 1].type == NAND_OP_ADDR_INSTR)
+ continue;
+
+ desc = gpmi_chain_command(this, cmd, NULL, 0);
+
+ break;
+ case NAND_OP_ADDR_INSTR:
+ desc = gpmi_chain_command(this, cmd, instr->ctx.addr.addrs,
+ instr->ctx.addr.naddrs);
+ break;
+ case NAND_OP_DATA_OUT_INSTR:
+ buf_write = instr->ctx.data.buf.out;
+ buf_len = instr->ctx.data.len;
+ nbufs++;
+
+ desc = gpmi_chain_data_write(this, buf_write, buf_len);
+
+ break;
+ case NAND_OP_DATA_IN_INSTR:
+ if (!instr->ctx.data.len)
+ break;
+ buf_read = instr->ctx.data.buf.in;
+ buf_len = instr->ctx.data.len;
+ nbufs++;
+
+ desc = gpmi_chain_data_read(this, buf_read, buf_len,
+ &direct);
+ break;
+ }
+
+ if (!desc) {
+ ret = -ENXIO;
+ goto unmap;
+ }
+ }
+
+ dev_dbg(this->dev, "%s setup done\n", __func__);
+
+ if (nbufs > 1) {
+ dev_err(this->dev, "Multiple data instructions not supported\n");
+ ret = -EINVAL;
+ goto unmap;
+ }
+
+ if (this->bch) {
+ writel(this->bch_flashlayout0,
+ this->resources.bch_regs + HW_BCH_FLASH0LAYOUT0);
+ writel(this->bch_flashlayout1,
+ this->resources.bch_regs + HW_BCH_FLASH0LAYOUT1);
+ }
+
+ desc->callback = dma_irq_callback;
+ desc->callback_param = this;
+ dma_completion = &this->dma_done;
+ bch_completion = NULL;
+
+ init_completion(dma_completion);
+
+ if (this->bch && buf_read) {
+ writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
+ this->resources.bch_regs + HW_BCH_CTRL_SET);
+ bch_completion = &this->bch_done;
+ init_completion(bch_completion);
+ }
+
+ dmaengine_submit(desc);
+ dma_async_issue_pending(get_dma_chan(this));
+
+ to = wait_for_completion_timeout(dma_completion, msecs_to_jiffies(1000));
+ if (!to) {
+		dev_err(this->dev, "DMA timeout on the last descriptor\n");
+ gpmi_dump_info(this);
+ ret = -ETIMEDOUT;
+ goto unmap;
+ }
+
+ if (this->bch && buf_read) {
+ to = wait_for_completion_timeout(bch_completion, msecs_to_jiffies(1000));
+ if (!to) {
+			dev_err(this->dev, "BCH timeout on the last descriptor\n");
+ gpmi_dump_info(this);
+ ret = -ETIMEDOUT;
+ goto unmap;
+ }
+ }
+
+ writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
+ this->resources.bch_regs + HW_BCH_CTRL_CLR);
+ gpmi_clear_bch(this);
+
+ ret = 0;
+
+unmap:
+ for (i = 0; i < this->ntransfers; i++) {
+ struct gpmi_transfer *transfer = &this->transfers[i];
+
+ if (transfer->direction != DMA_NONE)
+ dma_unmap_sg(this->dev, &transfer->sgl, 1,
+ transfer->direction);
+ }
+
+ if (!ret && buf_read && !direct)
+ memcpy(buf_read, this->data_buffer_dma,
+ gpmi_raw_len_to_len(this, buf_len));
+
+ this->bch = false;
+
+out_pm:
+ pm_runtime_mark_last_busy(this->dev);
+ pm_runtime_put_autosuspend(this->dev);
+
+ return ret;
+}
+
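+/*
+ * For illustration (not part of this driver): the kind of operation the
+ * NAND core hands to the ->exec_op() hook above. A Read ID sequence built
+ * with the rawnand.h helper macros would look roughly like this:
+ *
+ *	u8 id[8];
+ *	u8 addr = 0;
+ *	struct nand_op_instr instrs[] = {
+ *		NAND_OP_CMD(NAND_CMD_READID, 0),
+ *		NAND_OP_ADDR(1, &addr, 0),
+ *		NAND_OP_8BIT_DATA_IN(sizeof(id), id, 0),
+ *	};
+ *	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+ *
+ *	ret = gpmi_nfc_exec_op(chip, &op, false);
+ *
+ * gpmi_nfc_exec_op() chains the CMD instruction with the following ADDR
+ * instruction (see the `continue` in the loop) and turns the DATA_IN
+ * instruction into a DMA descriptor.
+ */
+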
+static const struct nand_controller_ops gpmi_nand_controller_ops = {
+ .attach_chip = gpmi_nand_attach_chip,
+ .setup_interface = gpmi_setup_interface,
+ .exec_op = gpmi_nfc_exec_op,
+};
+
+static int gpmi_nand_init(struct gpmi_nand_data *this)
+{
+ struct nand_chip *chip = &this->nand;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ /* init the MTD data structures */
+ mtd->name = "gpmi-nand";
+ mtd->dev.parent = this->dev;
+
+	/* Init the nand_chip{}; we don't support a 16-bit NAND flash bus. */
+ nand_set_controller_data(chip, this);
+ nand_set_flash_node(chip, this->pdev->dev.of_node);
+ chip->legacy.block_markbad = gpmi_block_markbad;
+ chip->badblock_pattern = &gpmi_bbt_descr;
+ chip->options |= NAND_NO_SUBPAGE_WRITE;
+
+	/* Set up swap_block_mark; it must be set before calling gpmi_set_geometry() */
+ this->swap_block_mark = !GPMI_IS_MX23(this);
+
+ /*
+ * Allocate a temporary DMA buffer for reading ID in the
+ * nand_scan_ident().
+ */
+ this->bch_geometry.payload_size = 1024;
+ this->bch_geometry.auxiliary_size = 128;
+ ret = gpmi_alloc_dma_buffer(this);
+ if (ret)
+ return ret;
+
+ nand_controller_init(&this->base);
+ this->base.ops = &gpmi_nand_controller_ops;
+ chip->controller = &this->base;
+
+ ret = nand_scan(chip, GPMI_IS_MX6(this) ? 2 : 1);
+ if (ret)
+ goto err_out;
+
+ ret = nand_boot_init(this);
+ if (ret)
+ goto err_nand_cleanup;
+ ret = nand_create_bbt(chip);
+ if (ret)
+ goto err_nand_cleanup;
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret)
+ goto err_nand_cleanup;
+ return 0;
+
+err_nand_cleanup:
+ nand_cleanup(chip);
+err_out:
+ gpmi_free_dma_buffer(this);
+ return ret;
+}
+
+static const struct of_device_id gpmi_nand_id_table[] = {
+ {
+ .compatible = "fsl,imx23-gpmi-nand",
+ .data = &gpmi_devdata_imx23,
+ }, {
+ .compatible = "fsl,imx28-gpmi-nand",
+ .data = &gpmi_devdata_imx28,
+ }, {
+ .compatible = "fsl,imx6q-gpmi-nand",
+ .data = &gpmi_devdata_imx6q,
+ }, {
+ .compatible = "fsl,imx6sx-gpmi-nand",
+ .data = &gpmi_devdata_imx6sx,
+ }, {
+ .compatible = "fsl,imx7d-gpmi-nand",
+ .data = &gpmi_devdata_imx7d,
+ }, {}
+};
+MODULE_DEVICE_TABLE(of, gpmi_nand_id_table);
+
+static int gpmi_nand_probe(struct platform_device *pdev)
+{
+ struct gpmi_nand_data *this;
+ const struct of_device_id *of_id;
+ int ret;
+
+ this = devm_kzalloc(&pdev->dev, sizeof(*this), GFP_KERNEL);
+ if (!this)
+ return -ENOMEM;
+
+ of_id = of_match_device(gpmi_nand_id_table, &pdev->dev);
+ if (of_id) {
+ this->devdata = of_id->data;
+ } else {
+ dev_err(&pdev->dev, "Failed to find the right device id.\n");
+ return -ENODEV;
+ }
+
+ platform_set_drvdata(pdev, this);
+ this->pdev = pdev;
+ this->dev = &pdev->dev;
+
+ ret = acquire_resources(this);
+ if (ret)
+ goto exit_acquire_resources;
+
+ ret = __gpmi_enable_clk(this, true);
+ if (ret)
+ goto exit_acquire_resources;
+
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
+ ret = gpmi_init(this);
+ if (ret)
+ goto exit_nfc_init;
+
+ ret = gpmi_nand_init(this);
+ if (ret)
+ goto exit_nfc_init;
+
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
+
+ dev_info(this->dev, "driver registered.\n");
+
+ return 0;
+
+exit_nfc_init:
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ release_resources(this);
+exit_acquire_resources:
+	return ret;
+}
+
+static int gpmi_nand_remove(struct platform_device *pdev)
+{
+ struct gpmi_nand_data *this = platform_get_drvdata(pdev);
+ struct nand_chip *chip = &this->nand;
+ int ret;
+
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ gpmi_free_dma_buffer(this);
+ release_resources(this);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int gpmi_pm_suspend(struct device *dev)
+{
+ struct gpmi_nand_data *this = dev_get_drvdata(dev);
+
+ release_dma_channels(this);
+ return 0;
+}
+
+static int gpmi_pm_resume(struct device *dev)
+{
+ struct gpmi_nand_data *this = dev_get_drvdata(dev);
+ int ret;
+
+ ret = acquire_dma_channels(this);
+ if (ret < 0)
+ return ret;
+
+ /* re-init the GPMI registers */
+ ret = gpmi_init(this);
+ if (ret) {
+		dev_err(this->dev, "Error setting GPMI: %d\n", ret);
+ return ret;
+ }
+
+ /* Set flag to get timing setup restored for next exec_op */
+ if (this->hw.clk_rate)
+ this->hw.must_apply_timings = true;
+
+ /* re-init the BCH registers */
+ ret = bch_set_geometry(this);
+ if (ret) {
+		dev_err(this->dev, "Error setting BCH: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static int __maybe_unused gpmi_runtime_suspend(struct device *dev)
+{
+ struct gpmi_nand_data *this = dev_get_drvdata(dev);
+
+ return __gpmi_enable_clk(this, false);
+}
+
+static int __maybe_unused gpmi_runtime_resume(struct device *dev)
+{
+ struct gpmi_nand_data *this = dev_get_drvdata(dev);
+
+ return __gpmi_enable_clk(this, true);
+}
+
+static const struct dev_pm_ops gpmi_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(gpmi_pm_suspend, gpmi_pm_resume)
+ SET_RUNTIME_PM_OPS(gpmi_runtime_suspend, gpmi_runtime_resume, NULL)
+};
+
+static struct platform_driver gpmi_nand_driver = {
+ .driver = {
+ .name = "gpmi-nand",
+ .pm = &gpmi_pm_ops,
+ .of_match_table = gpmi_nand_id_table,
+ },
+ .probe = gpmi_nand_probe,
+ .remove = gpmi_nand_remove,
+};
+module_platform_driver(gpmi_nand_driver);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("i.MX GPMI NAND Flash Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h
new file mode 100644
index 000000000..fdc5ed7de
--- /dev/null
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Freescale GPMI NAND Flash Driver
+ *
+ * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
+ * Copyright (C) 2008 Embedded Alley Solutions, Inc.
+ */
+#ifndef __DRIVERS_MTD_NAND_GPMI_NAND_H
+#define __DRIVERS_MTD_NAND_GPMI_NAND_H
+
+#include <linux/mtd/rawnand.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+
+#define GPMI_CLK_MAX 5 /* MX6Q needs five clocks */
+struct resources {
+ void __iomem *gpmi_regs;
+ void __iomem *bch_regs;
+ unsigned int dma_low_channel;
+ unsigned int dma_high_channel;
+ struct clk *clock[GPMI_CLK_MAX];
+};
+
+/**
+ * struct bch_geometry - BCH geometry description.
+ * @gf_len: The length of Galois Field. (e.g., 13 or 14)
+ * @ecc_strength: A number that describes the strength of the ECC
+ * algorithm.
+ * @page_size: The size, in bytes, of a physical page, including
+ * both data and OOB.
+ * @metadata_size: The size, in bytes, of the metadata.
+ * @ecc_chunk_size: The size, in bytes, of a single ECC chunk. Note
+ * the first chunk in the page includes both data and
+ * metadata, so it's a bit larger than this value.
+ * @ecc_chunk_count:	The number of ECC chunks in the page.
+ * @payload_size: The size, in bytes, of the payload buffer.
+ * @auxiliary_size: The size, in bytes, of the auxiliary buffer.
+ * @auxiliary_status_offset: The offset into the auxiliary buffer at which
+ * the ECC status appears.
+ * @block_mark_byte_offset: The byte offset in the ECC-based page view at
+ * which the underlying physical block mark appears.
+ * @block_mark_bit_offset: The bit offset into the ECC-based page view at
+ * which the underlying physical block mark appears.
+ */
+struct bch_geometry {
+ unsigned int gf_len;
+ unsigned int ecc_strength;
+ unsigned int page_size;
+ unsigned int metadata_size;
+ unsigned int ecc_chunk_size;
+ unsigned int ecc_chunk_count;
+ unsigned int payload_size;
+ unsigned int auxiliary_size;
+ unsigned int auxiliary_status_offset;
+ unsigned int block_mark_byte_offset;
+ unsigned int block_mark_bit_offset;
+};
+
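+/*
+ * The fields above are tied together: a page must hold the metadata plus
+ * every ECC chunk and its parity bytes, i.e. roughly
+ *
+ *	page_size >= metadata_size +
+ *		     ecc_chunk_count * (ecc_chunk_size + gf_len * ecc_strength / 8)
+ *
+ * Worked example with assumed typical values for a 2048+64 byte page:
+ * 10 bytes of metadata, four 512-byte chunks, gf_len 13 and strength 8
+ * give 10 + 4 * (512 + 13) = 2110 bytes, which fits in 2112.
+ */
+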
+/**
+ * struct boot_rom_geometry - Boot ROM geometry description.
+ * @stride_size_in_pages: The size of a boot block stride, in pages.
+ * @search_area_stride_exponent: The logarithm to base 2 of the size of a
+ * search area in boot block strides.
+ */
+struct boot_rom_geometry {
+ unsigned int stride_size_in_pages;
+ unsigned int search_area_stride_exponent;
+};
+
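+/*
+ * Worked example (assumed typical values): with a stride size of 64 pages
+ * and a search area stride exponent of 2, the boot ROM scans
+ * 1 << 2 = 4 strides, i.e. the search area spans 4 * 64 = 256 pages.
+ */
+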
+enum gpmi_type {
+ IS_MX23,
+ IS_MX28,
+ IS_MX6Q,
+ IS_MX6SX,
+ IS_MX7D,
+};
+
+struct gpmi_devdata {
+ enum gpmi_type type;
+ int bch_max_ecc_strength;
+ int max_chain_delay; /* See the async EDO mode */
+ const char * const *clks;
+ const int clks_count;
+};
+
+/**
+ * struct gpmi_nfc_hardware_timing - GPMI hardware timing parameters.
+ * @must_apply_timings:	Whether controller timings still need to be
+ *				applied on the next exec_op() (useful only
+ *				as long as a single chip select is supported)
+ * @clk_rate: The clock rate that must be used to derive the
+ * following parameters
+ * @timing0: HW_GPMI_TIMING0 register
+ * @timing1: HW_GPMI_TIMING1 register
+ * @ctrl1n: HW_GPMI_CTRL1n register
+ */
+struct gpmi_nfc_hardware_timing {
+ bool must_apply_timings;
+ unsigned long int clk_rate;
+ u32 timing0;
+ u32 timing1;
+ u32 ctrl1n;
+};
+
+#define GPMI_MAX_TRANSFERS 8
+
+struct gpmi_transfer {
+ u8 cmdbuf[8];
+ struct scatterlist sgl;
+ enum dma_data_direction direction;
+};
+
+struct gpmi_nand_data {
+ /* Devdata */
+ const struct gpmi_devdata *devdata;
+
+ /* System Interface */
+ struct device *dev;
+ struct platform_device *pdev;
+
+ /* Resources */
+ struct resources resources;
+
+ /* Flash Hardware */
+ struct gpmi_nfc_hardware_timing hw;
+
+ /* BCH */
+ struct bch_geometry bch_geometry;
+ struct completion bch_done;
+
+ /* NAND Boot issue */
+ bool swap_block_mark;
+ struct boot_rom_geometry rom_geometry;
+
+ /* MTD / NAND */
+ struct nand_controller base;
+ struct nand_chip nand;
+
+ struct gpmi_transfer transfers[GPMI_MAX_TRANSFERS];
+ int ntransfers;
+
+ bool bch;
+ uint32_t bch_flashlayout0;
+ uint32_t bch_flashlayout1;
+
+ char *data_buffer_dma;
+
+ void *auxiliary_virt;
+ dma_addr_t auxiliary_phys;
+
+ void *raw_buffer;
+
+ /* DMA channels */
+#define DMA_CHANS 8
+ struct dma_chan *dma_chans[DMA_CHANS];
+ struct completion dma_done;
+};
+
+/* BCH : Status Block Completion Codes */
+#define STATUS_GOOD 0x00
+#define STATUS_ERASED 0xff
+#define STATUS_UNCORRECTABLE 0xfe
+
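+/*
+ * The BCH engine writes one status byte per ECC chunk into the auxiliary
+ * buffer, using the codes above or the number of corrected bitflips. A
+ * read path typically walks them like this (sketch, assuming `status`
+ * points at the first status byte and `nchunks` chunks were decoded):
+ *
+ *	for (i = 0; i < nchunks; i++, status++) {
+ *		if (*status == STATUS_GOOD || *status == STATUS_ERASED)
+ *			continue;
+ *		if (*status == STATUS_UNCORRECTABLE)
+ *			mtd->ecc_stats.failed++;
+ *		else
+ *			mtd->ecc_stats.corrected += *status;
+ *	}
+ */
+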
+/* Use the devdata to distinguish the different architectures. */
+#define GPMI_IS_MX23(x) ((x)->devdata->type == IS_MX23)
+#define GPMI_IS_MX28(x) ((x)->devdata->type == IS_MX28)
+#define GPMI_IS_MX6Q(x) ((x)->devdata->type == IS_MX6Q)
+#define GPMI_IS_MX6SX(x) ((x)->devdata->type == IS_MX6SX)
+#define GPMI_IS_MX7D(x) ((x)->devdata->type == IS_MX7D)
+
+#define GPMI_IS_MX6(x) (GPMI_IS_MX6Q(x) || GPMI_IS_MX6SX(x) || \
+ GPMI_IS_MX7D(x))
+#define GPMI_IS_MXS(x) (GPMI_IS_MX23(x) || GPMI_IS_MX28(x))
+#endif
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-regs.h b/drivers/mtd/nand/raw/gpmi-nand/gpmi-regs.h
new file mode 100644
index 000000000..f5e4f26c3
--- /dev/null
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-regs.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Freescale GPMI NAND Flash Driver
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright 2008 Embedded Alley Solutions, Inc.
+ */
+#ifndef __GPMI_NAND_GPMI_REGS_H
+#define __GPMI_NAND_GPMI_REGS_H
+
+#define HW_GPMI_CTRL0 0x00000000
+#define HW_GPMI_CTRL0_SET 0x00000004
+#define HW_GPMI_CTRL0_CLR 0x00000008
+#define HW_GPMI_CTRL0_TOG 0x0000000c
+
+#define BP_GPMI_CTRL0_COMMAND_MODE 24
+#define BM_GPMI_CTRL0_COMMAND_MODE (3 << BP_GPMI_CTRL0_COMMAND_MODE)
+#define BF_GPMI_CTRL0_COMMAND_MODE(v) \
+ (((v) << BP_GPMI_CTRL0_COMMAND_MODE) & BM_GPMI_CTRL0_COMMAND_MODE)
+#define BV_GPMI_CTRL0_COMMAND_MODE__WRITE 0x0
+#define BV_GPMI_CTRL0_COMMAND_MODE__READ 0x1
+#define BV_GPMI_CTRL0_COMMAND_MODE__READ_AND_COMPARE 0x2
+#define BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY 0x3
+
+#define BM_GPMI_CTRL0_WORD_LENGTH (1 << 23)
+#define BV_GPMI_CTRL0_WORD_LENGTH__16_BIT 0x0
+#define BV_GPMI_CTRL0_WORD_LENGTH__8_BIT 0x1
+
+/*
+ * Difference in LOCK_CS between imx23 and imx28:
+ * this bit may impact power consumption, so some
+ * chips do not set it.
+ */
+#define MX23_BP_GPMI_CTRL0_LOCK_CS 22
+#define MX28_BP_GPMI_CTRL0_LOCK_CS 27
+#define LOCK_CS_ENABLE 0x1
+#define BF_GPMI_CTRL0_LOCK_CS(v, x) 0x0
+
+/* Difference in CS between imx23 and imx28 */
+#define BP_GPMI_CTRL0_CS 20
+#define MX23_BM_GPMI_CTRL0_CS (3 << BP_GPMI_CTRL0_CS)
+#define MX28_BM_GPMI_CTRL0_CS (7 << BP_GPMI_CTRL0_CS)
+#define BF_GPMI_CTRL0_CS(v, x) (((v) << BP_GPMI_CTRL0_CS) & \
+ (GPMI_IS_MX23((x)) \
+ ? MX23_BM_GPMI_CTRL0_CS \
+ : MX28_BM_GPMI_CTRL0_CS))
+
+#define BP_GPMI_CTRL0_ADDRESS 17
+#define BM_GPMI_CTRL0_ADDRESS (3 << BP_GPMI_CTRL0_ADDRESS)
+#define BF_GPMI_CTRL0_ADDRESS(v) \
+ (((v) << BP_GPMI_CTRL0_ADDRESS) & BM_GPMI_CTRL0_ADDRESS)
+#define BV_GPMI_CTRL0_ADDRESS__NAND_DATA 0x0
+#define BV_GPMI_CTRL0_ADDRESS__NAND_CLE 0x1
+#define BV_GPMI_CTRL0_ADDRESS__NAND_ALE 0x2
+
+#define BM_GPMI_CTRL0_ADDRESS_INCREMENT (1 << 16)
+#define BV_GPMI_CTRL0_ADDRESS_INCREMENT__DISABLED 0x0
+#define BV_GPMI_CTRL0_ADDRESS_INCREMENT__ENABLED 0x1
+
+#define BP_GPMI_CTRL0_XFER_COUNT 0
+#define BM_GPMI_CTRL0_XFER_COUNT (0xffff << BP_GPMI_CTRL0_XFER_COUNT)
+#define BF_GPMI_CTRL0_XFER_COUNT(v) \
+ (((v) << BP_GPMI_CTRL0_XFER_COUNT) & BM_GPMI_CTRL0_XFER_COUNT)
+
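+/*
+ * Example (illustrative): composing a CTRL0 word for an 8-bit, 2112-byte
+ * data read on chip select 0 with the field macros above:
+ *
+ *	u32 ctrl0 = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__READ)
+ *		  | BM_GPMI_CTRL0_WORD_LENGTH
+ *		  | BF_GPMI_CTRL0_CS(0, this)
+ *		  | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
+ *		  | BF_GPMI_CTRL0_XFER_COUNT(2112);
+ *
+ * COMMAND_MODE lands in bits 25:24, WORD_LENGTH in bit 23, CS at bit 20,
+ * ADDRESS in bits 18:17 and XFER_COUNT in bits 15:0.
+ */
+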
+#define HW_GPMI_COMPARE 0x00000010
+
+#define HW_GPMI_ECCCTRL 0x00000020
+#define HW_GPMI_ECCCTRL_SET 0x00000024
+#define HW_GPMI_ECCCTRL_CLR 0x00000028
+#define HW_GPMI_ECCCTRL_TOG 0x0000002c
+
+#define BP_GPMI_ECCCTRL_ECC_CMD 13
+#define BM_GPMI_ECCCTRL_ECC_CMD (3 << BP_GPMI_ECCCTRL_ECC_CMD)
+#define BF_GPMI_ECCCTRL_ECC_CMD(v) \
+ (((v) << BP_GPMI_ECCCTRL_ECC_CMD) & BM_GPMI_ECCCTRL_ECC_CMD)
+#define BV_GPMI_ECCCTRL_ECC_CMD__BCH_DECODE 0x0
+#define BV_GPMI_ECCCTRL_ECC_CMD__BCH_ENCODE 0x1
+
+#define BM_GPMI_ECCCTRL_ENABLE_ECC (1 << 12)
+#define BV_GPMI_ECCCTRL_ENABLE_ECC__ENABLE 0x1
+#define BV_GPMI_ECCCTRL_ENABLE_ECC__DISABLE 0x0
+
+#define BP_GPMI_ECCCTRL_BUFFER_MASK 0
+#define BM_GPMI_ECCCTRL_BUFFER_MASK (0x1ff << BP_GPMI_ECCCTRL_BUFFER_MASK)
+#define BF_GPMI_ECCCTRL_BUFFER_MASK(v) \
+ (((v) << BP_GPMI_ECCCTRL_BUFFER_MASK) & BM_GPMI_ECCCTRL_BUFFER_MASK)
+#define BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY 0x100
+#define BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE 0x1FF
+
+#define HW_GPMI_ECCCOUNT 0x00000030
+#define HW_GPMI_PAYLOAD 0x00000040
+#define HW_GPMI_AUXILIARY 0x00000050
+#define HW_GPMI_CTRL1 0x00000060
+#define HW_GPMI_CTRL1_SET 0x00000064
+#define HW_GPMI_CTRL1_CLR 0x00000068
+#define HW_GPMI_CTRL1_TOG 0x0000006c
+
+#define BP_GPMI_CTRL1_DECOUPLE_CS 24
+#define BM_GPMI_CTRL1_DECOUPLE_CS (1 << BP_GPMI_CTRL1_DECOUPLE_CS)
+
+#define BP_GPMI_CTRL1_WRN_DLY_SEL 22
+#define BM_GPMI_CTRL1_WRN_DLY_SEL (0x3 << BP_GPMI_CTRL1_WRN_DLY_SEL)
+#define BF_GPMI_CTRL1_WRN_DLY_SEL(v) \
+ (((v) << BP_GPMI_CTRL1_WRN_DLY_SEL) & BM_GPMI_CTRL1_WRN_DLY_SEL)
+#define BV_GPMI_CTRL1_WRN_DLY_SEL_4_TO_8NS 0x0
+#define BV_GPMI_CTRL1_WRN_DLY_SEL_6_TO_10NS 0x1
+#define BV_GPMI_CTRL1_WRN_DLY_SEL_7_TO_12NS 0x2
+#define BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY 0x3
+
+#define BM_GPMI_CTRL1_BCH_MODE (1 << 18)
+
+#define BP_GPMI_CTRL1_DLL_ENABLE 17
+#define BM_GPMI_CTRL1_DLL_ENABLE (1 << BP_GPMI_CTRL1_DLL_ENABLE)
+
+#define BP_GPMI_CTRL1_HALF_PERIOD 16
+#define BM_GPMI_CTRL1_HALF_PERIOD (1 << BP_GPMI_CTRL1_HALF_PERIOD)
+
+#define BP_GPMI_CTRL1_RDN_DELAY 12
+#define BM_GPMI_CTRL1_RDN_DELAY (0xf << BP_GPMI_CTRL1_RDN_DELAY)
+#define BF_GPMI_CTRL1_RDN_DELAY(v) \
+ (((v) << BP_GPMI_CTRL1_RDN_DELAY) & BM_GPMI_CTRL1_RDN_DELAY)
+
+#define BM_GPMI_CTRL1_DEV_RESET (1 << 3)
+#define BV_GPMI_CTRL1_DEV_RESET__ENABLED 0x0
+#define BV_GPMI_CTRL1_DEV_RESET__DISABLED 0x1
+
+#define BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY (1 << 2)
+#define BV_GPMI_CTRL1_ATA_IRQRDY_POLARITY__ACTIVELOW 0x0
+#define BV_GPMI_CTRL1_ATA_IRQRDY_POLARITY__ACTIVEHIGH 0x1
+
+#define BM_GPMI_CTRL1_CAMERA_MODE (1 << 1)
+#define BV_GPMI_CTRL1_GPMI_MODE__NAND 0x0
+#define BV_GPMI_CTRL1_GPMI_MODE__ATA 0x1
+
+#define BM_GPMI_CTRL1_GPMI_MODE (1 << 0)
+
+#define BM_GPMI_CTRL1_CLEAR_MASK (BM_GPMI_CTRL1_WRN_DLY_SEL | \
+ BM_GPMI_CTRL1_DLL_ENABLE | \
+ BM_GPMI_CTRL1_RDN_DELAY | \
+ BM_GPMI_CTRL1_HALF_PERIOD)
+
+#define HW_GPMI_TIMING0 0x00000070
+
+#define BP_GPMI_TIMING0_ADDRESS_SETUP 16
+#define BM_GPMI_TIMING0_ADDRESS_SETUP (0xff << BP_GPMI_TIMING0_ADDRESS_SETUP)
+#define BF_GPMI_TIMING0_ADDRESS_SETUP(v) \
+ (((v) << BP_GPMI_TIMING0_ADDRESS_SETUP) & BM_GPMI_TIMING0_ADDRESS_SETUP)
+
+#define BP_GPMI_TIMING0_DATA_HOLD 8
+#define BM_GPMI_TIMING0_DATA_HOLD (0xff << BP_GPMI_TIMING0_DATA_HOLD)
+#define BF_GPMI_TIMING0_DATA_HOLD(v) \
+ (((v) << BP_GPMI_TIMING0_DATA_HOLD) & BM_GPMI_TIMING0_DATA_HOLD)
+
+#define BP_GPMI_TIMING0_DATA_SETUP 0
+#define BM_GPMI_TIMING0_DATA_SETUP (0xff << BP_GPMI_TIMING0_DATA_SETUP)
+#define BF_GPMI_TIMING0_DATA_SETUP(v) \
+ (((v) << BP_GPMI_TIMING0_DATA_SETUP) & BM_GPMI_TIMING0_DATA_SETUP)
+
+#define HW_GPMI_TIMING1 0x00000080
+#define BP_GPMI_TIMING1_BUSY_TIMEOUT 16
+#define BM_GPMI_TIMING1_BUSY_TIMEOUT (0xffff << BP_GPMI_TIMING1_BUSY_TIMEOUT)
+#define BF_GPMI_TIMING1_BUSY_TIMEOUT(v) \
+ (((v) << BP_GPMI_TIMING1_BUSY_TIMEOUT) & BM_GPMI_TIMING1_BUSY_TIMEOUT)
+
+#define HW_GPMI_TIMING2 0x00000090
+#define HW_GPMI_DATA 0x000000a0
+
+/* MX28 uses this to detect READY. */
+#define HW_GPMI_STAT 0x000000b0
+#define MX28_BP_GPMI_STAT_READY_BUSY 24
+#define MX28_BM_GPMI_STAT_READY_BUSY (0xff << MX28_BP_GPMI_STAT_READY_BUSY)
+#define MX28_BF_GPMI_STAT_READY_BUSY(v) \
+ (((v) << MX28_BP_GPMI_STAT_READY_BUSY) & MX28_BM_GPMI_STAT_READY_BUSY)
+
+/* MX23 uses this to detect READY. */
+#define HW_GPMI_DEBUG 0x000000c0
+#define MX23_BP_GPMI_DEBUG_READY0 28
+#define MX23_BM_GPMI_DEBUG_READY0 (1 << MX23_BP_GPMI_DEBUG_READY0)
+#endif
diff --git a/drivers/mtd/nand/raw/hisi504_nand.c b/drivers/mtd/nand/raw/hisi504_nand.c
new file mode 100644
index 000000000..8b2122ce6
--- /dev/null
+++ b/drivers/mtd/nand/raw/hisi504_nand.c
@@ -0,0 +1,876 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Hisilicon NAND Flash controller driver
+ *
+ * Copyright © 2012-2014 HiSilicon Technologies Co., Ltd.
+ * http://www.hisilicon.com
+ *
+ * Author: Zhou Wang <wangzhou.bry@gmail.com>
+ * The initial developer of the original code is Zhiyong Cai
+ * <caizhiyong@huawei.com>
+ */
+#include <linux/of.h>
+#include <linux/mtd/mtd.h>
+#include <linux/sizes.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/partitions.h>
+
+#define HINFC504_MAX_CHIP (4)
+#define HINFC504_W_LATCH (5)
+#define HINFC504_R_LATCH (7)
+#define HINFC504_RW_LATCH (3)
+
+#define HINFC504_NFC_TIMEOUT (2 * HZ)
+#define HINFC504_NFC_PM_TIMEOUT (1 * HZ)
+#define HINFC504_NFC_DMA_TIMEOUT (5 * HZ)
+#define HINFC504_CHIP_DELAY (25)
+
+#define HINFC504_REG_BASE_ADDRESS_LEN (0x100)
+#define HINFC504_BUFFER_BASE_ADDRESS_LEN (2048 + 128)
+
+#define HINFC504_ADDR_CYCLE_MASK 0x4
+
+#define HINFC504_CON 0x00
+#define HINFC504_CON_OP_MODE_NORMAL BIT(0)
+#define HINFC504_CON_PAGESIZE_SHIFT		(1)
+#define HINFC504_CON_PAGESIZE_MASK (0x07)
+#define HINFC504_CON_BUS_WIDTH BIT(4)
+#define HINFC504_CON_READY_BUSY_SEL BIT(8)
+#define HINFC504_CON_ECCTYPE_SHIFT (9)
+#define HINFC504_CON_ECCTYPE_MASK (0x07)
+
+#define HINFC504_PWIDTH 0x04
+#define SET_HINFC504_PWIDTH(_w_lcnt, _r_lcnt, _rw_hcnt) \
+ ((_w_lcnt) | (((_r_lcnt) & 0x0F) << 4) | (((_rw_hcnt) & 0x0F) << 8))
+
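+/*
+ * Worked example: SET_HINFC504_PWIDTH(5, 7, 3), the latch values used by
+ * this driver, packs to 5 | (7 << 4) | (3 << 8) = 0x375.
+ */
+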
+#define HINFC504_CMD 0x0C
+#define HINFC504_ADDRL 0x10
+#define HINFC504_ADDRH 0x14
+#define HINFC504_DATA_NUM 0x18
+
+#define HINFC504_OP 0x1C
+#define HINFC504_OP_READ_DATA_EN BIT(1)
+#define HINFC504_OP_WAIT_READY_EN BIT(2)
+#define HINFC504_OP_CMD2_EN BIT(3)
+#define HINFC504_OP_WRITE_DATA_EN BIT(4)
+#define HINFC504_OP_ADDR_EN BIT(5)
+#define HINFC504_OP_CMD1_EN BIT(6)
+#define HINFC504_OP_NF_CS_SHIFT (7)
+#define HINFC504_OP_NF_CS_MASK (3)
+#define HINFC504_OP_ADDR_CYCLE_SHIFT (9)
+#define HINFC504_OP_ADDR_CYCLE_MASK (7)
+
+#define HINFC504_STATUS 0x20
+#define HINFC504_READY BIT(0)
+
+#define HINFC504_INTEN 0x24
+#define HINFC504_INTEN_DMA BIT(9)
+#define HINFC504_INTEN_UE BIT(6)
+#define HINFC504_INTEN_CE BIT(5)
+
+#define HINFC504_INTS 0x28
+#define HINFC504_INTS_DMA BIT(9)
+#define HINFC504_INTS_UE BIT(6)
+#define HINFC504_INTS_CE BIT(5)
+
+#define HINFC504_INTCLR 0x2C
+#define HINFC504_INTCLR_DMA BIT(9)
+#define HINFC504_INTCLR_UE BIT(6)
+#define HINFC504_INTCLR_CE BIT(5)
+
+#define HINFC504_ECC_STATUS 0x5C
+#define HINFC504_ECC_16_BIT_SHIFT 12
+
+#define HINFC504_DMA_CTRL 0x60
+#define HINFC504_DMA_CTRL_DMA_START BIT(0)
+#define HINFC504_DMA_CTRL_WE BIT(1)
+#define HINFC504_DMA_CTRL_DATA_AREA_EN BIT(2)
+#define HINFC504_DMA_CTRL_OOB_AREA_EN BIT(3)
+#define HINFC504_DMA_CTRL_BURST4_EN BIT(4)
+#define HINFC504_DMA_CTRL_BURST8_EN BIT(5)
+#define HINFC504_DMA_CTRL_BURST16_EN BIT(6)
+#define HINFC504_DMA_CTRL_ADDR_NUM_SHIFT (7)
+#define HINFC504_DMA_CTRL_ADDR_NUM_MASK (1)
+#define HINFC504_DMA_CTRL_CS_SHIFT (8)
+#define HINFC504_DMA_CTRL_CS_MASK (0x03)
+
+#define HINFC504_DMA_ADDR_DATA 0x64
+#define HINFC504_DMA_ADDR_OOB 0x68
+
+#define HINFC504_DMA_LEN 0x6C
+#define HINFC504_DMA_LEN_OOB_SHIFT (16)
+#define HINFC504_DMA_LEN_OOB_MASK (0xFFF)
+
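+/*
+ * Example (illustrative): packing a 64-byte OOB length into the DMA
+ * length register gives (64 & 0xFFF) << 16 = 0x00400000, i.e. the OOB
+ * length occupies bits 27:16.
+ */
+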
+#define HINFC504_DMA_PARA 0x70
+#define HINFC504_DMA_PARA_DATA_RW_EN BIT(0)
+#define HINFC504_DMA_PARA_OOB_RW_EN BIT(1)
+#define HINFC504_DMA_PARA_DATA_EDC_EN BIT(2)
+#define HINFC504_DMA_PARA_OOB_EDC_EN BIT(3)
+#define HINFC504_DMA_PARA_DATA_ECC_EN BIT(4)
+#define HINFC504_DMA_PARA_OOB_ECC_EN BIT(5)
+
+#define HINFC_VERSION 0x74
+#define HINFC504_LOG_READ_ADDR 0x7C
+#define HINFC504_LOG_READ_LEN 0x80
+
+#define HINFC504_NANDINFO_LEN 0x10
+
+struct hinfc_host {
+ struct nand_chip chip;
+ struct device *dev;
+ void __iomem *iobase;
+ void __iomem *mmio;
+ struct completion cmd_complete;
+ unsigned int offset;
+ unsigned int command;
+ int chipselect;
+ unsigned int addr_cycle;
+ u32 addr_value[2];
+ u32 cache_addr_value[2];
+ char *buffer;
+ dma_addr_t dma_buffer;
+ dma_addr_t dma_oob;
+ int version;
+ unsigned int irq_status; /* interrupt status */
+};
+
+static inline unsigned int hinfc_read(struct hinfc_host *host, unsigned int reg)
+{
+ return readl(host->iobase + reg);
+}
+
+static inline void hinfc_write(struct hinfc_host *host, unsigned int value,
+ unsigned int reg)
+{
+ writel(value, host->iobase + reg);
+}
+
+static void wait_controller_finished(struct hinfc_host *host)
+{
+ unsigned long timeout = jiffies + HINFC504_NFC_TIMEOUT;
+ int val;
+
+ while (time_before(jiffies, timeout)) {
+ val = hinfc_read(host, HINFC504_STATUS);
+ if (host->command == NAND_CMD_ERASE2) {
+ /* nfc is ready */
+ while (!(val & HINFC504_READY)) {
+ usleep_range(500, 1000);
+ val = hinfc_read(host, HINFC504_STATUS);
+ }
+ return;
+ }
+
+ if (val & HINFC504_READY)
+ return;
+ }
+
+	/* timed out waiting for the command to complete */
+	dev_err(host->dev, "timed out waiting for NAND controller to execute command\n");
+}
+
+static void hisi_nfc_dma_transfer(struct hinfc_host *host, int todev)
+{
+ struct nand_chip *chip = &host->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned long val;
+ int ret;
+
+ hinfc_write(host, host->dma_buffer, HINFC504_DMA_ADDR_DATA);
+ hinfc_write(host, host->dma_oob, HINFC504_DMA_ADDR_OOB);
+
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_NONE) {
+ hinfc_write(host, ((mtd->oobsize & HINFC504_DMA_LEN_OOB_MASK)
+ << HINFC504_DMA_LEN_OOB_SHIFT), HINFC504_DMA_LEN);
+
+ hinfc_write(host, HINFC504_DMA_PARA_DATA_RW_EN
+ | HINFC504_DMA_PARA_OOB_RW_EN, HINFC504_DMA_PARA);
+ } else {
+ if (host->command == NAND_CMD_READOOB)
+ hinfc_write(host, HINFC504_DMA_PARA_OOB_RW_EN
+ | HINFC504_DMA_PARA_OOB_EDC_EN
+ | HINFC504_DMA_PARA_OOB_ECC_EN, HINFC504_DMA_PARA);
+ else
+ hinfc_write(host, HINFC504_DMA_PARA_DATA_RW_EN
+ | HINFC504_DMA_PARA_OOB_RW_EN
+ | HINFC504_DMA_PARA_DATA_EDC_EN
+ | HINFC504_DMA_PARA_OOB_EDC_EN
+ | HINFC504_DMA_PARA_DATA_ECC_EN
+ | HINFC504_DMA_PARA_OOB_ECC_EN, HINFC504_DMA_PARA);
+
+ }
+
+ val = (HINFC504_DMA_CTRL_DMA_START | HINFC504_DMA_CTRL_BURST4_EN
+ | HINFC504_DMA_CTRL_BURST8_EN | HINFC504_DMA_CTRL_BURST16_EN
+ | HINFC504_DMA_CTRL_DATA_AREA_EN | HINFC504_DMA_CTRL_OOB_AREA_EN
+ | ((host->addr_cycle == 4 ? 1 : 0)
+ << HINFC504_DMA_CTRL_ADDR_NUM_SHIFT)
+ | ((host->chipselect & HINFC504_DMA_CTRL_CS_MASK)
+ << HINFC504_DMA_CTRL_CS_SHIFT));
+
+ if (todev)
+ val |= HINFC504_DMA_CTRL_WE;
+
+ init_completion(&host->cmd_complete);
+
+ hinfc_write(host, val, HINFC504_DMA_CTRL);
+ ret = wait_for_completion_timeout(&host->cmd_complete,
+ HINFC504_NFC_DMA_TIMEOUT);
+
+	if (!ret) {
+		dev_err(host->dev, "DMA operation (irq) timed out!\n");
+		/* sanity check */
+		val = hinfc_read(host, HINFC504_DMA_CTRL);
+		if (!(val & HINFC504_DMA_CTRL_DMA_START))
+			dev_err(host->dev, "DMA is already done but without irq ACK!\n");
+		else
+			dev_err(host->dev, "DMA really timed out!\n");
+	}
+}
+
+static int hisi_nfc_send_cmd_pageprog(struct hinfc_host *host)
+{
+ host->addr_value[0] &= 0xffff0000;
+
+ hinfc_write(host, host->addr_value[0], HINFC504_ADDRL);
+ hinfc_write(host, host->addr_value[1], HINFC504_ADDRH);
+ hinfc_write(host, NAND_CMD_PAGEPROG << 8 | NAND_CMD_SEQIN,
+ HINFC504_CMD);
+
+ hisi_nfc_dma_transfer(host, 1);
+
+ return 0;
+}
+
+static int hisi_nfc_send_cmd_readstart(struct hinfc_host *host)
+{
+ struct mtd_info *mtd = nand_to_mtd(&host->chip);
+
+ if ((host->addr_value[0] == host->cache_addr_value[0]) &&
+ (host->addr_value[1] == host->cache_addr_value[1]))
+ return 0;
+
+ host->addr_value[0] &= 0xffff0000;
+
+ hinfc_write(host, host->addr_value[0], HINFC504_ADDRL);
+ hinfc_write(host, host->addr_value[1], HINFC504_ADDRH);
+ hinfc_write(host, NAND_CMD_READSTART << 8 | NAND_CMD_READ0,
+ HINFC504_CMD);
+
+ hinfc_write(host, 0, HINFC504_LOG_READ_ADDR);
+ hinfc_write(host, mtd->writesize + mtd->oobsize,
+ HINFC504_LOG_READ_LEN);
+
+ hisi_nfc_dma_transfer(host, 0);
+
+ host->cache_addr_value[0] = host->addr_value[0];
+ host->cache_addr_value[1] = host->addr_value[1];
+
+ return 0;
+}
+
+static int hisi_nfc_send_cmd_erase(struct hinfc_host *host)
+{
+ hinfc_write(host, host->addr_value[0], HINFC504_ADDRL);
+ hinfc_write(host, (NAND_CMD_ERASE2 << 8) | NAND_CMD_ERASE1,
+ HINFC504_CMD);
+
+ hinfc_write(host, HINFC504_OP_WAIT_READY_EN
+ | HINFC504_OP_CMD2_EN
+ | HINFC504_OP_CMD1_EN
+ | HINFC504_OP_ADDR_EN
+ | ((host->chipselect & HINFC504_OP_NF_CS_MASK)
+ << HINFC504_OP_NF_CS_SHIFT)
+ | ((host->addr_cycle & HINFC504_OP_ADDR_CYCLE_MASK)
+ << HINFC504_OP_ADDR_CYCLE_SHIFT),
+ HINFC504_OP);
+
+ wait_controller_finished(host);
+
+ return 0;
+}
+
+static int hisi_nfc_send_cmd_readid(struct hinfc_host *host)
+{
+ hinfc_write(host, HINFC504_NANDINFO_LEN, HINFC504_DATA_NUM);
+ hinfc_write(host, NAND_CMD_READID, HINFC504_CMD);
+ hinfc_write(host, 0, HINFC504_ADDRL);
+
+ hinfc_write(host, HINFC504_OP_CMD1_EN | HINFC504_OP_ADDR_EN
+ | HINFC504_OP_READ_DATA_EN
+ | ((host->chipselect & HINFC504_OP_NF_CS_MASK)
+ << HINFC504_OP_NF_CS_SHIFT)
+ | 1 << HINFC504_OP_ADDR_CYCLE_SHIFT, HINFC504_OP);
+
+ wait_controller_finished(host);
+
+ return 0;
+}
+
+static int hisi_nfc_send_cmd_status(struct hinfc_host *host)
+{
+ hinfc_write(host, HINFC504_NANDINFO_LEN, HINFC504_DATA_NUM);
+ hinfc_write(host, NAND_CMD_STATUS, HINFC504_CMD);
+ hinfc_write(host, HINFC504_OP_CMD1_EN
+ | HINFC504_OP_READ_DATA_EN
+ | ((host->chipselect & HINFC504_OP_NF_CS_MASK)
+ << HINFC504_OP_NF_CS_SHIFT),
+ HINFC504_OP);
+
+ wait_controller_finished(host);
+
+ return 0;
+}
+
+static int hisi_nfc_send_cmd_reset(struct hinfc_host *host, int chipselect)
+{
+ hinfc_write(host, NAND_CMD_RESET, HINFC504_CMD);
+
+ hinfc_write(host, HINFC504_OP_CMD1_EN
+ | ((chipselect & HINFC504_OP_NF_CS_MASK)
+ << HINFC504_OP_NF_CS_SHIFT)
+ | HINFC504_OP_WAIT_READY_EN,
+ HINFC504_OP);
+
+ wait_controller_finished(host);
+
+ return 0;
+}
+
+static void hisi_nfc_select_chip(struct nand_chip *chip, int chipselect)
+{
+ struct hinfc_host *host = nand_get_controller_data(chip);
+
+ if (chipselect < 0)
+ return;
+
+ host->chipselect = chipselect;
+}
+
+static uint8_t hisi_nfc_read_byte(struct nand_chip *chip)
+{
+ struct hinfc_host *host = nand_get_controller_data(chip);
+
+ if (host->command == NAND_CMD_STATUS)
+ return *(uint8_t *)(host->mmio);
+
+ host->offset++;
+
+ if (host->command == NAND_CMD_READID)
+ return *(uint8_t *)(host->mmio + host->offset - 1);
+
+ return *(uint8_t *)(host->buffer + host->offset - 1);
+}
+
+static void
+hisi_nfc_write_buf(struct nand_chip *chip, const uint8_t *buf, int len)
+{
+ struct hinfc_host *host = nand_get_controller_data(chip);
+
+ memcpy(host->buffer + host->offset, buf, len);
+ host->offset += len;
+}
+
+static void hisi_nfc_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
+{
+ struct hinfc_host *host = nand_get_controller_data(chip);
+
+ memcpy(buf, host->buffer + host->offset, len);
+ host->offset += len;
+}
+
+static void set_addr(struct mtd_info *mtd, int column, int page_addr)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct hinfc_host *host = nand_get_controller_data(chip);
+ unsigned int command = host->command;
+
+ host->addr_cycle = 0;
+ host->addr_value[0] = 0;
+ host->addr_value[1] = 0;
+
+ /* Serially input address */
+ if (column != -1) {
+ /* Adjust columns for 16 bit buswidth */
+ if (chip->options & NAND_BUSWIDTH_16 &&
+ !nand_opcode_8bits(command))
+ column >>= 1;
+
+ host->addr_value[0] = column & 0xffff;
+ host->addr_cycle = 2;
+ }
+ if (page_addr != -1) {
+ host->addr_value[0] |= (page_addr & 0xffff)
+ << (host->addr_cycle * 8);
+ host->addr_cycle += 2;
+ if (chip->options & NAND_ROW_ADDR_3) {
+ host->addr_cycle += 1;
+ if (host->command == NAND_CMD_ERASE1)
+ host->addr_value[0] |= ((page_addr >> 16) & 0xff) << 16;
+ else
+ host->addr_value[1] |= ((page_addr >> 16) & 0xff);
+ }
+ }
+}
+
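+/*
+ * Worked example for set_addr() above (illustrative): a READ0 at column
+ * 0x80 in page 0x12345 on a chip with NAND_ROW_ADDR_3 yields
+ * addr_cycle = 5, addr_value[0] = 0x23450080 (column in bits 15:0, the
+ * two low row-address bytes above it) and addr_value[1] = 0x01 (the
+ * third row-address byte).
+ */
+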
+static void hisi_nfc_cmdfunc(struct nand_chip *chip, unsigned command,
+ int column, int page_addr)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct hinfc_host *host = nand_get_controller_data(chip);
+ int is_cache_invalid = 1;
+ unsigned int flag = 0;
+
+ host->command = command;
+
+ switch (command) {
+ case NAND_CMD_READ0:
+ case NAND_CMD_READOOB:
+ if (command == NAND_CMD_READ0)
+ host->offset = column;
+ else
+ host->offset = column + mtd->writesize;
+
+ is_cache_invalid = 0;
+ set_addr(mtd, column, page_addr);
+ hisi_nfc_send_cmd_readstart(host);
+ break;
+
+ case NAND_CMD_SEQIN:
+ host->offset = column;
+ set_addr(mtd, column, page_addr);
+ break;
+
+ case NAND_CMD_ERASE1:
+ set_addr(mtd, column, page_addr);
+ break;
+
+ case NAND_CMD_PAGEPROG:
+ hisi_nfc_send_cmd_pageprog(host);
+ break;
+
+ case NAND_CMD_ERASE2:
+ hisi_nfc_send_cmd_erase(host);
+ break;
+
+ case NAND_CMD_READID:
+ host->offset = column;
+ memset(host->mmio, 0, 0x10);
+ hisi_nfc_send_cmd_readid(host);
+ break;
+
+ case NAND_CMD_STATUS:
+ flag = hinfc_read(host, HINFC504_CON);
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST)
+ hinfc_write(host,
+ flag & ~(HINFC504_CON_ECCTYPE_MASK <<
+ HINFC504_CON_ECCTYPE_SHIFT), HINFC504_CON);
+
+ host->offset = 0;
+ memset(host->mmio, 0, 0x10);
+ hisi_nfc_send_cmd_status(host);
+ hinfc_write(host, flag, HINFC504_CON);
+ break;
+
+ case NAND_CMD_RESET:
+ hisi_nfc_send_cmd_reset(host, host->chipselect);
+ break;
+
+ default:
+ dev_err(host->dev, "Error: unsupported cmd(cmd=%x, col=%x, page=%x)\n",
+ command, column, page_addr);
+ }
+
+ if (is_cache_invalid) {
+ host->cache_addr_value[0] = ~0;
+ host->cache_addr_value[1] = ~0;
+ }
+}
+
+static irqreturn_t hinfc_irq_handle(int irq, void *devid)
+{
+ struct hinfc_host *host = devid;
+ unsigned int flag;
+
+ flag = hinfc_read(host, HINFC504_INTS);
+ /* store interrupts state */
+ host->irq_status |= flag;
+
+ if (flag & HINFC504_INTS_DMA) {
+ hinfc_write(host, HINFC504_INTCLR_DMA, HINFC504_INTCLR);
+ complete(&host->cmd_complete);
+ } else if (flag & HINFC504_INTS_CE) {
+ hinfc_write(host, HINFC504_INTCLR_CE, HINFC504_INTCLR);
+ } else if (flag & HINFC504_INTS_UE) {
+ hinfc_write(host, HINFC504_INTCLR_UE, HINFC504_INTCLR);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int hisi_nand_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct hinfc_host *host = nand_get_controller_data(chip);
+ int max_bitflips = 0, stat = 0, stat_max = 0, status_ecc;
+ int stat_1, stat_2;
+
+ nand_read_page_op(chip, page, 0, buf, mtd->writesize);
+ chip->legacy.read_buf(chip, chip->oob_poi, mtd->oobsize);
+
+	/* errors which cannot be corrected by ECC */
+ if (host->irq_status & HINFC504_INTS_UE) {
+ mtd->ecc_stats.failed++;
+ } else if (host->irq_status & HINFC504_INTS_CE) {
+		/* TODO: add support for other ECC modes! */
+ switch (chip->ecc.strength) {
+ case 16:
+ status_ecc = hinfc_read(host, HINFC504_ECC_STATUS) >>
+ HINFC504_ECC_16_BIT_SHIFT & 0x0fff;
+ stat_2 = status_ecc & 0x3f;
+ stat_1 = status_ecc >> 6 & 0x3f;
+ stat = stat_1 + stat_2;
+ stat_max = max_t(int, stat_1, stat_2);
+ }
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(int, max_bitflips, stat_max);
+ }
+ host->irq_status = 0;
+
+ return max_bitflips;
+}
+
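+/*
+ * Worked example for the 16-bit strength decode above (illustrative):
+ * a status field of 0x0c5 splits into stat_2 = 0x05 and stat_1 = 0x03,
+ * so 8 bitflips were corrected in total and max_bitflips is 5.
+ */
+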
+static int hisi_nand_read_oob(struct nand_chip *chip, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct hinfc_host *host = nand_get_controller_data(chip);
+
+ nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
+
+ if (host->irq_status & HINFC504_INTS_UE) {
+ host->irq_status = 0;
+ return -EBADMSG;
+ }
+
+ host->irq_status = 0;
+ return 0;
+}
+
+static int hisi_nand_write_page_hwecc(struct nand_chip *chip,
+ const uint8_t *buf, int oob_required,
+ int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
+ if (oob_required)
+ chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);
+
+ return nand_prog_page_end_op(chip);
+}
+
+static void hisi_nfc_host_init(struct hinfc_host *host)
+{
+ struct nand_chip *chip = &host->chip;
+ unsigned int flag = 0;
+
+ host->version = hinfc_read(host, HINFC_VERSION);
+ host->addr_cycle = 0;
+ host->addr_value[0] = 0;
+ host->addr_value[1] = 0;
+ host->cache_addr_value[0] = ~0;
+ host->cache_addr_value[1] = ~0;
+ host->chipselect = 0;
+
+	/* default config: 2K page size, no ECC; adjusted later when the chip is attached */
+ flag = HINFC504_CON_OP_MODE_NORMAL | HINFC504_CON_READY_BUSY_SEL
+ | ((0x001 & HINFC504_CON_PAGESIZE_MASK)
+		<< HINFC504_CON_PAGESIZE_SHIFT)
+ | ((0x0 & HINFC504_CON_ECCTYPE_MASK)
+ << HINFC504_CON_ECCTYPE_SHIFT)
+ | ((chip->options & NAND_BUSWIDTH_16) ?
+ HINFC504_CON_BUS_WIDTH : 0);
+ hinfc_write(host, flag, HINFC504_CON);
+
+ memset(host->mmio, 0xff, HINFC504_BUFFER_BASE_ADDRESS_LEN);
+
+ hinfc_write(host, SET_HINFC504_PWIDTH(HINFC504_W_LATCH,
+ HINFC504_R_LATCH, HINFC504_RW_LATCH), HINFC504_PWIDTH);
+
+ /* enable DMA irq */
+ hinfc_write(host, HINFC504_INTEN_DMA, HINFC504_INTEN);
+}
+
+static int hisi_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ /* FIXME: add ECC bytes position */
+ return -ENOTSUPP;
+}
+
+static int hisi_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = 2;
+ oobregion->length = 6;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops hisi_ooblayout_ops = {
+ .ecc = hisi_ooblayout_ecc,
+ .free = hisi_ooblayout_free,
+};
+
+static int hisi_nfc_ecc_probe(struct hinfc_host *host)
+{
+ unsigned int flag;
+ int size, strength, ecc_bits;
+ struct device *dev = host->dev;
+ struct nand_chip *chip = &host->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ size = chip->ecc.size;
+ strength = chip->ecc.strength;
+ if (size != 1024) {
+		dev_err(dev, "unsupported ecc size: %d\n", size);
+ return -EINVAL;
+ }
+
+ if ((size == 1024) && ((strength != 8) && (strength != 16) &&
+ (strength != 24) && (strength != 40))) {
+ dev_err(dev, "ecc size and strength do not match\n");
+ return -EINVAL;
+ }
+
+ chip->ecc.size = size;
+ chip->ecc.strength = strength;
+
+ chip->ecc.read_page = hisi_nand_read_page_hwecc;
+ chip->ecc.read_oob = hisi_nand_read_oob;
+ chip->ecc.write_page = hisi_nand_write_page_hwecc;
+
+ switch (chip->ecc.strength) {
+ case 16:
+ ecc_bits = 6;
+ if (mtd->writesize == 2048)
+ mtd_set_ooblayout(mtd, &hisi_ooblayout_ops);
+
+ /* TODO: add more page size support */
+ break;
+
+ /* TODO: add more ecc strength support */
+ default:
+		dev_err(dev, "unsupported strength: %d\n", chip->ecc.strength);
+ return -EINVAL;
+ }
+
+ flag = hinfc_read(host, HINFC504_CON);
+ /* add ecc type configure */
+ flag |= ((ecc_bits & HINFC504_CON_ECCTYPE_MASK)
+ << HINFC504_CON_ECCTYPE_SHIFT);
+ hinfc_write(host, flag, HINFC504_CON);
+
+ /* enable ecc irq */
+ flag = hinfc_read(host, HINFC504_INTEN) & 0xfff;
+ hinfc_write(host, flag | HINFC504_INTEN_UE | HINFC504_INTEN_CE,
+ HINFC504_INTEN);
+
+ return 0;
+}
+
+static int hisi_nfc_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct hinfc_host *host = nand_get_controller_data(chip);
+ int flag;
+
+ host->buffer = dmam_alloc_coherent(host->dev,
+ mtd->writesize + mtd->oobsize,
+ &host->dma_buffer, GFP_KERNEL);
+ if (!host->buffer)
+ return -ENOMEM;
+
+ host->dma_oob = host->dma_buffer + mtd->writesize;
+ memset(host->buffer, 0xff, mtd->writesize + mtd->oobsize);
+
+ flag = hinfc_read(host, HINFC504_CON);
+	flag &= ~(HINFC504_CON_PAGESIZE_MASK << HINFC504_CON_PAGESIZE_SHIFT);
+ switch (mtd->writesize) {
+ case 2048:
+		flag |= (0x001 << HINFC504_CON_PAGESIZE_SHIFT);
+ break;
+ /*
+ * TODO: add more pagesize support,
+ * default pagesize has been set in hisi_nfc_host_init
+ */
+ default:
+		dev_err(host->dev, "non-2KB page size NAND flash is not supported\n");
+ return -EINVAL;
+ }
+ hinfc_write(host, flag, HINFC504_CON);
+
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST)
+ hisi_nfc_ecc_probe(host);
+
+ return 0;
+}
+
+static const struct nand_controller_ops hisi_nfc_controller_ops = {
+ .attach_chip = hisi_nfc_attach_chip,
+};
+
+static int hisi_nfc_probe(struct platform_device *pdev)
+{
+ int ret = 0, irq, max_chips = HINFC504_MAX_CHIP;
+ struct device *dev = &pdev->dev;
+ struct hinfc_host *host;
+ struct nand_chip *chip;
+ struct mtd_info *mtd;
+ struct resource *res;
+ struct device_node *np = dev->of_node;
+
+ host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
+ host->dev = dev;
+
+ platform_set_drvdata(pdev, host);
+ chip = &host->chip;
+ mtd = nand_to_mtd(chip);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return -ENXIO;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ host->iobase = devm_ioremap_resource(dev, res);
+ if (IS_ERR(host->iobase))
+ return PTR_ERR(host->iobase);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ host->mmio = devm_ioremap_resource(dev, res);
+ if (IS_ERR(host->mmio)) {
+		dev_err(dev, "devm_ioremap_resource[1] failed\n");
+ return PTR_ERR(host->mmio);
+ }
+
+ mtd->name = "hisi_nand";
+ mtd->dev.parent = &pdev->dev;
+
+ nand_set_controller_data(chip, host);
+ nand_set_flash_node(chip, np);
+ chip->legacy.cmdfunc = hisi_nfc_cmdfunc;
+ chip->legacy.select_chip = hisi_nfc_select_chip;
+ chip->legacy.read_byte = hisi_nfc_read_byte;
+ chip->legacy.write_buf = hisi_nfc_write_buf;
+ chip->legacy.read_buf = hisi_nfc_read_buf;
+ chip->legacy.chip_delay = HINFC504_CHIP_DELAY;
+ chip->legacy.set_features = nand_get_set_features_notsupp;
+ chip->legacy.get_features = nand_get_set_features_notsupp;
+
+ hisi_nfc_host_init(host);
+
+ ret = devm_request_irq(dev, irq, hinfc_irq_handle, 0x0, "nandc", host);
+ if (ret) {
+ dev_err(dev, "failed to request IRQ\n");
+ return ret;
+ }
+
+ chip->legacy.dummy_controller.ops = &hisi_nfc_controller_ops;
+ ret = nand_scan(chip, max_chips);
+ if (ret)
+ return ret;
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret) {
+		dev_err(dev, "failed to register MTD device: %d\n", ret);
+ nand_cleanup(chip);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hisi_nfc_remove(struct platform_device *pdev)
+{
+ struct hinfc_host *host = platform_get_drvdata(pdev);
+ struct nand_chip *chip = &host->chip;
+ int ret;
+
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int hisi_nfc_suspend(struct device *dev)
+{
+ struct hinfc_host *host = dev_get_drvdata(dev);
+ unsigned long timeout = jiffies + HINFC504_NFC_PM_TIMEOUT;
+
+ while (time_before(jiffies, timeout)) {
+ if (((hinfc_read(host, HINFC504_STATUS) & 0x1) == 0x0) &&
+ (hinfc_read(host, HINFC504_DMA_CTRL) &
+ HINFC504_DMA_CTRL_DMA_START)) {
+ cond_resched();
+ return 0;
+ }
+ }
+
+ dev_err(host->dev, "nand controller suspend timeout.\n");
+
+ return -EAGAIN;
+}
+
+static int hisi_nfc_resume(struct device *dev)
+{
+ int cs;
+ struct hinfc_host *host = dev_get_drvdata(dev);
+ struct nand_chip *chip = &host->chip;
+
+ for (cs = 0; cs < nanddev_ntargets(&chip->base); cs++)
+ hisi_nfc_send_cmd_reset(host, cs);
+ hinfc_write(host, SET_HINFC504_PWIDTH(HINFC504_W_LATCH,
+ HINFC504_R_LATCH, HINFC504_RW_LATCH), HINFC504_PWIDTH);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(hisi_nfc_pm_ops, hisi_nfc_suspend, hisi_nfc_resume);
+
+static const struct of_device_id nfc_id_table[] = {
+ { .compatible = "hisilicon,504-nfc" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, nfc_id_table);
+
+static struct platform_driver hisi_nfc_driver = {
+ .driver = {
+ .name = "hisi_nand",
+ .of_match_table = nfc_id_table,
+ .pm = &hisi_nfc_pm_ops,
+ },
+ .probe = hisi_nfc_probe,
+ .remove = hisi_nfc_remove,
+};
+
+module_platform_driver(hisi_nfc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Zhou Wang");
+MODULE_AUTHOR("Zhiyong Cai");
+MODULE_DESCRIPTION("Hisilicon Nand Flash Controller Driver");
diff --git a/drivers/mtd/nand/raw/ingenic/Kconfig b/drivers/mtd/nand/raw/ingenic/Kconfig
new file mode 100644
index 000000000..96c5ae8b1
--- /dev/null
+++ b/drivers/mtd/nand/raw/ingenic/Kconfig
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config MTD_NAND_JZ4780
+ tristate "JZ4780 NAND controller"
+ depends on MIPS || COMPILE_TEST
+ depends on JZ4780_NEMC
+ help
+ Enables support for NAND Flash connected to the NEMC on JZ4780 SoC
+ based boards, using the BCH controller for hardware error correction.
+
+if MTD_NAND_JZ4780
+
+config MTD_NAND_INGENIC_ECC
+ bool
+
+config MTD_NAND_JZ4740_ECC
+	tristate "Hardware Reed-Solomon ECC support for JZ4740 SoC"
+ select MTD_NAND_INGENIC_ECC
+ help
+ Enable this driver to support the Reed-Solomon error-correction
+ hardware present on the JZ4740 SoC from Ingenic.
+
+ This driver can also be built as a module. If so, the module
+ will be called jz4740-ecc.
+
+config MTD_NAND_JZ4725B_BCH
+ tristate "Hardware BCH support for JZ4725B SoC"
+ select MTD_NAND_INGENIC_ECC
+ help
+ Enable this driver to support the BCH error-correction hardware
+ present on the JZ4725B SoC from Ingenic.
+
+ This driver can also be built as a module. If so, the module
+ will be called jz4725b-bch.
+
+config MTD_NAND_JZ4780_BCH
+ tristate "Hardware BCH support for JZ4780 SoC"
+ select MTD_NAND_INGENIC_ECC
+ help
+ Enable this driver to support the BCH error-correction hardware
+ present on the JZ4780 SoC from Ingenic.
+
+ This driver can also be built as a module. If so, the module
+ will be called jz4780-bch.
+
+endif # MTD_NAND_JZ4780
diff --git a/drivers/mtd/nand/raw/ingenic/Makefile b/drivers/mtd/nand/raw/ingenic/Makefile
new file mode 100644
index 000000000..4c53f5e75
--- /dev/null
+++ b/drivers/mtd/nand/raw/ingenic/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_MTD_NAND_JZ4780) += ingenic_nand.o
+
+ingenic_nand-y += ingenic_nand_drv.o
+ingenic_nand-$(CONFIG_MTD_NAND_INGENIC_ECC) += ingenic_ecc.o
+
+obj-$(CONFIG_MTD_NAND_JZ4740_ECC) += jz4740_ecc.o
+obj-$(CONFIG_MTD_NAND_JZ4725B_BCH) += jz4725b_bch.o
+obj-$(CONFIG_MTD_NAND_JZ4780_BCH) += jz4780_bch.o
diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
new file mode 100644
index 000000000..8e22cd6ec
--- /dev/null
+++ b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * JZ47xx ECC common code
+ *
+ * Copyright (c) 2015 Imagination Technologies
+ * Author: Alex Smith <alex.smith@imgtec.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#include "ingenic_ecc.h"
+
+/**
+ * ingenic_ecc_calculate() - calculate ECC for a data buffer
+ * @ecc: ECC device.
+ * @params: ECC parameters.
+ * @buf: input buffer with raw data.
+ * @ecc_code: output buffer with ECC.
+ *
+ * Return: 0 on success, -ETIMEDOUT if timed out while waiting for ECC
+ * controller.
+ */
+int ingenic_ecc_calculate(struct ingenic_ecc *ecc,
+ struct ingenic_ecc_params *params,
+ const u8 *buf, u8 *ecc_code)
+{
+ return ecc->ops->calculate(ecc, params, buf, ecc_code);
+}
+
+/**
+ * ingenic_ecc_correct() - detect and correct bit errors
+ * @ecc: ECC device.
+ * @params: ECC parameters.
+ * @buf: raw data read from the chip.
+ * @ecc_code: ECC read from the chip.
+ *
+ * Given the raw data and the ECC read from the NAND device, detects and
+ * corrects errors in the data.
+ *
+ * Return: the number of bit errors corrected, -EBADMSG if there are too many
+ * errors to correct or -ETIMEDOUT if we timed out waiting for the controller.
+ */
+int ingenic_ecc_correct(struct ingenic_ecc *ecc,
+ struct ingenic_ecc_params *params,
+ u8 *buf, u8 *ecc_code)
+{
+ return ecc->ops->correct(ecc, params, buf, ecc_code);
+}
+
+/**
+ * ingenic_ecc_get() - get the ECC controller device
+ * @np: ECC device tree node.
+ *
+ * Gets the ECC controller device from the specified device tree node. The
+ * device must be released with ingenic_ecc_release() when it is no longer being
+ * used.
+ *
+ * Return: a pointer to ingenic_ecc, errors are encoded into the pointer.
+ * ERR_PTR(-EPROBE_DEFER) if the device hasn't been initialised yet.
+ */
+static struct ingenic_ecc *ingenic_ecc_get(struct device_node *np)
+{
+ struct platform_device *pdev;
+ struct ingenic_ecc *ecc;
+
+ pdev = of_find_device_by_node(np);
+ if (!pdev || !platform_get_drvdata(pdev))
+ return ERR_PTR(-EPROBE_DEFER);
+
+ get_device(&pdev->dev);
+
+ ecc = platform_get_drvdata(pdev);
+ clk_prepare_enable(ecc->clk);
+
+ return ecc;
+}
+
+/**
+ * of_ingenic_ecc_get() - get the ECC controller from a DT node
+ * @of_node: the node that contains an ecc-engine property.
+ *
+ * Get the ecc-engine property from the given device tree
+ * node and pass it to ingenic_ecc_get to do the work.
+ *
+ * Return: a pointer to ingenic_ecc, errors are encoded into the pointer.
+ * ERR_PTR(-EPROBE_DEFER) if the device hasn't been initialised yet.
+ */
+struct ingenic_ecc *of_ingenic_ecc_get(struct device_node *of_node)
+{
+ struct ingenic_ecc *ecc = NULL;
+ struct device_node *np;
+
+ np = of_parse_phandle(of_node, "ecc-engine", 0);
+
+ /*
+ * If the ecc-engine property is not found, check for the deprecated
+ * ingenic,bch-controller property
+ */
+ if (!np)
+ np = of_parse_phandle(of_node, "ingenic,bch-controller", 0);
+
+ if (np) {
+ ecc = ingenic_ecc_get(np);
+ of_node_put(np);
+ }
+ return ecc;
+}
+
+/**
+ * ingenic_ecc_release() - release the ECC controller device
+ * @ecc: ECC device.
+ */
+void ingenic_ecc_release(struct ingenic_ecc *ecc)
+{
+ clk_disable_unprepare(ecc->clk);
+ put_device(ecc->dev);
+}
+
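+/*
+ * Typical consumer lifecycle (sketch, assuming a NAND driver whose DT
+ * node carries an "ecc-engine" phandle). of_ingenic_ecc_get() may also
+ * return NULL when no phandle is present, or ERR_PTR(-EPROBE_DEFER):
+ *
+ *	ecc = of_ingenic_ecc_get(pdev->dev.of_node);
+ *	if (IS_ERR(ecc))
+ *		return PTR_ERR(ecc);
+ *
+ *	ret = ingenic_ecc_correct(ecc, &params, databuf, eccbuf);
+ *
+ *	ingenic_ecc_release(ecc);
+ */
+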
+int ingenic_ecc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ingenic_ecc *ecc;
+
+ ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
+ if (!ecc)
+ return -ENOMEM;
+
+ ecc->ops = device_get_match_data(dev);
+ if (!ecc->ops)
+ return -EINVAL;
+
+ ecc->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ecc->base))
+ return PTR_ERR(ecc->base);
+
+ ecc->ops->disable(ecc);
+
+ ecc->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(ecc->clk)) {
+ dev_err(dev, "failed to get clock: %ld\n", PTR_ERR(ecc->clk));
+ return PTR_ERR(ecc->clk);
+ }
+
+ mutex_init(&ecc->lock);
+
+ ecc->dev = dev;
+ platform_set_drvdata(pdev, ecc);
+
+ return 0;
+}
+EXPORT_SYMBOL(ingenic_ecc_probe);
diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.h b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.h
new file mode 100644
index 000000000..017868f59
--- /dev/null
+++ b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DRIVERS_MTD_NAND_INGENIC_ECC_INTERNAL_H__
+#define __DRIVERS_MTD_NAND_INGENIC_ECC_INTERNAL_H__
+
+#include <linux/compiler_types.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <uapi/asm-generic/errno-base.h>
+
+struct clk;
+struct device;
+struct ingenic_ecc;
+struct platform_device;
+
+/**
+ * struct ingenic_ecc_params - ECC parameters
+ * @size: data bytes per ECC step.
+ * @bytes: ECC bytes per step.
+ * @strength: number of correctable bits per ECC step.
+ */
+struct ingenic_ecc_params {
+ int size;
+ int bytes;
+ int strength;
+};
+
+#if IS_ENABLED(CONFIG_MTD_NAND_INGENIC_ECC)
+int ingenic_ecc_calculate(struct ingenic_ecc *ecc,
+ struct ingenic_ecc_params *params,
+ const u8 *buf, u8 *ecc_code);
+int ingenic_ecc_correct(struct ingenic_ecc *ecc,
+ struct ingenic_ecc_params *params, u8 *buf,
+ u8 *ecc_code);
+
+void ingenic_ecc_release(struct ingenic_ecc *ecc);
+struct ingenic_ecc *of_ingenic_ecc_get(struct device_node *np);
+#else /* CONFIG_MTD_NAND_INGENIC_ECC */
+static inline int ingenic_ecc_calculate(struct ingenic_ecc *ecc,
+ struct ingenic_ecc_params *params,
+ const u8 *buf, u8 *ecc_code)
+{
+ return -ENODEV;
+}
+
+static inline int ingenic_ecc_correct(struct ingenic_ecc *ecc,
+ struct ingenic_ecc_params *params, u8 *buf,
+ u8 *ecc_code)
+{
+ return -ENODEV;
+}
+
+static inline void ingenic_ecc_release(struct ingenic_ecc *ecc)
+{
+}
+
+static inline struct ingenic_ecc *of_ingenic_ecc_get(struct device_node *np)
+{
+ return ERR_PTR(-ENODEV);
+}
+#endif /* CONFIG_MTD_NAND_INGENIC_ECC */
+
+struct ingenic_ecc_ops {
+ void (*disable)(struct ingenic_ecc *ecc);
+ int (*calculate)(struct ingenic_ecc *ecc,
+ struct ingenic_ecc_params *params,
+ const u8 *buf, u8 *ecc_code);
+ int (*correct)(struct ingenic_ecc *ecc,
+ struct ingenic_ecc_params *params,
+ u8 *buf, u8 *ecc_code);
+};
+
+struct ingenic_ecc {
+ struct device *dev;
+ const struct ingenic_ecc_ops *ops;
+ void __iomem *base;
+ struct clk *clk;
+ struct mutex lock;
+};
+
+int ingenic_ecc_probe(struct platform_device *pdev);
+
+#endif /* __DRIVERS_MTD_NAND_INGENIC_ECC_INTERNAL_H__ */
diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c
new file mode 100644
index 000000000..0e9d426fe
--- /dev/null
+++ b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c
@@ -0,0 +1,573 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Ingenic JZ47xx NAND driver
+ *
+ * Copyright (c) 2015 Imagination Technologies
+ * Author: Alex Smith <alex.smith@imgtec.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+
+#include <linux/jz4780-nemc.h>
+
+#include "ingenic_ecc.h"
+
+#define DRV_NAME "ingenic-nand"
+
+struct jz_soc_info {
+ unsigned long data_offset;
+ unsigned long addr_offset;
+ unsigned long cmd_offset;
+ const struct mtd_ooblayout_ops *oob_layout;
+};
+
+struct ingenic_nand_cs {
+ unsigned int bank;
+ void __iomem *base;
+};
+
+struct ingenic_nfc {
+ struct device *dev;
+ struct ingenic_ecc *ecc;
+ const struct jz_soc_info *soc_info;
+ struct nand_controller controller;
+ unsigned int num_banks;
+ struct list_head chips;
+ struct ingenic_nand_cs cs[];
+};
+
+struct ingenic_nand {
+ struct nand_chip chip;
+ struct list_head chip_list;
+
+ struct gpio_desc *busy_gpio;
+ struct gpio_desc *wp_gpio;
+ unsigned int reading: 1;
+};
+
+static inline struct ingenic_nand *to_ingenic_nand(struct mtd_info *mtd)
+{
+ return container_of(mtd_to_nand(mtd), struct ingenic_nand, chip);
+}
+
+static inline struct ingenic_nfc *to_ingenic_nfc(struct nand_controller *ctrl)
+{
+ return container_of(ctrl, struct ingenic_nfc, controller);
+}
+
+static int qi_lb60_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+ if (section || !ecc->total)
+ return -ERANGE;
+
+ oobregion->length = ecc->total;
+ oobregion->offset = 12;
+
+ return 0;
+}
+
+static int qi_lb60_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->length = mtd->oobsize - ecc->total - 12;
+ oobregion->offset = 12 + ecc->total;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops qi_lb60_ooblayout_ops = {
+ .ecc = qi_lb60_ooblayout_ecc,
+ .free = qi_lb60_ooblayout_free,
+};
+
+static int jz4725b_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+ if (section || !ecc->total)
+ return -ERANGE;
+
+ oobregion->length = ecc->total;
+ oobregion->offset = 3;
+
+ return 0;
+}
+
+static int jz4725b_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->length = mtd->oobsize - ecc->total - 3;
+ oobregion->offset = 3 + ecc->total;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops jz4725b_ooblayout_ops = {
+ .ecc = jz4725b_ooblayout_ecc,
+ .free = jz4725b_ooblayout_free,
+};
+
+static void ingenic_nand_ecc_hwctl(struct nand_chip *chip, int mode)
+{
+ struct ingenic_nand *nand = to_ingenic_nand(nand_to_mtd(chip));
+
+ nand->reading = (mode == NAND_ECC_READ);
+}
+
+static int ingenic_nand_ecc_calculate(struct nand_chip *chip, const u8 *dat,
+ u8 *ecc_code)
+{
+ struct ingenic_nand *nand = to_ingenic_nand(nand_to_mtd(chip));
+ struct ingenic_nfc *nfc = to_ingenic_nfc(nand->chip.controller);
+ struct ingenic_ecc_params params;
+
+ /*
+ * Don't need to generate the ECC when reading, the ECC engine does it
+ * for us as part of decoding/correction.
+ */
+ if (nand->reading)
+ return 0;
+
+ params.size = nand->chip.ecc.size;
+ params.bytes = nand->chip.ecc.bytes;
+ params.strength = nand->chip.ecc.strength;
+
+ return ingenic_ecc_calculate(nfc->ecc, &params, dat, ecc_code);
+}
+
+static int ingenic_nand_ecc_correct(struct nand_chip *chip, u8 *dat,
+ u8 *read_ecc, u8 *calc_ecc)
+{
+ struct ingenic_nand *nand = to_ingenic_nand(nand_to_mtd(chip));
+ struct ingenic_nfc *nfc = to_ingenic_nfc(nand->chip.controller);
+ struct ingenic_ecc_params params;
+
+ params.size = nand->chip.ecc.size;
+ params.bytes = nand->chip.ecc.bytes;
+ params.strength = nand->chip.ecc.strength;
+
+ return ingenic_ecc_correct(nfc->ecc, &params, dat, read_ecc);
+}
+
+static int ingenic_nand_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct ingenic_nfc *nfc = to_ingenic_nfc(chip->controller);
+ int eccbytes;
+
+ if (chip->ecc.strength == 4) {
+		/* JZ4740 uses 9 bytes of ECC to correct a maximum of 4 errors */
+ chip->ecc.bytes = 9;
+ } else {
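+		/*
+		 * Rough parity-size estimate for the BCH engines: each
+		 * correctable error needs about fls(9 * ecc.size) parity
+		 * bits, i.e. that many bytes for every 8 bits of strength.
+		 * E.g. for 1024-byte steps at strength 24:
+		 * fls(9216) * 24 / 8 = 14 * 3 = 42 ECC bytes.
+		 */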
+ chip->ecc.bytes = fls((1 + 8) * chip->ecc.size) *
+ (chip->ecc.strength / 8);
+ }
+
+ switch (chip->ecc.engine_type) {
+ case NAND_ECC_ENGINE_TYPE_ON_HOST:
+ if (!nfc->ecc) {
+ dev_err(nfc->dev, "HW ECC selected, but ECC controller not found\n");
+ return -ENODEV;
+ }
+
+ chip->ecc.hwctl = ingenic_nand_ecc_hwctl;
+ chip->ecc.calculate = ingenic_nand_ecc_calculate;
+ chip->ecc.correct = ingenic_nand_ecc_correct;
+ fallthrough;
+ case NAND_ECC_ENGINE_TYPE_SOFT:
+ dev_info(nfc->dev, "using %s (strength %d, size %d, bytes %d)\n",
+ (nfc->ecc) ? "hardware ECC" : "software ECC",
+ chip->ecc.strength, chip->ecc.size, chip->ecc.bytes);
+ break;
+ case NAND_ECC_ENGINE_TYPE_NONE:
+ dev_info(nfc->dev, "not using ECC\n");
+ break;
+ default:
+ dev_err(nfc->dev, "ECC mode %d not supported\n",
+ chip->ecc.engine_type);
+ return -EINVAL;
+ }
+
+ /* The NAND core will generate the ECC layout for SW ECC */
+ if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
+ return 0;
+
+ /* Generate ECC layout. ECC codes are right aligned in the OOB area. */
+ eccbytes = mtd->writesize / chip->ecc.size * chip->ecc.bytes;
+
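+	/* Two OOB bytes remain reserved for the bad block marker. */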
+ if (eccbytes > mtd->oobsize - 2) {
+ dev_err(nfc->dev,
+ "invalid ECC config: required %d ECC bytes, but only %d are available",
+ eccbytes, mtd->oobsize - 2);
+ return -EINVAL;
+ }
+
+ /*
+ * The generic layout for BBT markers will most likely overlap with our
+ * ECC bytes in the OOB, so move the BBT markers outside the OOB area.
+ */
+ if (chip->bbt_options & NAND_BBT_USE_FLASH)
+ chip->bbt_options |= NAND_BBT_NO_OOB;
+
+ /* For legacy reasons we use a different layout on the qi,lb60 board. */
+ if (of_machine_is_compatible("qi,lb60"))
+ mtd_set_ooblayout(mtd, &qi_lb60_ooblayout_ops);
+ else if (nfc->soc_info->oob_layout)
+ mtd_set_ooblayout(mtd, nfc->soc_info->oob_layout);
+ else
+ mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout());
+
+ return 0;
+}
+
+static int ingenic_nand_exec_instr(struct nand_chip *chip,
+ struct ingenic_nand_cs *cs,
+ const struct nand_op_instr *instr)
+{
+ struct ingenic_nand *nand = to_ingenic_nand(nand_to_mtd(chip));
+ struct ingenic_nfc *nfc = to_ingenic_nfc(chip->controller);
+ unsigned int i;
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ writeb(instr->ctx.cmd.opcode,
+ cs->base + nfc->soc_info->cmd_offset);
+ return 0;
+ case NAND_OP_ADDR_INSTR:
+ for (i = 0; i < instr->ctx.addr.naddrs; i++)
+ writeb(instr->ctx.addr.addrs[i],
+ cs->base + nfc->soc_info->addr_offset);
+ return 0;
+ case NAND_OP_DATA_IN_INSTR:
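+		/*
+		 * force_8bit transfers (e.g. ID and status bytes) must use
+		 * byte accesses even on a 16-bit bus.
+		 */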
+ if (instr->ctx.data.force_8bit ||
+ !(chip->options & NAND_BUSWIDTH_16))
+ ioread8_rep(cs->base + nfc->soc_info->data_offset,
+ instr->ctx.data.buf.in,
+ instr->ctx.data.len);
+ else
+ ioread16_rep(cs->base + nfc->soc_info->data_offset,
+ instr->ctx.data.buf.in,
+ instr->ctx.data.len);
+ return 0;
+ case NAND_OP_DATA_OUT_INSTR:
+ if (instr->ctx.data.force_8bit ||
+ !(chip->options & NAND_BUSWIDTH_16))
+ iowrite8_rep(cs->base + nfc->soc_info->data_offset,
+ instr->ctx.data.buf.out,
+ instr->ctx.data.len);
+ else
+ iowrite16_rep(cs->base + nfc->soc_info->data_offset,
+ instr->ctx.data.buf.out,
+ instr->ctx.data.len);
+ return 0;
+ case NAND_OP_WAITRDY_INSTR:
+ if (!nand->busy_gpio)
+ return nand_soft_waitrdy(chip,
+ instr->ctx.waitrdy.timeout_ms);
+
+ return nand_gpio_waitrdy(chip, nand->busy_gpio,
+ instr->ctx.waitrdy.timeout_ms);
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int ingenic_nand_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ struct ingenic_nand *nand = to_ingenic_nand(nand_to_mtd(chip));
+ struct ingenic_nfc *nfc = to_ingenic_nfc(nand->chip.controller);
+ struct ingenic_nand_cs *cs;
+ unsigned int i;
+ int ret = 0;
+
+ if (check_only)
+ return 0;
+
+ cs = &nfc->cs[op->cs];
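+	/* Keep the NEMC bank asserted for the whole operation. */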
+ jz4780_nemc_assert(nfc->dev, cs->bank, true);
+ for (i = 0; i < op->ninstrs; i++) {
+ ret = ingenic_nand_exec_instr(chip, cs, &op->instrs[i]);
+ if (ret)
+ break;
+
+ if (op->instrs[i].delay_ns)
+ ndelay(op->instrs[i].delay_ns);
+ }
+ jz4780_nemc_assert(nfc->dev, cs->bank, false);
+
+ return ret;
+}
+
+static const struct nand_controller_ops ingenic_nand_controller_ops = {
+ .attach_chip = ingenic_nand_attach_chip,
+ .exec_op = ingenic_nand_exec_op,
+};
+
+static int ingenic_nand_init_chip(struct platform_device *pdev,
+ struct ingenic_nfc *nfc,
+ struct device_node *np,
+ unsigned int chipnr)
+{
+ struct device *dev = &pdev->dev;
+ struct ingenic_nand *nand;
+ struct ingenic_nand_cs *cs;
+ struct nand_chip *chip;
+ struct mtd_info *mtd;
+ const __be32 *reg;
+ int ret = 0;
+
+ cs = &nfc->cs[chipnr];
+
+ reg = of_get_property(np, "reg", NULL);
+ if (!reg)
+ return -EINVAL;
+
+ cs->bank = be32_to_cpu(*reg);
+
+ jz4780_nemc_set_type(nfc->dev, cs->bank, JZ4780_NEMC_BANK_NAND);
+
+ cs->base = devm_platform_ioremap_resource(pdev, chipnr);
+ if (IS_ERR(cs->base))
+ return PTR_ERR(cs->base);
+
+ nand = devm_kzalloc(dev, sizeof(*nand), GFP_KERNEL);
+ if (!nand)
+ return -ENOMEM;
+
+ nand->busy_gpio = devm_gpiod_get_optional(dev, "rb", GPIOD_IN);
+
+ if (IS_ERR(nand->busy_gpio)) {
+ ret = PTR_ERR(nand->busy_gpio);
+ dev_err(dev, "failed to request busy GPIO: %d\n", ret);
+ return ret;
+ }
+
+ /*
+	 * The rb-gpios semantics were undocumented, and qi,lb60 (along with
+ * the ingenic driver) got it wrong. The active state encodes the
+ * NAND ready state, which is high level. Since there's no signal
+ * inverter on this board, it should be active-high. Let's fix that
+ * here for older DTs so we can re-use the generic nand_gpio_waitrdy()
+ * helper, and be consistent with what other drivers do.
+ */
+ if (of_machine_is_compatible("qi,lb60") &&
+ gpiod_is_active_low(nand->busy_gpio))
+ gpiod_toggle_active_low(nand->busy_gpio);
+
+ nand->wp_gpio = devm_gpiod_get_optional(dev, "wp", GPIOD_OUT_LOW);
+
+ if (IS_ERR(nand->wp_gpio)) {
+ ret = PTR_ERR(nand->wp_gpio);
+ dev_err(dev, "failed to request WP GPIO: %d\n", ret);
+ return ret;
+ }
+
+ chip = &nand->chip;
+ mtd = nand_to_mtd(chip);
+ mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%s.%d", dev_name(dev),
+ cs->bank);
+ if (!mtd->name)
+ return -ENOMEM;
+ mtd->dev.parent = dev;
+
+ chip->options = NAND_NO_SUBPAGE_WRITE;
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+ chip->controller = &nfc->controller;
+ nand_set_flash_node(chip, np);
+
+ chip->controller->ops = &ingenic_nand_controller_ops;
+ ret = nand_scan(chip, 1);
+ if (ret)
+ return ret;
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret) {
+ nand_cleanup(chip);
+ return ret;
+ }
+
+ list_add_tail(&nand->chip_list, &nfc->chips);
+
+ return 0;
+}
+
+static void ingenic_nand_cleanup_chips(struct ingenic_nfc *nfc)
+{
+ struct ingenic_nand *ingenic_chip;
+ struct nand_chip *chip;
+ int ret;
+
+ while (!list_empty(&nfc->chips)) {
+ ingenic_chip = list_first_entry(&nfc->chips,
+ struct ingenic_nand, chip_list);
+ chip = &ingenic_chip->chip;
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ list_del(&ingenic_chip->chip_list);
+ }
+}
+
+static int ingenic_nand_init_chips(struct ingenic_nfc *nfc,
+ struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np;
+ int i = 0;
+ int ret;
+ int num_chips = of_get_child_count(dev->of_node);
+
+ if (num_chips > nfc->num_banks) {
+ dev_err(dev, "found %d chips but only %d banks\n",
+ num_chips, nfc->num_banks);
+ return -EINVAL;
+ }
+
+ for_each_child_of_node(dev->of_node, np) {
+ ret = ingenic_nand_init_chip(pdev, nfc, np, i);
+ if (ret) {
+ ingenic_nand_cleanup_chips(nfc);
+ of_node_put(np);
+ return ret;
+ }
+
+ i++;
+ }
+
+ return 0;
+}
+
+static int ingenic_nand_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ unsigned int num_banks;
+ struct ingenic_nfc *nfc;
+ int ret;
+
+ num_banks = jz4780_nemc_num_banks(dev);
+ if (num_banks == 0) {
+ dev_err(dev, "no banks found\n");
+ return -ENODEV;
+ }
+
+ nfc = devm_kzalloc(dev, struct_size(nfc, cs, num_banks), GFP_KERNEL);
+ if (!nfc)
+ return -ENOMEM;
+
+ nfc->soc_info = device_get_match_data(dev);
+ if (!nfc->soc_info)
+ return -EINVAL;
+
+ /*
+ * Check for ECC HW before we call nand_scan_ident, to prevent us from
+ * having to call it again if the ECC driver returns -EPROBE_DEFER.
+ */
+ nfc->ecc = of_ingenic_ecc_get(dev->of_node);
+ if (IS_ERR(nfc->ecc))
+ return PTR_ERR(nfc->ecc);
+
+ nfc->dev = dev;
+ nfc->num_banks = num_banks;
+
+ nand_controller_init(&nfc->controller);
+ INIT_LIST_HEAD(&nfc->chips);
+
+ ret = ingenic_nand_init_chips(nfc, pdev);
+ if (ret) {
+ if (nfc->ecc)
+ ingenic_ecc_release(nfc->ecc);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, nfc);
+ return 0;
+}
+
+static int ingenic_nand_remove(struct platform_device *pdev)
+{
+ struct ingenic_nfc *nfc = platform_get_drvdata(pdev);
+
+ if (nfc->ecc)
+ ingenic_ecc_release(nfc->ecc);
+
+ ingenic_nand_cleanup_chips(nfc);
+
+ return 0;
+}
+
+static const struct jz_soc_info jz4740_soc_info = {
+ .data_offset = 0x00000000,
+ .cmd_offset = 0x00008000,
+ .addr_offset = 0x00010000,
+};
+
+static const struct jz_soc_info jz4725b_soc_info = {
+ .data_offset = 0x00000000,
+ .cmd_offset = 0x00008000,
+ .addr_offset = 0x00010000,
+ .oob_layout = &jz4725b_ooblayout_ops,
+};
+
+static const struct jz_soc_info jz4780_soc_info = {
+ .data_offset = 0x00000000,
+ .cmd_offset = 0x00400000,
+ .addr_offset = 0x00800000,
+};
+
+static const struct of_device_id ingenic_nand_dt_match[] = {
+ { .compatible = "ingenic,jz4740-nand", .data = &jz4740_soc_info },
+ { .compatible = "ingenic,jz4725b-nand", .data = &jz4725b_soc_info },
+ { .compatible = "ingenic,jz4780-nand", .data = &jz4780_soc_info },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ingenic_nand_dt_match);
+
+static struct platform_driver ingenic_nand_driver = {
+ .probe = ingenic_nand_probe,
+ .remove = ingenic_nand_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = of_match_ptr(ingenic_nand_dt_match),
+ },
+};
+module_platform_driver(ingenic_nand_driver);
+
+MODULE_AUTHOR("Alex Smith <alex@alex-smith.me.uk>");
+MODULE_AUTHOR("Harvey Hunt <harveyhuntnexus@gmail.com>");
+MODULE_DESCRIPTION("Ingenic JZ47xx NAND driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/raw/ingenic/jz4725b_bch.c b/drivers/mtd/nand/raw/ingenic/jz4725b_bch.c
new file mode 100644
index 000000000..2d0e0a219
--- /dev/null
+++ b/drivers/mtd/nand/raw/ingenic/jz4725b_bch.c
@@ -0,0 +1,295 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * JZ4725B BCH controller driver
+ *
+ * Copyright (C) 2019 Paul Cercueil <paul@crapouillou.net>
+ *
+ * Based on jz4780_bch.c
+ */
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#include "ingenic_ecc.h"
+
+#define BCH_BHCR 0x0
+#define BCH_BHCSR 0x4
+#define BCH_BHCCR 0x8
+#define BCH_BHCNT 0xc
+#define BCH_BHDR 0x10
+#define BCH_BHPAR0 0x14
+#define BCH_BHERR0 0x28
+#define BCH_BHINT 0x24
+#define BCH_BHINTES 0x3c
+#define BCH_BHINTEC 0x40
+#define BCH_BHINTE 0x38
+
+#define BCH_BHCR_ENCE BIT(3)
+#define BCH_BHCR_BSEL BIT(2)
+#define BCH_BHCR_INIT BIT(1)
+#define BCH_BHCR_BCHE BIT(0)
+
+#define BCH_BHCNT_DEC_COUNT_SHIFT 16
+#define BCH_BHCNT_DEC_COUNT_MASK (0x3ff << BCH_BHCNT_DEC_COUNT_SHIFT)
+#define BCH_BHCNT_ENC_COUNT_SHIFT 0
+#define BCH_BHCNT_ENC_COUNT_MASK (0x3ff << BCH_BHCNT_ENC_COUNT_SHIFT)
+
+#define BCH_BHERR_INDEX0_SHIFT 0
+#define BCH_BHERR_INDEX0_MASK (0x1fff << BCH_BHERR_INDEX0_SHIFT)
+#define BCH_BHERR_INDEX1_SHIFT 16
+#define BCH_BHERR_INDEX1_MASK (0x1fff << BCH_BHERR_INDEX1_SHIFT)
+
+#define BCH_BHINT_ERRC_SHIFT 28
+#define BCH_BHINT_ERRC_MASK (0xf << BCH_BHINT_ERRC_SHIFT)
+#define BCH_BHINT_TERRC_SHIFT 16
+#define BCH_BHINT_TERRC_MASK (0x7f << BCH_BHINT_TERRC_SHIFT)
+#define BCH_BHINT_ALL_0 BIT(5)
+#define BCH_BHINT_ALL_F BIT(4)
+#define BCH_BHINT_DECF BIT(3)
+#define BCH_BHINT_ENCF BIT(2)
+#define BCH_BHINT_UNCOR BIT(1)
+#define BCH_BHINT_ERR BIT(0)
+
+/* Timeout for BCH calculation/correction. */
+#define BCH_TIMEOUT_US 100000
+
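+/*
+ * BHCR is updated through dedicated set (BHCSR) and clear (BHCCR) registers,
+ * so individual bits can be changed without a read-modify-write cycle.
+ */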
+static inline void jz4725b_bch_config_set(struct ingenic_ecc *bch, u32 cfg)
+{
+ writel(cfg, bch->base + BCH_BHCSR);
+}
+
+static inline void jz4725b_bch_config_clear(struct ingenic_ecc *bch, u32 cfg)
+{
+ writel(cfg, bch->base + BCH_BHCCR);
+}
+
+static int jz4725b_bch_reset(struct ingenic_ecc *bch,
+ struct ingenic_ecc_params *params, bool calc_ecc)
+{
+ u32 reg, max_value;
+
+ /* Clear interrupt status. */
+ writel(readl(bch->base + BCH_BHINT), bch->base + BCH_BHINT);
+
+ /* Initialise and enable BCH. */
+ jz4725b_bch_config_clear(bch, 0x1f);
+ jz4725b_bch_config_set(bch, BCH_BHCR_BCHE);
+
+ if (params->strength == 8)
+ jz4725b_bch_config_set(bch, BCH_BHCR_BSEL);
+ else
+ jz4725b_bch_config_clear(bch, BCH_BHCR_BSEL);
+
+ if (calc_ecc) /* calculate ECC from data */
+ jz4725b_bch_config_set(bch, BCH_BHCR_ENCE);
+ else /* correct data from ECC */
+ jz4725b_bch_config_clear(bch, BCH_BHCR_ENCE);
+
+ jz4725b_bch_config_set(bch, BCH_BHCR_INIT);
+
+ max_value = BCH_BHCNT_ENC_COUNT_MASK >> BCH_BHCNT_ENC_COUNT_SHIFT;
+ if (params->size > max_value)
+ return -EINVAL;
+
+ max_value = BCH_BHCNT_DEC_COUNT_MASK >> BCH_BHCNT_DEC_COUNT_SHIFT;
+ if (params->size + params->bytes > max_value)
+ return -EINVAL;
+
+ /* Set up BCH count register. */
+ reg = params->size << BCH_BHCNT_ENC_COUNT_SHIFT;
+ reg |= (params->size + params->bytes) << BCH_BHCNT_DEC_COUNT_SHIFT;
+ writel(reg, bch->base + BCH_BHCNT);
+
+ return 0;
+}
+
+static void jz4725b_bch_disable(struct ingenic_ecc *bch)
+{
+ /* Clear interrupts */
+ writel(readl(bch->base + BCH_BHINT), bch->base + BCH_BHINT);
+
+ /* Disable the hardware */
+ jz4725b_bch_config_clear(bch, BCH_BHCR_BCHE);
+}
+
+static void jz4725b_bch_write_data(struct ingenic_ecc *bch, const u8 *buf,
+ size_t size)
+{
+ while (size--)
+ writeb(*buf++, bch->base + BCH_BHDR);
+}
+
+static void jz4725b_bch_read_parity(struct ingenic_ecc *bch, u8 *buf,
+ size_t size)
+{
+ size_t size32 = size / sizeof(u32);
+ size_t size8 = size % sizeof(u32);
+ u32 *dest32;
+ u8 *dest8;
+ u32 val, offset = 0;
+
+ dest32 = (u32 *)buf;
+ while (size32--) {
+ *dest32++ = readl_relaxed(bch->base + BCH_BHPAR0 + offset);
+ offset += sizeof(u32);
+ }
+
+ dest8 = (u8 *)dest32;
+ val = readl_relaxed(bch->base + BCH_BHPAR0 + offset);
+ switch (size8) {
+ case 3:
+ dest8[2] = (val >> 16) & 0xff;
+ fallthrough;
+ case 2:
+ dest8[1] = (val >> 8) & 0xff;
+ fallthrough;
+ case 1:
+ dest8[0] = val & 0xff;
+ break;
+ }
+}
+
+static int jz4725b_bch_wait_complete(struct ingenic_ecc *bch, unsigned int irq,
+ u32 *status)
+{
+ u32 reg;
+ int ret;
+
+ /*
+ * While we could use interrupts here and sleep until the operation
+ * completes, the controller works fairly quickly (usually a few
+	 * microseconds), so the overhead of sleeping until an interrupt
+	 * arrives noticeably hurts performance.
+ */
+ ret = readl_relaxed_poll_timeout(bch->base + BCH_BHINT, reg,
+ reg & irq, 0, BCH_TIMEOUT_US);
+ if (ret)
+ return ret;
+
+ if (status)
+ *status = reg;
+
+ writel(reg, bch->base + BCH_BHINT);
+
+ return 0;
+}
+
+static int jz4725b_calculate(struct ingenic_ecc *bch,
+ struct ingenic_ecc_params *params,
+ const u8 *buf, u8 *ecc_code)
+{
+ int ret;
+
+ mutex_lock(&bch->lock);
+
+ ret = jz4725b_bch_reset(bch, params, true);
+ if (ret) {
+ dev_err(bch->dev, "Unable to init BCH with given parameters\n");
+ goto out_disable;
+ }
+
+ jz4725b_bch_write_data(bch, buf, params->size);
+
+ ret = jz4725b_bch_wait_complete(bch, BCH_BHINT_ENCF, NULL);
+ if (ret) {
+ dev_err(bch->dev, "timed out while calculating ECC\n");
+ goto out_disable;
+ }
+
+ jz4725b_bch_read_parity(bch, ecc_code, params->bytes);
+
+out_disable:
+ jz4725b_bch_disable(bch);
+ mutex_unlock(&bch->lock);
+
+ return ret;
+}
+
+static int jz4725b_correct(struct ingenic_ecc *bch,
+ struct ingenic_ecc_params *params,
+ u8 *buf, u8 *ecc_code)
+{
+ u32 reg, errors, bit;
+ unsigned int i;
+ int ret;
+
+ mutex_lock(&bch->lock);
+
+ ret = jz4725b_bch_reset(bch, params, false);
+ if (ret) {
+ dev_err(bch->dev, "Unable to init BCH with given parameters\n");
+ goto out;
+ }
+
+ jz4725b_bch_write_data(bch, buf, params->size);
+ jz4725b_bch_write_data(bch, ecc_code, params->bytes);
+
+ ret = jz4725b_bch_wait_complete(bch, BCH_BHINT_DECF, &reg);
+ if (ret) {
+ dev_err(bch->dev, "timed out while correcting data\n");
+ goto out;
+ }
+
+ if (reg & (BCH_BHINT_ALL_F | BCH_BHINT_ALL_0)) {
+		/* Data and ECC are all 0xff or 0x00 - nothing to correct */
+ ret = 0;
+ goto out;
+ }
+
+ if (reg & BCH_BHINT_UNCOR) {
+ /* Uncorrectable ECC error */
+ ret = -EBADMSG;
+ goto out;
+ }
+
+ errors = (reg & BCH_BHINT_ERRC_MASK) >> BCH_BHINT_ERRC_SHIFT;
+
+ /* Correct any detected errors. */
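+	/*
+	 * Each BHERR register packs two error indices; a new register is
+	 * only read on even iterations, odd ones reuse its upper half.
+	 */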
+ for (i = 0; i < errors; i++) {
+ if (i & 1) {
+ bit = (reg & BCH_BHERR_INDEX1_MASK) >> BCH_BHERR_INDEX1_SHIFT;
+ } else {
+ reg = readl(bch->base + BCH_BHERR0 + (i * 4));
+ bit = (reg & BCH_BHERR_INDEX0_MASK) >> BCH_BHERR_INDEX0_SHIFT;
+ }
+
+ buf[(bit >> 3)] ^= BIT(bit & 0x7);
+ }
+
+out:
+ jz4725b_bch_disable(bch);
+ mutex_unlock(&bch->lock);
+
+ return ret;
+}
+
+static const struct ingenic_ecc_ops jz4725b_bch_ops = {
+ .disable = jz4725b_bch_disable,
+ .calculate = jz4725b_calculate,
+ .correct = jz4725b_correct,
+};
+
+static const struct of_device_id jz4725b_bch_dt_match[] = {
+ { .compatible = "ingenic,jz4725b-bch", .data = &jz4725b_bch_ops },
+ {},
+};
+MODULE_DEVICE_TABLE(of, jz4725b_bch_dt_match);
+
+static struct platform_driver jz4725b_bch_driver = {
+ .probe = ingenic_ecc_probe,
+ .driver = {
+ .name = "jz4725b-bch",
+ .of_match_table = jz4725b_bch_dt_match,
+ },
+};
+module_platform_driver(jz4725b_bch_driver);
+
+MODULE_AUTHOR("Paul Cercueil <paul@crapouillou.net>");
+MODULE_DESCRIPTION("Ingenic JZ4725B BCH controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/raw/ingenic/jz4740_ecc.c b/drivers/mtd/nand/raw/ingenic/jz4740_ecc.c
new file mode 100644
index 000000000..54e377754
--- /dev/null
+++ b/drivers/mtd/nand/raw/ingenic/jz4740_ecc.c
@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * JZ4740 ECC controller driver
+ *
+ * Copyright (c) 2019 Paul Cercueil <paul@crapouillou.net>
+ *
+ * based on jz4740-nand.c
+ */
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#include "ingenic_ecc.h"
+
+#define JZ_REG_NAND_ECC_CTRL 0x00
+#define JZ_REG_NAND_DATA 0x04
+#define JZ_REG_NAND_PAR0 0x08
+#define JZ_REG_NAND_PAR1 0x0C
+#define JZ_REG_NAND_PAR2 0x10
+#define JZ_REG_NAND_IRQ_STAT 0x14
+#define JZ_REG_NAND_IRQ_CTRL 0x18
+#define JZ_REG_NAND_ERR(x) (0x1C + ((x) << 2))
+
+#define JZ_NAND_ECC_CTRL_PAR_READY BIT(4)
+#define JZ_NAND_ECC_CTRL_ENCODING BIT(3)
+#define JZ_NAND_ECC_CTRL_RS BIT(2)
+#define JZ_NAND_ECC_CTRL_RESET BIT(1)
+#define JZ_NAND_ECC_CTRL_ENABLE BIT(0)
+
+#define JZ_NAND_STATUS_ERR_COUNT (BIT(31) | BIT(30) | BIT(29))
+#define JZ_NAND_STATUS_PAD_FINISH BIT(4)
+#define JZ_NAND_STATUS_DEC_FINISH BIT(3)
+#define JZ_NAND_STATUS_ENC_FINISH BIT(2)
+#define JZ_NAND_STATUS_UNCOR_ERROR BIT(1)
+#define JZ_NAND_STATUS_ERROR BIT(0)
+
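+/*
+ * Parity bytes the RS encoder produces for a block of all-0xff data, used
+ * below to detect erased pages so their ECC can be stored as all-0xff too.
+ */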
+static const uint8_t empty_block_ecc[] = {
+ 0xcd, 0x9d, 0x90, 0x58, 0xf4, 0x8b, 0xff, 0xb7, 0x6f
+};
+
+static void jz4740_ecc_reset(struct ingenic_ecc *ecc, bool calc_ecc)
+{
+ uint32_t reg;
+
+ /* Clear interrupt status */
+ writel(0, ecc->base + JZ_REG_NAND_IRQ_STAT);
+
+ /* Initialize and enable ECC hardware */
+ reg = readl(ecc->base + JZ_REG_NAND_ECC_CTRL);
+ reg |= JZ_NAND_ECC_CTRL_RESET;
+ reg |= JZ_NAND_ECC_CTRL_ENABLE;
+ reg |= JZ_NAND_ECC_CTRL_RS;
+ if (calc_ecc) /* calculate ECC from data */
+ reg |= JZ_NAND_ECC_CTRL_ENCODING;
+ else /* correct data from ECC */
+ reg &= ~JZ_NAND_ECC_CTRL_ENCODING;
+
+ writel(reg, ecc->base + JZ_REG_NAND_ECC_CTRL);
+}
+
+static int jz4740_ecc_calculate(struct ingenic_ecc *ecc,
+ struct ingenic_ecc_params *params,
+ const u8 *buf, u8 *ecc_code)
+{
+ uint32_t reg, status;
+ unsigned int timeout = 1000;
+ int i;
+
+ jz4740_ecc_reset(ecc, true);
+
+ do {
+ status = readl(ecc->base + JZ_REG_NAND_IRQ_STAT);
+ } while (!(status & JZ_NAND_STATUS_ENC_FINISH) && --timeout);
+
+ if (timeout == 0)
+ return -ETIMEDOUT;
+
+ reg = readl(ecc->base + JZ_REG_NAND_ECC_CTRL);
+ reg &= ~JZ_NAND_ECC_CTRL_ENABLE;
+ writel(reg, ecc->base + JZ_REG_NAND_ECC_CTRL);
+
+ for (i = 0; i < params->bytes; ++i)
+ ecc_code[i] = readb(ecc->base + JZ_REG_NAND_PAR0 + i);
+
+ /*
+ * If the written data is completely 0xff, we also want to write 0xff as
+ * ECC, otherwise we will get in trouble when doing subpage writes.
+ */
+ if (memcmp(ecc_code, empty_block_ecc, sizeof(empty_block_ecc)) == 0)
+ memset(ecc_code, 0xff, sizeof(empty_block_ecc));
+
+ return 0;
+}
+
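+/*
+ * The RS decoder reports error positions as 9-bit symbol indices. Load the
+ * two bytes spanning the symbol, XOR the 9-bit correction mask into it at
+ * the right bit offset, then store the result back.
+ */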
+static void jz_nand_correct_data(uint8_t *buf, int index, int mask)
+{
+ int offset = index & 0x7;
+ uint16_t data;
+
+ index += (index >> 3);
+
+ data = buf[index];
+ data |= buf[index + 1] << 8;
+
+ mask ^= (data >> offset) & 0x1ff;
+ data &= ~(0x1ff << offset);
+ data |= (mask << offset);
+
+ buf[index] = data & 0xff;
+ buf[index + 1] = (data >> 8) & 0xff;
+}
+
+static int jz4740_ecc_correct(struct ingenic_ecc *ecc,
+ struct ingenic_ecc_params *params,
+ u8 *buf, u8 *ecc_code)
+{
+ int i, error_count, index;
+ uint32_t reg, status, error;
+ unsigned int timeout = 1000;
+
+ jz4740_ecc_reset(ecc, false);
+
+ for (i = 0; i < params->bytes; ++i)
+ writeb(ecc_code[i], ecc->base + JZ_REG_NAND_PAR0 + i);
+
+ reg = readl(ecc->base + JZ_REG_NAND_ECC_CTRL);
+ reg |= JZ_NAND_ECC_CTRL_PAR_READY;
+ writel(reg, ecc->base + JZ_REG_NAND_ECC_CTRL);
+
+ do {
+ status = readl(ecc->base + JZ_REG_NAND_IRQ_STAT);
+ } while (!(status & JZ_NAND_STATUS_DEC_FINISH) && --timeout);
+
+ if (timeout == 0)
+ return -ETIMEDOUT;
+
+ reg = readl(ecc->base + JZ_REG_NAND_ECC_CTRL);
+ reg &= ~JZ_NAND_ECC_CTRL_ENABLE;
+ writel(reg, ecc->base + JZ_REG_NAND_ECC_CTRL);
+
+ if (status & JZ_NAND_STATUS_ERROR) {
+ if (status & JZ_NAND_STATUS_UNCOR_ERROR)
+ return -EBADMSG;
+
+ error_count = (status & JZ_NAND_STATUS_ERR_COUNT) >> 29;
+
+ for (i = 0; i < error_count; ++i) {
+ error = readl(ecc->base + JZ_REG_NAND_ERR(i));
+ index = ((error >> 16) & 0x1ff) - 1;
+ if (index >= 0 && index < params->size)
+ jz_nand_correct_data(buf, index, error & 0x1ff);
+ }
+
+ return error_count;
+ }
+
+ return 0;
+}
+
+static void jz4740_ecc_disable(struct ingenic_ecc *ecc)
+{
+ u32 reg;
+
+ writel(0, ecc->base + JZ_REG_NAND_IRQ_STAT);
+ reg = readl(ecc->base + JZ_REG_NAND_ECC_CTRL);
+ reg &= ~JZ_NAND_ECC_CTRL_ENABLE;
+ writel(reg, ecc->base + JZ_REG_NAND_ECC_CTRL);
+}
+
+static const struct ingenic_ecc_ops jz4740_ecc_ops = {
+ .disable = jz4740_ecc_disable,
+ .calculate = jz4740_ecc_calculate,
+ .correct = jz4740_ecc_correct,
+};
+
+static const struct of_device_id jz4740_ecc_dt_match[] = {
+ { .compatible = "ingenic,jz4740-ecc", .data = &jz4740_ecc_ops },
+ {},
+};
+MODULE_DEVICE_TABLE(of, jz4740_ecc_dt_match);
+
+static struct platform_driver jz4740_ecc_driver = {
+ .probe = ingenic_ecc_probe,
+ .driver = {
+ .name = "jz4740-ecc",
+ .of_match_table = jz4740_ecc_dt_match,
+ },
+};
+module_platform_driver(jz4740_ecc_driver);
+
+MODULE_AUTHOR("Paul Cercueil <paul@crapouillou.net>");
+MODULE_DESCRIPTION("Ingenic JZ4740 ECC controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/raw/ingenic/jz4780_bch.c b/drivers/mtd/nand/raw/ingenic/jz4780_bch.c
new file mode 100644
index 000000000..d67dbfff7
--- /dev/null
+++ b/drivers/mtd/nand/raw/ingenic/jz4780_bch.c
@@ -0,0 +1,271 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * JZ4780 BCH controller driver
+ *
+ * Copyright (c) 2015 Imagination Technologies
+ * Author: Alex Smith <alex.smith@imgtec.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#include "ingenic_ecc.h"
+
+#define BCH_BHCR 0x0
+#define BCH_BHCCR 0x8
+#define BCH_BHCNT 0xc
+#define BCH_BHDR 0x10
+#define BCH_BHPAR0 0x14
+#define BCH_BHERR0 0x84
+#define BCH_BHINT 0x184
+#define BCH_BHINTES 0x188
+#define BCH_BHINTEC 0x18c
+#define BCH_BHINTE 0x190
+
+#define BCH_BHCR_BSEL_SHIFT 4
+#define BCH_BHCR_BSEL_MASK (0x7f << BCH_BHCR_BSEL_SHIFT)
+#define BCH_BHCR_ENCE BIT(2)
+#define BCH_BHCR_INIT BIT(1)
+#define BCH_BHCR_BCHE BIT(0)
+
+#define BCH_BHCNT_PARITYSIZE_SHIFT 16
+#define BCH_BHCNT_PARITYSIZE_MASK (0x7f << BCH_BHCNT_PARITYSIZE_SHIFT)
+#define BCH_BHCNT_BLOCKSIZE_SHIFT 0
+#define BCH_BHCNT_BLOCKSIZE_MASK (0x7ff << BCH_BHCNT_BLOCKSIZE_SHIFT)
+
+#define BCH_BHERR_MASK_SHIFT 16
+#define BCH_BHERR_MASK_MASK (0xffff << BCH_BHERR_MASK_SHIFT)
+#define BCH_BHERR_INDEX_SHIFT 0
+#define BCH_BHERR_INDEX_MASK (0x7ff << BCH_BHERR_INDEX_SHIFT)
+
+#define BCH_BHINT_ERRC_SHIFT 24
+#define BCH_BHINT_ERRC_MASK (0x7f << BCH_BHINT_ERRC_SHIFT)
+#define BCH_BHINT_TERRC_SHIFT 16
+#define BCH_BHINT_TERRC_MASK (0x7f << BCH_BHINT_TERRC_SHIFT)
+#define BCH_BHINT_DECF BIT(3)
+#define BCH_BHINT_ENCF BIT(2)
+#define BCH_BHINT_UNCOR BIT(1)
+#define BCH_BHINT_ERR BIT(0)
+
+#define BCH_CLK_RATE (200 * 1000 * 1000)
+
+/* Timeout for BCH calculation/correction. */
+#define BCH_TIMEOUT_US 100000
+
+static void jz4780_bch_reset(struct ingenic_ecc *bch,
+ struct ingenic_ecc_params *params, bool encode)
+{
+ u32 reg;
+
+ /* Clear interrupt status. */
+ writel(readl(bch->base + BCH_BHINT), bch->base + BCH_BHINT);
+
+ /* Set up BCH count register. */
+ reg = params->size << BCH_BHCNT_BLOCKSIZE_SHIFT;
+ reg |= params->bytes << BCH_BHCNT_PARITYSIZE_SHIFT;
+ writel(reg, bch->base + BCH_BHCNT);
+
+ /* Initialise and enable BCH. */
+ reg = BCH_BHCR_BCHE | BCH_BHCR_INIT;
+ reg |= params->strength << BCH_BHCR_BSEL_SHIFT;
+ if (encode)
+ reg |= BCH_BHCR_ENCE;
+ writel(reg, bch->base + BCH_BHCR);
+}
+
+static void jz4780_bch_disable(struct ingenic_ecc *bch)
+{
+ writel(readl(bch->base + BCH_BHINT), bch->base + BCH_BHINT);
+ writel(BCH_BHCR_BCHE, bch->base + BCH_BHCCR);
+}
+
+static void jz4780_bch_write_data(struct ingenic_ecc *bch, const void *buf,
+ size_t size)
+{
+ size_t size32 = size / sizeof(u32);
+ size_t size8 = size % sizeof(u32);
+ const u32 *src32;
+ const u8 *src8;
+
+ src32 = (const u32 *)buf;
+ while (size32--)
+ writel(*src32++, bch->base + BCH_BHDR);
+
+ src8 = (const u8 *)src32;
+ while (size8--)
+ writeb(*src8++, bch->base + BCH_BHDR);
+}
+
+static void jz4780_bch_read_parity(struct ingenic_ecc *bch, void *buf,
+ size_t size)
+{
+ size_t size32 = size / sizeof(u32);
+ size_t size8 = size % sizeof(u32);
+ u32 *dest32;
+ u8 *dest8;
+ u32 val, offset = 0;
+
+ dest32 = (u32 *)buf;
+ while (size32--) {
+ *dest32++ = readl(bch->base + BCH_BHPAR0 + offset);
+ offset += sizeof(u32);
+ }
+
+ dest8 = (u8 *)dest32;
+ val = readl(bch->base + BCH_BHPAR0 + offset);
+ switch (size8) {
+ case 3:
+ dest8[2] = (val >> 16) & 0xff;
+ fallthrough;
+ case 2:
+ dest8[1] = (val >> 8) & 0xff;
+ fallthrough;
+ case 1:
+ dest8[0] = val & 0xff;
+ break;
+ }
+}
+
+static bool jz4780_bch_wait_complete(struct ingenic_ecc *bch, unsigned int irq,
+ u32 *status)
+{
+ u32 reg;
+ int ret;
+
+ /*
+ * While we could use interrupts here and sleep until the operation
+ * completes, the controller works fairly quickly (usually a few
+	 * microseconds), so the overhead of sleeping until an interrupt
+	 * arrives noticeably hurts performance.
+ */
+ ret = readl_poll_timeout(bch->base + BCH_BHINT, reg,
+ (reg & irq) == irq, 0, BCH_TIMEOUT_US);
+ if (ret)
+ return false;
+
+ if (status)
+ *status = reg;
+
+ writel(reg, bch->base + BCH_BHINT);
+ return true;
+}
+
+static int jz4780_calculate(struct ingenic_ecc *bch,
+ struct ingenic_ecc_params *params,
+ const u8 *buf, u8 *ecc_code)
+{
+ int ret = 0;
+
+ mutex_lock(&bch->lock);
+
+ jz4780_bch_reset(bch, params, true);
+ jz4780_bch_write_data(bch, buf, params->size);
+
+ if (jz4780_bch_wait_complete(bch, BCH_BHINT_ENCF, NULL)) {
+ jz4780_bch_read_parity(bch, ecc_code, params->bytes);
+ } else {
+ dev_err(bch->dev, "timed out while calculating ECC\n");
+ ret = -ETIMEDOUT;
+ }
+
+ jz4780_bch_disable(bch);
+ mutex_unlock(&bch->lock);
+ return ret;
+}
+
+static int jz4780_correct(struct ingenic_ecc *bch,
+ struct ingenic_ecc_params *params,
+ u8 *buf, u8 *ecc_code)
+{
+ u32 reg, mask, index;
+ int i, ret, count;
+
+ mutex_lock(&bch->lock);
+
+ jz4780_bch_reset(bch, params, false);
+ jz4780_bch_write_data(bch, buf, params->size);
+ jz4780_bch_write_data(bch, ecc_code, params->bytes);
+
+ if (!jz4780_bch_wait_complete(bch, BCH_BHINT_DECF, &reg)) {
+ dev_err(bch->dev, "timed out while correcting data\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ if (reg & BCH_BHINT_UNCOR) {
+ dev_warn(bch->dev, "uncorrectable ECC error\n");
+ ret = -EBADMSG;
+ goto out;
+ }
+
+ /* Correct any detected errors. */
+ if (reg & BCH_BHINT_ERR) {
+ count = (reg & BCH_BHINT_ERRC_MASK) >> BCH_BHINT_ERRC_SHIFT;
+ ret = (reg & BCH_BHINT_TERRC_MASK) >> BCH_BHINT_TERRC_SHIFT;
+
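+		/*
+		 * Each BHERR register provides a 16-bit XOR mask plus the
+		 * index of the 16-bit half-word it applies to.
+		 */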
+ for (i = 0; i < count; i++) {
+ reg = readl(bch->base + BCH_BHERR0 + (i * 4));
+ mask = (reg & BCH_BHERR_MASK_MASK) >>
+ BCH_BHERR_MASK_SHIFT;
+ index = (reg & BCH_BHERR_INDEX_MASK) >>
+ BCH_BHERR_INDEX_SHIFT;
+ buf[(index * 2) + 0] ^= mask;
+ buf[(index * 2) + 1] ^= mask >> 8;
+ }
+ } else {
+ ret = 0;
+ }
+
+out:
+ jz4780_bch_disable(bch);
+ mutex_unlock(&bch->lock);
+ return ret;
+}
+
+static int jz4780_bch_probe(struct platform_device *pdev)
+{
+ struct ingenic_ecc *bch;
+ int ret;
+
+ ret = ingenic_ecc_probe(pdev);
+ if (ret)
+ return ret;
+
+ bch = platform_get_drvdata(pdev);
+ clk_set_rate(bch->clk, BCH_CLK_RATE);
+
+ return 0;
+}
+
+static const struct ingenic_ecc_ops jz4780_bch_ops = {
+ .disable = jz4780_bch_disable,
+ .calculate = jz4780_calculate,
+ .correct = jz4780_correct,
+};
+
+static const struct of_device_id jz4780_bch_dt_match[] = {
+ { .compatible = "ingenic,jz4780-bch", .data = &jz4780_bch_ops },
+ {},
+};
+MODULE_DEVICE_TABLE(of, jz4780_bch_dt_match);
+
+static struct platform_driver jz4780_bch_driver = {
+ .probe = jz4780_bch_probe,
+ .driver = {
+ .name = "jz4780-bch",
+ .of_match_table = of_match_ptr(jz4780_bch_dt_match),
+ },
+};
+module_platform_driver(jz4780_bch_driver);
+
+MODULE_AUTHOR("Alex Smith <alex@alex-smith.me.uk>");
+MODULE_AUTHOR("Harvey Hunt <harveyhuntnexus@gmail.com>");
+MODULE_DESCRIPTION("Ingenic JZ4780 BCH error correction driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/raw/internals.h b/drivers/mtd/nand/raw/internals.h
new file mode 100644
index 000000000..012876e14
--- /dev/null
+++ b/drivers/mtd/nand/raw/internals.h
@@ -0,0 +1,169 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018 - Bootlin
+ *
+ * Author: Boris Brezillon <boris.brezillon@bootlin.com>
+ *
+ * Header containing internal definitions to be used only by core files.
+ * NAND controller drivers should not include this file.
+ */
+
+#ifndef __LINUX_RAWNAND_INTERNALS
+#define __LINUX_RAWNAND_INTERNALS
+
+#include <linux/mtd/rawnand.h>
+
+/*
+ * NAND Flash Manufacturer ID Codes
+ */
+#define NAND_MFR_AMD 0x01
+#define NAND_MFR_ATO 0x9b
+#define NAND_MFR_EON 0x92
+#define NAND_MFR_ESMT 0xc8
+#define NAND_MFR_FUJITSU 0x04
+#define NAND_MFR_HYNIX 0xad
+#define NAND_MFR_INTEL 0x89
+#define NAND_MFR_MACRONIX 0xc2
+#define NAND_MFR_MICRON 0x2c
+#define NAND_MFR_NATIONAL 0x8f
+#define NAND_MFR_RENESAS 0x07
+#define NAND_MFR_SAMSUNG 0xec
+#define NAND_MFR_SANDISK 0x45
+#define NAND_MFR_STMICRO 0x20
+/* Kioxia is the new name of Toshiba Memory. */
+#define NAND_MFR_TOSHIBA 0x98
+#define NAND_MFR_WINBOND 0xef
+
+/**
+ * struct nand_manufacturer_ops - NAND Manufacturer operations
+ * @detect: detect the NAND memory organization and capabilities
+ * @init: initialize all vendor-specific fields (like the ->read_retry()
+ * implementation), if any.
+ * @cleanup: the ->init() function may have allocated resources; ->cleanup()
+ * is here to let vendor-specific code release those resources.
+ * @fixup_onfi_param_page: apply vendor specific fixups to the ONFI parameter
+ * page. This is called after the checksum is verified.
+ */
+struct nand_manufacturer_ops {
+ void (*detect)(struct nand_chip *chip);
+ int (*init)(struct nand_chip *chip);
+ void (*cleanup)(struct nand_chip *chip);
+ void (*fixup_onfi_param_page)(struct nand_chip *chip,
+ struct nand_onfi_params *p);
+};
+
+/**
+ * struct nand_manufacturer_desc - NAND Flash Manufacturer descriptor
+ * @id: manufacturer ID code of the device
+ * @name: manufacturer name
+ * @ops: manufacturer operations
+ */
+struct nand_manufacturer_desc {
+ int id;
+ char *name;
+ const struct nand_manufacturer_ops *ops;
+};
+
+
+extern struct nand_flash_dev nand_flash_ids[];
+
+extern const struct nand_manufacturer_ops amd_nand_manuf_ops;
+extern const struct nand_manufacturer_ops esmt_nand_manuf_ops;
+extern const struct nand_manufacturer_ops hynix_nand_manuf_ops;
+extern const struct nand_manufacturer_ops macronix_nand_manuf_ops;
+extern const struct nand_manufacturer_ops micron_nand_manuf_ops;
+extern const struct nand_manufacturer_ops samsung_nand_manuf_ops;
+extern const struct nand_manufacturer_ops toshiba_nand_manuf_ops;
+
+/* MLC pairing schemes */
+extern const struct mtd_pairing_scheme dist3_pairing_scheme;
+
+/* Core functions */
+const struct nand_manufacturer_desc *nand_get_manufacturer_desc(u8 id);
+int nand_bbm_get_next_page(struct nand_chip *chip, int page);
+int nand_markbad_bbm(struct nand_chip *chip, loff_t ofs);
+int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
+ int allowbbt);
+void onfi_fill_interface_config(struct nand_chip *chip,
+ struct nand_interface_config *iface,
+ enum nand_interface_type type,
+ unsigned int timing_mode);
+unsigned int
+onfi_find_closest_sdr_mode(const struct nand_sdr_timings *spec_timings);
+int nand_choose_best_sdr_timings(struct nand_chip *chip,
+ struct nand_interface_config *iface,
+ struct nand_sdr_timings *spec_timings);
+const struct nand_interface_config *nand_get_reset_interface_config(void);
+int nand_get_features(struct nand_chip *chip, int addr, u8 *subfeature_param);
+int nand_set_features(struct nand_chip *chip, int addr, u8 *subfeature_param);
+int nand_read_page_raw_notsupp(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page);
+int nand_write_page_raw_notsupp(struct nand_chip *chip, const u8 *buf,
+ int oob_required, int page);
+int nand_exit_status_op(struct nand_chip *chip);
+int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
+ unsigned int len);
+void nand_decode_ext_id(struct nand_chip *chip);
+void panic_nand_wait(struct nand_chip *chip, unsigned long timeo);
+void sanitize_string(uint8_t *s, size_t len);
+
+static inline bool nand_has_exec_op(struct nand_chip *chip)
+{
+ if (!chip->controller || !chip->controller->ops ||
+ !chip->controller->ops->exec_op)
+ return false;
+
+ return true;
+}
+
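+/*
+ * Run ->exec_op() in check-only mode: the controller driver verifies that it
+ * can handle the operation without actually touching the NAND bus.
+ */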
+static inline int nand_check_op(struct nand_chip *chip,
+ const struct nand_operation *op)
+{
+ if (!nand_has_exec_op(chip))
+ return 0;
+
+ return chip->controller->ops->exec_op(chip, op, true);
+}
+
+static inline int nand_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op)
+{
+ if (!nand_has_exec_op(chip))
+ return -ENOTSUPP;
+
+ if (WARN_ON(op->cs >= nanddev_ntargets(&chip->base)))
+ return -EINVAL;
+
+ return chip->controller->ops->exec_op(chip, op, false);
+}
+
+static inline bool nand_controller_can_setup_interface(struct nand_chip *chip)
+{
+ if (!chip->controller || !chip->controller->ops ||
+ !chip->controller->ops->setup_interface)
+ return false;
+
+ if (chip->options & NAND_KEEP_TIMINGS)
+ return false;
+
+ return true;
+}
+
+/* BBT functions */
+int nand_markbad_bbt(struct nand_chip *chip, loff_t offs);
+int nand_isreserved_bbt(struct nand_chip *chip, loff_t offs);
+int nand_isbad_bbt(struct nand_chip *chip, loff_t offs, int allowbbt);
+
+/* Legacy */
+void nand_legacy_set_defaults(struct nand_chip *chip);
+void nand_legacy_adjust_cmdfunc(struct nand_chip *chip);
+int nand_legacy_check_hooks(struct nand_chip *chip);
+
+/* ONFI functions */
+u16 onfi_crc16(u16 crc, u8 const *p, size_t len);
+int nand_onfi_detect(struct nand_chip *chip);
+
+/* JEDEC functions */
+int nand_jedec_detect(struct nand_chip *chip);
+
+#endif /* __LINUX_RAWNAND_INTERNALS */
diff --git a/drivers/mtd/nand/raw/lpc32xx_mlc.c b/drivers/mtd/nand/raw/lpc32xx_mlc.c
new file mode 100644
index 000000000..9e728c731
--- /dev/null
+++ b/drivers/mtd/nand/raw/lpc32xx_mlc.c
@@ -0,0 +1,910 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for NAND MLC Controller in LPC32xx
+ *
+ * Author: Roland Stigge <stigge@antcom.de>
+ *
+ * Copyright © 2011 WORK Microwave GmbH
+ * Copyright © 2011, 2012 Roland Stigge
+ *
+ * NAND Flash Controller Operation:
+ * - Read: Auto Decode
+ * - Write: Auto Encode
+ * - Tested Page Sizes: 2048, 4096
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/mtd/lpc32xx_mlc.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/mtd/nand_ecc.h>
+
+#define DRV_NAME "lpc32xx_mlc"
+
+/**********************************************************************
+* MLC NAND controller register offsets
+**********************************************************************/
+
+#define MLC_BUFF(x) (x + 0x00000)
+#define MLC_DATA(x) (x + 0x08000)
+#define MLC_CMD(x) (x + 0x10000)
+#define MLC_ADDR(x) (x + 0x10004)
+#define MLC_ECC_ENC_REG(x) (x + 0x10008)
+#define MLC_ECC_DEC_REG(x) (x + 0x1000C)
+#define MLC_ECC_AUTO_ENC_REG(x) (x + 0x10010)
+#define MLC_ECC_AUTO_DEC_REG(x) (x + 0x10014)
+#define MLC_RPR(x) (x + 0x10018)
+#define MLC_WPR(x) (x + 0x1001C)
+#define MLC_RUBP(x) (x + 0x10020)
+#define MLC_ROBP(x) (x + 0x10024)
+#define MLC_SW_WP_ADD_LOW(x) (x + 0x10028)
+#define MLC_SW_WP_ADD_HIG(x) (x + 0x1002C)
+#define MLC_ICR(x) (x + 0x10030)
+#define MLC_TIME_REG(x) (x + 0x10034)
+#define MLC_IRQ_MR(x) (x + 0x10038)
+#define MLC_IRQ_SR(x) (x + 0x1003C)
+#define MLC_LOCK_PR(x) (x + 0x10044)
+#define MLC_ISR(x) (x + 0x10048)
+#define MLC_CEH(x) (x + 0x1004C)
+
+/**********************************************************************
+* MLC_CMD bit definitions
+**********************************************************************/
+#define MLCCMD_RESET 0xFF
+
+/**********************************************************************
+* MLC_ICR bit definitions
+**********************************************************************/
+#define MLCICR_WPROT (1 << 3)
+#define MLCICR_LARGEBLOCK (1 << 2)
+#define MLCICR_LONGADDR (1 << 1)
+#define MLCICR_16BIT (1 << 0) /* unsupported by LPC32x0! */
+
+/**********************************************************************
+* MLC_TIME_REG bit definitions
+**********************************************************************/
+#define MLCTIMEREG_TCEA_DELAY(n) (((n) & 0x03) << 24)
+#define MLCTIMEREG_BUSY_DELAY(n) (((n) & 0x1F) << 19)
+#define MLCTIMEREG_NAND_TA(n) (((n) & 0x07) << 16)
+#define MLCTIMEREG_RD_HIGH(n) (((n) & 0x0F) << 12)
+#define MLCTIMEREG_RD_LOW(n) (((n) & 0x0F) << 8)
+#define MLCTIMEREG_WR_HIGH(n) (((n) & 0x0F) << 4)
+#define MLCTIMEREG_WR_LOW(n) (((n) & 0x0F) << 0)
+
+/**********************************************************************
+* MLC_IRQ_MR and MLC_IRQ_SR bit definitions
+**********************************************************************/
+#define MLCIRQ_NAND_READY (1 << 5)
+#define MLCIRQ_CONTROLLER_READY (1 << 4)
+#define MLCIRQ_DECODE_FAILURE (1 << 3)
+#define MLCIRQ_DECODE_ERROR (1 << 2)
+#define MLCIRQ_ECC_READY (1 << 1)
+#define MLCIRQ_WRPROT_FAULT (1 << 0)
+
+/**********************************************************************
+* MLC_LOCK_PR bit definitions
+**********************************************************************/
+#define MLCLOCKPR_MAGIC 0xA25E
+
+/**********************************************************************
+* MLC_ISR bit definitions
+**********************************************************************/
+#define MLCISR_DECODER_FAILURE (1 << 6)
+#define MLCISR_ERRORS ((1 << 4) | (1 << 5))
+#define MLCISR_ERRORS_DETECTED (1 << 3)
+#define MLCISR_ECC_READY (1 << 2)
+#define MLCISR_CONTROLLER_READY (1 << 1)
+#define MLCISR_NAND_READY (1 << 0)
+
+/**********************************************************************
+* MLC_CEH bit definitions
+**********************************************************************/
+#define MLCCEH_NORMAL (1 << 0)
+
+struct lpc32xx_nand_cfg_mlc {
+ uint32_t tcea_delay;
+ uint32_t busy_delay;
+ uint32_t nand_ta;
+ uint32_t rd_high;
+ uint32_t rd_low;
+ uint32_t wr_high;
+ uint32_t wr_low;
+ int wp_gpio;
+ struct mtd_partition *parts;
+ unsigned num_parts;
+};
+
+static int lpc32xx_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+
+ if (section >= nand_chip->ecc.steps)
+ return -ERANGE;
+
+ oobregion->offset = ((section + 1) * 16) - nand_chip->ecc.bytes;
+ oobregion->length = nand_chip->ecc.bytes;
+
+ return 0;
+}
+
+static int lpc32xx_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+
+ if (section >= nand_chip->ecc.steps)
+ return -ERANGE;
+
+ oobregion->offset = 16 * section;
+ oobregion->length = 16 - nand_chip->ecc.bytes;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops lpc32xx_ooblayout_ops = {
+ .ecc = lpc32xx_ooblayout_ecc,
+ .free = lpc32xx_ooblayout_free,
+};
+
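+/*
+ * The bad block tables live at fixed absolute pages (NAND_BBT_ABSPAGE):
+ * 524224 and 524160 are the first pages of the last two 64-page blocks
+ * of a 524288-page device.
+ */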
+static struct nand_bbt_descr lpc32xx_nand_bbt = {
+ .options = NAND_BBT_ABSPAGE | NAND_BBT_2BIT | NAND_BBT_NO_OOB |
+ NAND_BBT_WRITE,
+ .pages = { 524224, 0, 0, 0, 0, 0, 0, 0 },
+};
+
+static struct nand_bbt_descr lpc32xx_nand_bbt_mirror = {
+ .options = NAND_BBT_ABSPAGE | NAND_BBT_2BIT | NAND_BBT_NO_OOB |
+ NAND_BBT_WRITE,
+ .pages = { 524160, 0, 0, 0, 0, 0, 0, 0 },
+};
+
+struct lpc32xx_nand_host {
+ struct platform_device *pdev;
+ struct nand_chip nand_chip;
+ struct lpc32xx_mlc_platform_data *pdata;
+ struct clk *clk;
+ void __iomem *io_base;
+ int irq;
+ struct lpc32xx_nand_cfg_mlc *ncfg;
+ struct completion comp_nand;
+ struct completion comp_controller;
+ uint32_t llptr;
+ /*
+ * Physical addresses of ECC buffer, DMA data buffers, OOB data buffer
+ */
+ dma_addr_t oob_buf_phy;
+ /*
+ * Virtual addresses of ECC buffer, DMA data buffers, OOB data buffer
+ */
+ uint8_t *oob_buf;
+ /* Physical address of DMA base address */
+ dma_addr_t io_base_phy;
+
+ struct completion comp_dma;
+ struct dma_chan *dma_chan;
+ struct dma_slave_config dma_slave_config;
+ struct scatterlist sgl;
+ uint8_t *dma_buf;
+ uint8_t *dummy_buf;
+	int mlcsubpages; /* number of 512-byte subpages */
+};
+
+/*
+ * Activate/Deactivate DMA Operation:
+ *
+ * Using the PL080 DMA Controller for transferring the 512-byte subpages
+ * instead of doing readl() / writel() in a loop slows it down significantly.
+ * Measurements via getnstimeofday() on 512-byte subpage reads reveal:
+ *
+ * - readl() of 128 x 32 bits in a loop: ~20us
+ * - DMA read of 512 bytes (32 bit, 4...128 word bursts): ~60us
+ * - DMA read of 512 bytes (32 bit, no bursts): ~100us
+ *
+ * These times cover the transfer itself; in the DMA case, only the
+ * wait_for_completion() is measured (DMA setup _not_ included).
+ *
+ * Note that the 512-byte subpage transfer is done directly from/to a
+ * FIFO/buffer inside the NAND controller. Most of the time (~400-800us for a
+ * 2048-byte page) is spent waiting for the NAND IRQ, anyway. (The NAND
+ * controller transferring data between its internal buffer to/from the NAND
+ * chip.)
+ *
+ * Therefore, using the PL080 DMA is disabled by default, for now.
+ *
+ */
+static int use_dma;
+
+static void lpc32xx_nand_setup(struct lpc32xx_nand_host *host)
+{
+ uint32_t clkrate, tmp;
+
+ /* Reset MLC controller */
+ writel(MLCCMD_RESET, MLC_CMD(host->io_base));
+ udelay(1000);
+
+ /* Get base clock for MLC block */
+ clkrate = clk_get_rate(host->clk);
+ if (clkrate == 0)
+ clkrate = 104000000;
+
+ /* Unlock MLC_ICR
+ * (among others, will be locked again automatically) */
+ writew(MLCLOCKPR_MAGIC, MLC_LOCK_PR(host->io_base));
+
+ /* Configure MLC Controller: Large Block, 5 Byte Address */
+ tmp = MLCICR_LARGEBLOCK | MLCICR_LONGADDR;
+ writel(tmp, MLC_ICR(host->io_base));
+
+ /* Unlock MLC_TIME_REG
+ * (among others, will be locked again automatically) */
+ writew(MLCLOCKPR_MAGIC, MLC_LOCK_PR(host->io_base));
+
+ /* Compute clock setup values, see LPC and NAND manual */
+ tmp = 0;
+ tmp |= MLCTIMEREG_TCEA_DELAY(clkrate / host->ncfg->tcea_delay + 1);
+ tmp |= MLCTIMEREG_BUSY_DELAY(clkrate / host->ncfg->busy_delay + 1);
+ tmp |= MLCTIMEREG_NAND_TA(clkrate / host->ncfg->nand_ta + 1);
+ tmp |= MLCTIMEREG_RD_HIGH(clkrate / host->ncfg->rd_high + 1);
+ tmp |= MLCTIMEREG_RD_LOW(clkrate / host->ncfg->rd_low);
+ tmp |= MLCTIMEREG_WR_HIGH(clkrate / host->ncfg->wr_high + 1);
+ tmp |= MLCTIMEREG_WR_LOW(clkrate / host->ncfg->wr_low);
+ writel(tmp, MLC_TIME_REG(host->io_base));
+
+ /* Enable IRQ for CONTROLLER_READY and NAND_READY */
+ writeb(MLCIRQ_CONTROLLER_READY | MLCIRQ_NAND_READY,
+ MLC_IRQ_MR(host->io_base));
+
+ /* Normal nCE operation: nCE controlled by controller */
+ writel(MLCCEH_NORMAL, MLC_CEH(host->io_base));
+}
+
+/*
+ * Hardware specific access to control lines
+ */
+static void lpc32xx_nand_cmd_ctrl(struct nand_chip *nand_chip, int cmd,
+ unsigned int ctrl)
+{
+ struct lpc32xx_nand_host *host = nand_get_controller_data(nand_chip);
+
+ if (cmd != NAND_CMD_NONE) {
+ if (ctrl & NAND_CLE)
+ writel(cmd, MLC_CMD(host->io_base));
+ else
+ writel(cmd, MLC_ADDR(host->io_base));
+ }
+}
+
+/*
+ * Read Device Ready (NAND device _and_ controller ready)
+ */
+static int lpc32xx_nand_device_ready(struct nand_chip *nand_chip)
+{
+ struct lpc32xx_nand_host *host = nand_get_controller_data(nand_chip);
+
+ if ((readb(MLC_ISR(host->io_base)) &
+ (MLCISR_CONTROLLER_READY | MLCISR_NAND_READY)) ==
+ (MLCISR_CONTROLLER_READY | MLCISR_NAND_READY))
+ return 1;
+
+ return 0;
+}
+
+static irqreturn_t lpc3xxx_nand_irq(int irq, struct lpc32xx_nand_host *host)
+{
+ uint8_t sr;
+
+ /* Clear interrupt flag by reading status */
+ sr = readb(MLC_IRQ_SR(host->io_base));
+ if (sr & MLCIRQ_NAND_READY)
+ complete(&host->comp_nand);
+ if (sr & MLCIRQ_CONTROLLER_READY)
+ complete(&host->comp_controller);
+
+ return IRQ_HANDLED;
+}
+
+static int lpc32xx_waitfunc_nand(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
+
+ if (readb(MLC_ISR(host->io_base)) & MLCISR_NAND_READY)
+ goto exit;
+
+ wait_for_completion(&host->comp_nand);
+
+ while (!(readb(MLC_ISR(host->io_base)) & MLCISR_NAND_READY)) {
+ /* Seems to be delayed sometimes by controller */
+ dev_dbg(&mtd->dev, "Warning: NAND not ready.\n");
+ cpu_relax();
+ }
+
+exit:
+ return NAND_STATUS_READY;
+}
+
+static int lpc32xx_waitfunc_controller(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
+
+ if (readb(MLC_ISR(host->io_base)) & MLCISR_CONTROLLER_READY)
+ goto exit;
+
+ wait_for_completion(&host->comp_controller);
+
+ while (!(readb(MLC_ISR(host->io_base)) &
+ MLCISR_CONTROLLER_READY)) {
+ dev_dbg(&mtd->dev, "Warning: Controller not ready.\n");
+ cpu_relax();
+ }
+
+exit:
+ return NAND_STATUS_READY;
+}
+
+static int lpc32xx_waitfunc(struct nand_chip *chip)
+{
+ lpc32xx_waitfunc_nand(chip);
+ lpc32xx_waitfunc_controller(chip);
+
+ return NAND_STATUS_READY;
+}
+
+/*
+ * Enable NAND write protect
+ */
+static void lpc32xx_wp_enable(struct lpc32xx_nand_host *host)
+{
+ if (gpio_is_valid(host->ncfg->wp_gpio))
+ gpio_set_value(host->ncfg->wp_gpio, 0);
+}
+
+/*
+ * Disable NAND write protect
+ */
+static void lpc32xx_wp_disable(struct lpc32xx_nand_host *host)
+{
+ if (gpio_is_valid(host->ncfg->wp_gpio))
+ gpio_set_value(host->ncfg->wp_gpio, 1);
+}
+
+static void lpc32xx_dma_complete_func(void *completion)
+{
+ complete(completion);
+}
+
+static int lpc32xx_xmit_dma(struct mtd_info *mtd, void *mem, int len,
+ enum dma_transfer_direction dir)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
+ struct dma_async_tx_descriptor *desc;
+ int flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+ int res;
+
+ sg_init_one(&host->sgl, mem, len);
+
+ res = dma_map_sg(host->dma_chan->device->dev, &host->sgl, 1,
+ DMA_BIDIRECTIONAL);
+ if (res != 1) {
+ dev_err(mtd->dev.parent, "Failed to map sg list\n");
+ return -ENXIO;
+ }
+ desc = dmaengine_prep_slave_sg(host->dma_chan, &host->sgl, 1, dir,
+ flags);
+ if (!desc) {
+ dev_err(mtd->dev.parent, "Failed to prepare slave sg\n");
+ goto out1;
+ }
+
+ init_completion(&host->comp_dma);
+ desc->callback = lpc32xx_dma_complete_func;
+ desc->callback_param = &host->comp_dma;
+
+ dmaengine_submit(desc);
+ dma_async_issue_pending(host->dma_chan);
+
+ wait_for_completion_timeout(&host->comp_dma, msecs_to_jiffies(1000));
+
+ dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
+ DMA_BIDIRECTIONAL);
+ return 0;
+out1:
+ dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
+ DMA_BIDIRECTIONAL);
+ return -ENXIO;
+}
+
+static int lpc32xx_read_page(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
+ int i, j;
+ uint8_t *oobbuf = chip->oob_poi;
+ uint32_t mlc_isr;
+ int res;
+ uint8_t *dma_buf;
+ bool dma_mapped;
+
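+	/*
+	 * Lowmem buffers can be handed to the DMA engine directly; anything
+	 * above high_memory is bounced through the preallocated dma_buf.
+	 */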
+ if ((void *)buf <= high_memory) {
+ dma_buf = buf;
+ dma_mapped = true;
+ } else {
+ dma_buf = host->dma_buf;
+ dma_mapped = false;
+ }
+
+ /* Writing Command and Address */
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
+ /* For all sub-pages */
+ for (i = 0; i < host->mlcsubpages; i++) {
+ /* Start Auto Decode Command */
+ writeb(0x00, MLC_ECC_AUTO_DEC_REG(host->io_base));
+
+ /* Wait for Controller Ready */
+ lpc32xx_waitfunc_controller(chip);
+
+ /* Check ECC Error status */
+ mlc_isr = readl(MLC_ISR(host->io_base));
+ if (mlc_isr & MLCISR_DECODER_FAILURE) {
+ mtd->ecc_stats.failed++;
+ dev_warn(&mtd->dev, "%s: DECODER_FAILURE\n", __func__);
+ } else if (mlc_isr & MLCISR_ERRORS_DETECTED) {
+ mtd->ecc_stats.corrected += ((mlc_isr >> 4) & 0x3) + 1;
+ }
+
+ /* Read 512 + 16 Bytes */
+ if (use_dma) {
+ res = lpc32xx_xmit_dma(mtd, dma_buf + i * 512, 512,
+ DMA_DEV_TO_MEM);
+ if (res)
+ return res;
+ } else {
+ for (j = 0; j < (512 >> 2); j++) {
+ *((uint32_t *)(buf)) =
+ readl(MLC_BUFF(host->io_base));
+ buf += 4;
+ }
+ }
+ for (j = 0; j < (16 >> 2); j++) {
+ *((uint32_t *)(oobbuf)) =
+ readl(MLC_BUFF(host->io_base));
+ oobbuf += 4;
+ }
+ }
+
+ if (use_dma && !dma_mapped)
+ memcpy(buf, dma_buf, mtd->writesize);
+
+ return 0;
+}
+
+static int lpc32xx_write_page_lowlevel(struct nand_chip *chip,
+ const uint8_t *buf, int oob_required,
+ int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
+ const uint8_t *oobbuf = chip->oob_poi;
+ uint8_t *dma_buf = (uint8_t *)buf;
+ int res;
+ int i, j;
+
+ if (use_dma && (void *)buf >= high_memory) {
+ dma_buf = host->dma_buf;
+ memcpy(dma_buf, buf, mtd->writesize);
+ }
+
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
+ for (i = 0; i < host->mlcsubpages; i++) {
+ /* Start Encode */
+ writeb(0x00, MLC_ECC_ENC_REG(host->io_base));
+
+ /* Write 512 + 6 Bytes to Buffer */
+ if (use_dma) {
+ res = lpc32xx_xmit_dma(mtd, dma_buf + i * 512, 512,
+ DMA_MEM_TO_DEV);
+ if (res)
+ return res;
+ } else {
+ for (j = 0; j < (512 >> 2); j++) {
+ writel(*((uint32_t *)(buf)),
+ MLC_BUFF(host->io_base));
+ buf += 4;
+ }
+ }
+ writel(*((uint32_t *)(oobbuf)), MLC_BUFF(host->io_base));
+ oobbuf += 4;
+ writew(*((uint16_t *)(oobbuf)), MLC_BUFF(host->io_base));
+ oobbuf += 12;
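+		/*
+		 * Only 6 of the 16 OOB bytes per subpage are written here; the
+		 * remaining 10 are filled in with the controller-computed ECC.
+		 */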
+
+ /* Auto Encode w/ Bit 8 = 0 (see LPC MLC Controller manual) */
+ writeb(0x00, MLC_ECC_AUTO_ENC_REG(host->io_base));
+
+ /* Wait for Controller Ready */
+ lpc32xx_waitfunc_controller(chip);
+ }
+
+ return nand_prog_page_end_op(chip);
+}
+
+static int lpc32xx_read_oob(struct nand_chip *chip, int page)
+{
+ struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
+
+ /* Read whole page - necessary with MLC controller! */
+ lpc32xx_read_page(chip, host->dummy_buf, 1, page);
+
+ return 0;
+}
+
+static int lpc32xx_write_oob(struct nand_chip *chip, int page)
+{
+ /* None, write_oob conflicts with the automatic LPC MLC ECC decoder! */
+ return 0;
+}
+
+/* Prepares MLC for transfers with H/W ECC enabled: always enabled anyway */
+static void lpc32xx_ecc_enable(struct nand_chip *chip, int mode)
+{
+ /* Always enabled! */
+}
+
+static int lpc32xx_dma_setup(struct lpc32xx_nand_host *host)
+{
+ struct mtd_info *mtd = nand_to_mtd(&host->nand_chip);
+ dma_cap_mask_t mask;
+
+ if (!host->pdata || !host->pdata->dma_filter) {
+ dev_err(mtd->dev.parent, "no DMA platform data\n");
+ return -ENOENT;
+ }
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ host->dma_chan = dma_request_channel(mask, host->pdata->dma_filter,
+ "nand-mlc");
+ if (!host->dma_chan) {
+ dev_err(mtd->dev.parent, "Failed to request DMA channel\n");
+ return -EBUSY;
+ }
+
+ /*
+ * Set direction to a sensible value even if the dmaengine driver
+ * should ignore it. With the default (DMA_MEM_TO_MEM), the amba-pl08x
+	 * driver complains about an "alien transfer direction".
+ */
+ host->dma_slave_config.direction = DMA_DEV_TO_MEM;
+ host->dma_slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ host->dma_slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ host->dma_slave_config.src_maxburst = 128;
+ host->dma_slave_config.dst_maxburst = 128;
+ /* DMA controller does flow control: */
+ host->dma_slave_config.device_fc = false;
+ host->dma_slave_config.src_addr = MLC_BUFF(host->io_base_phy);
+ host->dma_slave_config.dst_addr = MLC_BUFF(host->io_base_phy);
+ if (dmaengine_slave_config(host->dma_chan, &host->dma_slave_config)) {
+ dev_err(mtd->dev.parent, "Failed to setup DMA slave\n");
+ goto out1;
+ }
+
+ return 0;
+out1:
+ dma_release_channel(host->dma_chan);
+ return -ENXIO;
+}
+
+static struct lpc32xx_nand_cfg_mlc *lpc32xx_parse_dt(struct device *dev)
+{
+ struct lpc32xx_nand_cfg_mlc *ncfg;
+ struct device_node *np = dev->of_node;
+
+ ncfg = devm_kzalloc(dev, sizeof(*ncfg), GFP_KERNEL);
+ if (!ncfg)
+ return NULL;
+
+ of_property_read_u32(np, "nxp,tcea-delay", &ncfg->tcea_delay);
+ of_property_read_u32(np, "nxp,busy-delay", &ncfg->busy_delay);
+ of_property_read_u32(np, "nxp,nand-ta", &ncfg->nand_ta);
+ of_property_read_u32(np, "nxp,rd-high", &ncfg->rd_high);
+ of_property_read_u32(np, "nxp,rd-low", &ncfg->rd_low);
+ of_property_read_u32(np, "nxp,wr-high", &ncfg->wr_high);
+ of_property_read_u32(np, "nxp,wr-low", &ncfg->wr_low);
+
+ if (!ncfg->tcea_delay || !ncfg->busy_delay || !ncfg->nand_ta ||
+ !ncfg->rd_high || !ncfg->rd_low || !ncfg->wr_high ||
+ !ncfg->wr_low) {
+ dev_err(dev, "chip parameters not specified correctly\n");
+ return NULL;
+ }
+
+ ncfg->wp_gpio = of_get_named_gpio(np, "gpios", 0);
+
+ return ncfg;
+}
+
+static int lpc32xx_nand_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
+ struct device *dev = &host->pdev->dev;
+
+ if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
+ return 0;
+
+ host->dma_buf = devm_kzalloc(dev, mtd->writesize, GFP_KERNEL);
+ if (!host->dma_buf)
+ return -ENOMEM;
+
+ host->dummy_buf = devm_kzalloc(dev, mtd->writesize, GFP_KERNEL);
+ if (!host->dummy_buf)
+ return -ENOMEM;
+
+ chip->ecc.size = 512;
+ chip->ecc.hwctl = lpc32xx_ecc_enable;
+ chip->ecc.read_page_raw = lpc32xx_read_page;
+ chip->ecc.read_page = lpc32xx_read_page;
+ chip->ecc.write_page_raw = lpc32xx_write_page_lowlevel;
+ chip->ecc.write_page = lpc32xx_write_page_lowlevel;
+ chip->ecc.write_oob = lpc32xx_write_oob;
+ chip->ecc.read_oob = lpc32xx_read_oob;
+ chip->ecc.strength = 4;
+ chip->ecc.bytes = 10;
+
+ mtd_set_ooblayout(mtd, &lpc32xx_ooblayout_ops);
+ host->mlcsubpages = mtd->writesize / 512;
+
+ return 0;
+}
+
+static const struct nand_controller_ops lpc32xx_nand_controller_ops = {
+ .attach_chip = lpc32xx_nand_attach_chip,
+};
+
+/*
+ * Probe for NAND controller
+ */
+static int lpc32xx_nand_probe(struct platform_device *pdev)
+{
+ struct lpc32xx_nand_host *host;
+ struct mtd_info *mtd;
+ struct nand_chip *nand_chip;
+ struct resource *rc;
+ int res;
+
+ /* Allocate memory for the device structure (and zero it) */
+ host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
+
+ host->pdev = pdev;
+
+ rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ host->io_base = devm_ioremap_resource(&pdev->dev, rc);
+ if (IS_ERR(host->io_base))
+ return PTR_ERR(host->io_base);
+
+ host->io_base_phy = rc->start;
+
+ nand_chip = &host->nand_chip;
+ mtd = nand_to_mtd(nand_chip);
+ if (pdev->dev.of_node)
+ host->ncfg = lpc32xx_parse_dt(&pdev->dev);
+ if (!host->ncfg) {
+ dev_err(&pdev->dev,
+ "Missing or bad NAND config from device tree\n");
+ return -ENOENT;
+ }
+ if (host->ncfg->wp_gpio == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ if (gpio_is_valid(host->ncfg->wp_gpio) &&
+ gpio_request(host->ncfg->wp_gpio, "NAND WP")) {
+ dev_err(&pdev->dev, "GPIO not available\n");
+ return -EBUSY;
+ }
+ lpc32xx_wp_disable(host);
+
+ host->pdata = dev_get_platdata(&pdev->dev);
+
+ /* link the private data structures */
+ nand_set_controller_data(nand_chip, host);
+ nand_set_flash_node(nand_chip, pdev->dev.of_node);
+ mtd->dev.parent = &pdev->dev;
+
+ /* Get NAND clock */
+ host->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(host->clk)) {
+ dev_err(&pdev->dev, "Clock initialization failure\n");
+ res = -ENOENT;
+ goto free_gpio;
+ }
+ res = clk_prepare_enable(host->clk);
+ if (res)
+ goto put_clk;
+
+ nand_chip->legacy.cmd_ctrl = lpc32xx_nand_cmd_ctrl;
+ nand_chip->legacy.dev_ready = lpc32xx_nand_device_ready;
+ nand_chip->legacy.chip_delay = 25; /* us */
+ nand_chip->legacy.IO_ADDR_R = MLC_DATA(host->io_base);
+ nand_chip->legacy.IO_ADDR_W = MLC_DATA(host->io_base);
+
+ /* Init NAND controller */
+ lpc32xx_nand_setup(host);
+
+ platform_set_drvdata(pdev, host);
+
+ /* Initialize function pointers */
+ nand_chip->legacy.waitfunc = lpc32xx_waitfunc;
+
+ nand_chip->options = NAND_NO_SUBPAGE_WRITE;
+ nand_chip->bbt_options = NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
+ nand_chip->bbt_td = &lpc32xx_nand_bbt;
+ nand_chip->bbt_md = &lpc32xx_nand_bbt_mirror;
+
+ if (use_dma) {
+ res = lpc32xx_dma_setup(host);
+ if (res) {
+ res = -EIO;
+ goto unprepare_clk;
+ }
+ }
+
+ /* initially clear interrupt status */
+ readb(MLC_IRQ_SR(host->io_base));
+
+ init_completion(&host->comp_nand);
+ init_completion(&host->comp_controller);
+
+ host->irq = platform_get_irq(pdev, 0);
+ if (host->irq < 0) {
+ res = -EINVAL;
+ goto release_dma_chan;
+ }
+
+ if (request_irq(host->irq, (irq_handler_t)&lpc3xxx_nand_irq,
+ IRQF_TRIGGER_HIGH, DRV_NAME, host)) {
+ dev_err(&pdev->dev, "Error requesting NAND IRQ\n");
+ res = -ENXIO;
+ goto release_dma_chan;
+ }
+
+	/*
+	 * Scan to find the device and determine the type of NAND device:
+	 * SMALL block or LARGE block.
+	 */
+ nand_chip->legacy.dummy_controller.ops = &lpc32xx_nand_controller_ops;
+ res = nand_scan(nand_chip, 1);
+ if (res)
+ goto free_irq;
+
+ mtd->name = DRV_NAME;
+
+ res = mtd_device_register(mtd, host->ncfg->parts,
+ host->ncfg->num_parts);
+ if (res)
+ goto cleanup_nand;
+
+ return 0;
+
+cleanup_nand:
+ nand_cleanup(nand_chip);
+free_irq:
+ free_irq(host->irq, host);
+release_dma_chan:
+ if (use_dma)
+ dma_release_channel(host->dma_chan);
+unprepare_clk:
+ clk_disable_unprepare(host->clk);
+put_clk:
+ clk_put(host->clk);
+free_gpio:
+ lpc32xx_wp_enable(host);
+ gpio_free(host->ncfg->wp_gpio);
+
+ return res;
+}
+
+/*
+ * Remove NAND device
+ */
+static int lpc32xx_nand_remove(struct platform_device *pdev)
+{
+ struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
+ struct nand_chip *chip = &host->nand_chip;
+ int ret;
+
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+
+ free_irq(host->irq, host);
+ if (use_dma)
+ dma_release_channel(host->dma_chan);
+
+ clk_disable_unprepare(host->clk);
+ clk_put(host->clk);
+
+ lpc32xx_wp_enable(host);
+ gpio_free(host->ncfg->wp_gpio);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int lpc32xx_nand_resume(struct platform_device *pdev)
+{
+ struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
+ int ret;
+
+ /* Re-enable NAND clock */
+ ret = clk_prepare_enable(host->clk);
+ if (ret)
+ return ret;
+
+ /* Fresh init of NAND controller */
+ lpc32xx_nand_setup(host);
+
+ /* Disable write protect */
+ lpc32xx_wp_disable(host);
+
+ return 0;
+}
+
+static int lpc32xx_nand_suspend(struct platform_device *pdev, pm_message_t pm)
+{
+ struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
+
+ /* Enable write protect for safety */
+ lpc32xx_wp_enable(host);
+
+ /* Disable clock */
+ clk_disable_unprepare(host->clk);
+ return 0;
+}
+
+#else
+#define lpc32xx_nand_resume NULL
+#define lpc32xx_nand_suspend NULL
+#endif
+
+static const struct of_device_id lpc32xx_nand_match[] = {
+ { .compatible = "nxp,lpc3220-mlc" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, lpc32xx_nand_match);
+
+static struct platform_driver lpc32xx_nand_driver = {
+ .probe = lpc32xx_nand_probe,
+ .remove = lpc32xx_nand_remove,
+ .resume = lpc32xx_nand_resume,
+ .suspend = lpc32xx_nand_suspend,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = lpc32xx_nand_match,
+ },
+};
+
+module_platform_driver(lpc32xx_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
+MODULE_DESCRIPTION("NAND driver for the NXP LPC32XX MLC controller");
diff --git a/drivers/mtd/nand/raw/lpc32xx_slc.c b/drivers/mtd/nand/raw/lpc32xx_slc.c
new file mode 100644
index 000000000..dc7785e30
--- /dev/null
+++ b/drivers/mtd/nand/raw/lpc32xx_slc.c
@@ -0,0 +1,1038 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * NXP LPC32XX NAND SLC driver
+ *
+ * Authors:
+ * Kevin Wells <kevin.wells@nxp.com>
+ * Roland Stigge <stigge@antcom.de>
+ *
+ * Copyright © 2011 NXP Semiconductors
+ * Copyright © 2012 Roland Stigge
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/gpio.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/mtd/lpc32xx_slc.h>
+
+#define LPC32XX_MODNAME "lpc32xx-nand"
+
+/**********************************************************************
+* SLC NAND controller register offsets
+**********************************************************************/
+
+#define SLC_DATA(x) (x + 0x000)
+#define SLC_ADDR(x) (x + 0x004)
+#define SLC_CMD(x) (x + 0x008)
+#define SLC_STOP(x) (x + 0x00C)
+#define SLC_CTRL(x) (x + 0x010)
+#define SLC_CFG(x) (x + 0x014)
+#define SLC_STAT(x) (x + 0x018)
+#define SLC_INT_STAT(x) (x + 0x01C)
+#define SLC_IEN(x) (x + 0x020)
+#define SLC_ISR(x) (x + 0x024)
+#define SLC_ICR(x) (x + 0x028)
+#define SLC_TAC(x) (x + 0x02C)
+#define SLC_TC(x) (x + 0x030)
+#define SLC_ECC(x) (x + 0x034)
+#define SLC_DMA_DATA(x) (x + 0x038)
+
+/**********************************************************************
+* slc_ctrl register definitions
+**********************************************************************/
+#define SLCCTRL_SW_RESET (1 << 2) /* Reset the NAND controller bit */
+#define SLCCTRL_ECC_CLEAR (1 << 1) /* Reset ECC bit */
+#define SLCCTRL_DMA_START (1 << 0) /* Start DMA channel bit */
+
+/**********************************************************************
+* slc_cfg register definitions
+**********************************************************************/
+#define SLCCFG_CE_LOW (1 << 5) /* Force CE low bit */
+#define SLCCFG_DMA_ECC (1 << 4) /* Enable DMA ECC bit */
+#define SLCCFG_ECC_EN (1 << 3) /* ECC enable bit */
+#define SLCCFG_DMA_BURST (1 << 2) /* DMA burst bit */
+#define SLCCFG_DMA_DIR (1 << 1) /* DMA write(0)/read(1) bit */
+#define SLCCFG_WIDTH (1 << 0) /* External device width, 0=8bit */
+
+/**********************************************************************
+* slc_stat register definitions
+**********************************************************************/
+#define SLCSTAT_DMA_FIFO (1 << 2) /* DMA FIFO has data bit */
+#define SLCSTAT_SLC_FIFO (1 << 1) /* SLC FIFO has data bit */
+#define SLCSTAT_NAND_READY (1 << 0) /* NAND device is ready bit */
+
+/**********************************************************************
+* slc_int_stat, slc_ien, slc_isr, and slc_icr register definitions
+**********************************************************************/
+#define SLCSTAT_INT_TC (1 << 1) /* Transfer count bit */
+#define SLCSTAT_INT_RDY_EN (1 << 0) /* Ready interrupt bit */
+
+/**********************************************************************
+* slc_tac register definitions
+**********************************************************************/
+/* Computation of clock cycles on the basis of controller and device clock rates */
+#define SLCTAC_CLOCKS(c, n, s) (min_t(u32, DIV_ROUND_UP(c, n) - 1, 0xF) << s)
+
+/* Clock setting for RDY write sample wait time in 2*n clocks */
+#define SLCTAC_WDR(n) (((n) & 0xF) << 28)
+/* Write pulse width in clock cycles, 1 to 16 clocks */
+#define SLCTAC_WWIDTH(c, n) (SLCTAC_CLOCKS(c, n, 24))
+/* Write hold time of control and data signals, 1 to 16 clocks */
+#define SLCTAC_WHOLD(c, n) (SLCTAC_CLOCKS(c, n, 20))
+/* Write setup time of control and data signals, 1 to 16 clocks */
+#define SLCTAC_WSETUP(c, n) (SLCTAC_CLOCKS(c, n, 16))
+/* Clock setting for RDY read sample wait time in 2*n clocks */
+#define SLCTAC_RDR(n) (((n) & 0xF) << 12)
+/* Read pulse width in clock cycles, 1 to 16 clocks */
+#define SLCTAC_RWIDTH(c, n) (SLCTAC_CLOCKS(c, n, 8))
+/* Read hold time of control and data signals, 1 to 16 clocks */
+#define SLCTAC_RHOLD(c, n) (SLCTAC_CLOCKS(c, n, 4))
+/* Read setup time of control and data signals, 1 to 16 clocks */
+#define SLCTAC_RSETUP(c, n) (SLCTAC_CLOCKS(c, n, 0))
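+
+/*
+ * Worked example (editor's sketch): the c/n arguments are clock rates in Hz
+ * (the SLC base clock and the desired signal rate, as passed in by
+ * lpc32xx_nand_setup() below). With a 104 MHz base clock and a requested
+ * 40 MHz read pulse, SLCTAC_RWIDTH(104000000, 40000000) evaluates to
+ * (DIV_ROUND_UP(104000000, 40000000) - 1) << 8 = (3 - 1) << 8, i.e. a
+ * 3-clock pulse encoded as 2 in the 4-bit field, clamped to 0xF when the
+ * requested rate would need more than 16 clocks.
+ */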
+
+/**********************************************************************
+* slc_ecc register definitions
+**********************************************************************/
+/* ECC line parity fetch macro */
+#define SLCECC_TO_LINEPAR(n) (((n) >> 6) & 0x7FFF)
+#define SLCECC_TO_COLPAR(n) ((n) & 0x3F)
+
+/*
+ * DMA requires storage space for the DMA local buffer and the hardware ECC
+ * storage area. The DMA local buffer is only used if DMA mapping fails
+ * during runtime.
+ */
+#define LPC32XX_DMA_DATA_SIZE 4096
+#define LPC32XX_ECC_SAVE_SIZE ((4096 / 256) * 4)
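+
+/*
+ * Editor's note on the arithmetic above: the SLC engine produces one 32-bit
+ * ECC word per 256-byte subpage, so a worst-case 4096-byte page needs
+ * (4096 / 256) * 4 = 64 bytes of ECC save area on top of the data buffer.
+ */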
+
+/* Number of bytes used for ECC stored in NAND per 256 bytes */
+#define LPC32XX_SLC_DEV_ECC_BYTES 3
+
+/*
+ * If the NAND base clock frequency can't be fetched, this frequency will be
+ * used instead as the base. This rate is used to setup the timing registers
+ * used for NAND accesses.
+ */
+#define LPC32XX_DEF_BUS_RATE 133250000
+
+/* Milliseconds for DMA FIFO timeout (unlikely anyway) */
+#define LPC32XX_DMA_TIMEOUT 100
+
+/*
+ * NAND ECC Layout for small page NAND devices
+ * Note: For large and huge page devices, the default layouts are used
+ */
+static int lpc32xx_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section)
+ return -ERANGE;
+
+ oobregion->length = 6;
+ oobregion->offset = 10;
+
+ return 0;
+}
+
+static int lpc32xx_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section > 1)
+ return -ERANGE;
+
+ if (!section) {
+ oobregion->offset = 0;
+ oobregion->length = 4;
+ } else {
+ oobregion->offset = 6;
+ oobregion->length = 4;
+ }
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops lpc32xx_ooblayout_ops = {
+ .ecc = lpc32xx_ooblayout_ecc,
+ .free = lpc32xx_ooblayout_free,
+};
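+
+/*
+ * Editor's note: on a 16-byte small-page OOB the regions above work out to
+ * bytes 0-3 free, bytes 6-9 free and bytes 10-15 ECC. Bytes 4-5 belong to
+ * neither region, which keeps the factory bad block marker (at offset 5 on
+ * small-page devices) out of both.
+ */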
+
+static u8 bbt_pattern[] = {'B', 'b', 't', '0' };
+static u8 mirror_pattern[] = {'1', 't', 'b', 'B' };
+
+/*
+ * Small page FLASH BBT descriptors, marker at offset 0, version at offset 6
+ * Note: Large page devices use the default layout
+ */
+static struct nand_bbt_descr bbt_smallpage_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 0,
+ .len = 4,
+ .veroffs = 6,
+ .maxblocks = 4,
+ .pattern = bbt_pattern
+};
+
+static struct nand_bbt_descr bbt_smallpage_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 0,
+ .len = 4,
+ .veroffs = 6,
+ .maxblocks = 4,
+ .pattern = mirror_pattern
+};
+
+/*
+ * NAND platform configuration structure
+ */
+struct lpc32xx_nand_cfg_slc {
+ uint32_t wdr_clks;
+ uint32_t wwidth;
+ uint32_t whold;
+ uint32_t wsetup;
+ uint32_t rdr_clks;
+ uint32_t rwidth;
+ uint32_t rhold;
+ uint32_t rsetup;
+ int wp_gpio;
+ struct mtd_partition *parts;
+ unsigned num_parts;
+};
+
+struct lpc32xx_nand_host {
+ struct nand_chip nand_chip;
+ struct lpc32xx_slc_platform_data *pdata;
+ struct clk *clk;
+ void __iomem *io_base;
+ struct lpc32xx_nand_cfg_slc *ncfg;
+
+ struct completion comp;
+ struct dma_chan *dma_chan;
+ uint32_t dma_buf_len;
+ struct dma_slave_config dma_slave_config;
+ struct scatterlist sgl;
+
+ /*
+ * DMA and CPU addresses of ECC work area and data buffer
+ */
+ uint32_t *ecc_buf;
+ uint8_t *data_buf;
+ dma_addr_t io_base_dma;
+};
+
+static void lpc32xx_nand_setup(struct lpc32xx_nand_host *host)
+{
+ uint32_t clkrate, tmp;
+
+ /* Reset SLC controller */
+ writel(SLCCTRL_SW_RESET, SLC_CTRL(host->io_base));
+ udelay(1000);
+
+ /* Basic setup */
+ writel(0, SLC_CFG(host->io_base));
+ writel(0, SLC_IEN(host->io_base));
+ writel((SLCSTAT_INT_TC | SLCSTAT_INT_RDY_EN),
+ SLC_ICR(host->io_base));
+
+ /* Get base clock for SLC block */
+ clkrate = clk_get_rate(host->clk);
+ if (clkrate == 0)
+ clkrate = LPC32XX_DEF_BUS_RATE;
+
+ /* Compute clock setup values */
+ tmp = SLCTAC_WDR(host->ncfg->wdr_clks) |
+ SLCTAC_WWIDTH(clkrate, host->ncfg->wwidth) |
+ SLCTAC_WHOLD(clkrate, host->ncfg->whold) |
+ SLCTAC_WSETUP(clkrate, host->ncfg->wsetup) |
+ SLCTAC_RDR(host->ncfg->rdr_clks) |
+ SLCTAC_RWIDTH(clkrate, host->ncfg->rwidth) |
+ SLCTAC_RHOLD(clkrate, host->ncfg->rhold) |
+ SLCTAC_RSETUP(clkrate, host->ncfg->rsetup);
+ writel(tmp, SLC_TAC(host->io_base));
+}
+
+/*
+ * Hardware specific access to control lines
+ */
+static void lpc32xx_nand_cmd_ctrl(struct nand_chip *chip, int cmd,
+ unsigned int ctrl)
+{
+ uint32_t tmp;
+ struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
+
+ /* Does CE state need to be changed? */
+ tmp = readl(SLC_CFG(host->io_base));
+ if (ctrl & NAND_NCE)
+ tmp |= SLCCFG_CE_LOW;
+ else
+ tmp &= ~SLCCFG_CE_LOW;
+ writel(tmp, SLC_CFG(host->io_base));
+
+ if (cmd != NAND_CMD_NONE) {
+ if (ctrl & NAND_CLE)
+ writel(cmd, SLC_CMD(host->io_base));
+ else
+ writel(cmd, SLC_ADDR(host->io_base));
+ }
+}
+
+/*
+ * Read the Device Ready pin
+ */
+static int lpc32xx_nand_device_ready(struct nand_chip *chip)
+{
+ struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
+ int rdy = 0;
+
+ if ((readl(SLC_STAT(host->io_base)) & SLCSTAT_NAND_READY) != 0)
+ rdy = 1;
+
+ return rdy;
+}
+
+/*
+ * Enable NAND write protect
+ */
+static void lpc32xx_wp_enable(struct lpc32xx_nand_host *host)
+{
+ if (gpio_is_valid(host->ncfg->wp_gpio))
+ gpio_set_value(host->ncfg->wp_gpio, 0);
+}
+
+/*
+ * Disable NAND write protect
+ */
+static void lpc32xx_wp_disable(struct lpc32xx_nand_host *host)
+{
+ if (gpio_is_valid(host->ncfg->wp_gpio))
+ gpio_set_value(host->ncfg->wp_gpio, 1);
+}
+
+/*
+ * Prepares SLC for transfers with H/W ECC enabled
+ */
+static void lpc32xx_nand_ecc_enable(struct nand_chip *chip, int mode)
+{
+	/* Hardware ECC is enabled automatically as needed */
+}
+
+/*
+ * Calculates the ECC for the data
+ */
+static int lpc32xx_nand_ecc_calculate(struct nand_chip *chip,
+ const unsigned char *buf,
+ unsigned char *code)
+{
+ /*
+ * ECC is calculated automatically in hardware during syndrome read
+ * and write operations, so it doesn't need to be calculated here.
+ */
+ return 0;
+}
+
+/*
+ * Read a single byte from NAND device
+ */
+static uint8_t lpc32xx_nand_read_byte(struct nand_chip *chip)
+{
+ struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
+
+ return (uint8_t)readl(SLC_DATA(host->io_base));
+}
+
+/*
+ * Simple device read without ECC
+ */
+static void lpc32xx_nand_read_buf(struct nand_chip *chip, u_char *buf, int len)
+{
+ struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
+
+ /* Direct device read with no ECC */
+ while (len-- > 0)
+ *buf++ = (uint8_t)readl(SLC_DATA(host->io_base));
+}
+
+/*
+ * Simple device write without ECC
+ */
+static void lpc32xx_nand_write_buf(struct nand_chip *chip, const uint8_t *buf,
+ int len)
+{
+ struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
+
+ /* Direct device write with no ECC */
+ while (len-- > 0)
+ writel((uint32_t)*buf++, SLC_DATA(host->io_base));
+}
+
+/*
+ * Read the OOB data from the device without ECC using FIFO method
+ */
+static int lpc32xx_nand_read_oob_syndrome(struct nand_chip *chip, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
+}
+
+/*
+ * Write the OOB data to the device without ECC using FIFO method
+ */
+static int lpc32xx_nand_write_oob_syndrome(struct nand_chip *chip, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
+ mtd->oobsize);
+}
+
+/*
+ * Fills in the ECC fields in the OOB buffer with the hardware generated ECC
+ */
+static void lpc32xx_slc_ecc_copy(uint8_t *spare, const uint32_t *ecc, int count)
+{
+ int i;
+
+ for (i = 0; i < (count * 3); i += 3) {
+ uint32_t ce = ecc[i / 3];
+ ce = ~(ce << 2) & 0xFFFFFF;
+ spare[i + 2] = (uint8_t)(ce & 0xFF);
+ ce >>= 8;
+ spare[i + 1] = (uint8_t)(ce & 0xFF);
+ ce >>= 8;
+ spare[i] = (uint8_t)(ce & 0xFF);
+ }
+}
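+
+/*
+ * Editor's worked example: an ECC word of 0x123456 becomes
+ * ~(0x123456 << 2) & 0xFFFFFF = 0xB72EA7, stored as spare[0] = 0xB7,
+ * spare[1] = 0x2E, spare[2] = 0xA7, i.e. most significant byte first.
+ */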
+
+static void lpc32xx_dma_complete_func(void *completion)
+{
+ complete(completion);
+}
+
+static int lpc32xx_xmit_dma(struct mtd_info *mtd, dma_addr_t dma,
+ void *mem, int len, enum dma_transfer_direction dir)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
+ struct dma_async_tx_descriptor *desc;
+ int flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+ int res;
+
+ host->dma_slave_config.direction = dir;
+ host->dma_slave_config.src_addr = dma;
+ host->dma_slave_config.dst_addr = dma;
+ host->dma_slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ host->dma_slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ host->dma_slave_config.src_maxburst = 4;
+ host->dma_slave_config.dst_maxburst = 4;
+ /* DMA controller does flow control: */
+ host->dma_slave_config.device_fc = false;
+ if (dmaengine_slave_config(host->dma_chan, &host->dma_slave_config)) {
+ dev_err(mtd->dev.parent, "Failed to setup DMA slave\n");
+ return -ENXIO;
+ }
+
+ sg_init_one(&host->sgl, mem, len);
+
+ res = dma_map_sg(host->dma_chan->device->dev, &host->sgl, 1,
+ DMA_BIDIRECTIONAL);
+ if (res != 1) {
+ dev_err(mtd->dev.parent, "Failed to map sg list\n");
+ return -ENXIO;
+ }
+ desc = dmaengine_prep_slave_sg(host->dma_chan, &host->sgl, 1, dir,
+ flags);
+ if (!desc) {
+ dev_err(mtd->dev.parent, "Failed to prepare slave sg\n");
+ goto out1;
+ }
+
+ init_completion(&host->comp);
+ desc->callback = lpc32xx_dma_complete_func;
+ desc->callback_param = &host->comp;
+
+ dmaengine_submit(desc);
+ dma_async_issue_pending(host->dma_chan);
+
+ wait_for_completion_timeout(&host->comp, msecs_to_jiffies(1000));
+
+ dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
+ DMA_BIDIRECTIONAL);
+
+ return 0;
+out1:
+ dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
+ DMA_BIDIRECTIONAL);
+ return -ENXIO;
+}
+
+/*
+ * DMA read/write transfers with ECC support
+ */
+static int lpc32xx_xfer(struct mtd_info *mtd, uint8_t *buf, int eccsubpages,
+ int read)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
+ int i, status = 0;
+ unsigned long timeout;
+ int res;
+ enum dma_transfer_direction dir =
+ read ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
+ uint8_t *dma_buf;
+ bool dma_mapped;
+
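+	/*
+	 * Buffers below high_memory (lowmem) can be DMA-mapped directly;
+	 * anything else (e.g. vmalloc) is bounced through the preallocated
+	 * data_buf (editor's note).
+	 */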
+ if ((void *)buf <= high_memory) {
+ dma_buf = buf;
+ dma_mapped = true;
+ } else {
+ dma_buf = host->data_buf;
+ dma_mapped = false;
+ if (!read)
+ memcpy(host->data_buf, buf, mtd->writesize);
+ }
+
+ if (read) {
+ writel(readl(SLC_CFG(host->io_base)) |
+ SLCCFG_DMA_DIR | SLCCFG_ECC_EN | SLCCFG_DMA_ECC |
+ SLCCFG_DMA_BURST, SLC_CFG(host->io_base));
+ } else {
+ writel((readl(SLC_CFG(host->io_base)) |
+ SLCCFG_ECC_EN | SLCCFG_DMA_ECC | SLCCFG_DMA_BURST) &
+ ~SLCCFG_DMA_DIR,
+ SLC_CFG(host->io_base));
+ }
+
+ /* Clear initial ECC */
+ writel(SLCCTRL_ECC_CLEAR, SLC_CTRL(host->io_base));
+
+ /* Transfer size is data area only */
+ writel(mtd->writesize, SLC_TC(host->io_base));
+
+ /* Start transfer in the NAND controller */
+ writel(readl(SLC_CTRL(host->io_base)) | SLCCTRL_DMA_START,
+ SLC_CTRL(host->io_base));
+
+ for (i = 0; i < chip->ecc.steps; i++) {
+ /* Data */
+ res = lpc32xx_xmit_dma(mtd, SLC_DMA_DATA(host->io_base_dma),
+ dma_buf + i * chip->ecc.size,
+ mtd->writesize / chip->ecc.steps, dir);
+ if (res)
+ return res;
+
+ /* Always _read_ ECC */
+ if (i == chip->ecc.steps - 1)
+ break;
+ if (!read) /* ECC availability delayed on write */
+ udelay(10);
+ res = lpc32xx_xmit_dma(mtd, SLC_ECC(host->io_base_dma),
+ &host->ecc_buf[i], 4, DMA_DEV_TO_MEM);
+ if (res)
+ return res;
+ }
+
+ /*
+ * According to NXP, the DMA can be finished here, but the NAND
+	 * controller may still have buffered data. After porting to the
+	 * dmaengine DMA driver (amba-pl080), the condition (DMA FIFO empty)
+	 * appears to always be true according to tests. Keep the check for
+	 * safety reasons for now.
+ */
+ if (readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO) {
+ dev_warn(mtd->dev.parent, "FIFO not empty!\n");
+ timeout = jiffies + msecs_to_jiffies(LPC32XX_DMA_TIMEOUT);
+ while ((readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO) &&
+ time_before(jiffies, timeout))
+ cpu_relax();
+ if (!time_before(jiffies, timeout)) {
+ dev_err(mtd->dev.parent, "FIFO held data too long\n");
+ status = -EIO;
+ }
+ }
+
+ /* Read last calculated ECC value */
+ if (!read)
+ udelay(10);
+ host->ecc_buf[chip->ecc.steps - 1] =
+ readl(SLC_ECC(host->io_base));
+
+ /* Flush DMA */
+ dmaengine_terminate_all(host->dma_chan);
+
+ if (readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO ||
+ readl(SLC_TC(host->io_base))) {
+ /* Something is left in the FIFO, something is wrong */
+ dev_err(mtd->dev.parent, "DMA FIFO failure\n");
+ status = -EIO;
+ }
+
+ /* Stop DMA & HW ECC */
+ writel(readl(SLC_CTRL(host->io_base)) & ~SLCCTRL_DMA_START,
+ SLC_CTRL(host->io_base));
+ writel(readl(SLC_CFG(host->io_base)) &
+ ~(SLCCFG_DMA_DIR | SLCCFG_ECC_EN | SLCCFG_DMA_ECC |
+ SLCCFG_DMA_BURST), SLC_CFG(host->io_base));
+
+ if (!dma_mapped && read)
+ memcpy(buf, host->data_buf, mtd->writesize);
+
+ return status;
+}
+
+/*
+ * Read the data and OOB data from the device, use ECC correction with the
+ * data, disable ECC for the OOB data
+ */
+static int lpc32xx_nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
+ struct mtd_oob_region oobregion = { };
+ int stat, i, status, error;
+ uint8_t *oobecc, tmpecc[LPC32XX_ECC_SAVE_SIZE];
+
+ /* Issue read command */
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
+ /* Read data and oob, calculate ECC */
+ status = lpc32xx_xfer(mtd, buf, chip->ecc.steps, 1);
+
+ /* Get OOB data */
+ chip->legacy.read_buf(chip, chip->oob_poi, mtd->oobsize);
+
+ /* Convert to stored ECC format */
+ lpc32xx_slc_ecc_copy(tmpecc, (uint32_t *) host->ecc_buf, chip->ecc.steps);
+
+ /* Pointer to ECC data retrieved from NAND spare area */
+ error = mtd_ooblayout_ecc(mtd, 0, &oobregion);
+ if (error)
+ return error;
+
+ oobecc = chip->oob_poi + oobregion.offset;
+
+ for (i = 0; i < chip->ecc.steps; i++) {
+ stat = chip->ecc.correct(chip, buf, oobecc,
+ &tmpecc[i * chip->ecc.bytes]);
+ if (stat < 0)
+ mtd->ecc_stats.failed++;
+ else
+ mtd->ecc_stats.corrected += stat;
+
+ buf += chip->ecc.size;
+ oobecc += chip->ecc.bytes;
+ }
+
+ return status;
+}
+
+/*
+ * Read the data and OOB data from the device, no ECC correction with the
+ * data or OOB data
+ */
+static int lpc32xx_nand_read_page_raw_syndrome(struct nand_chip *chip,
+ uint8_t *buf, int oob_required,
+ int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ /* Issue read command */
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
+ /* Raw reads can just use the FIFO interface */
+ chip->legacy.read_buf(chip, buf, chip->ecc.size * chip->ecc.steps);
+ chip->legacy.read_buf(chip, chip->oob_poi, mtd->oobsize);
+
+ return 0;
+}
+
+/*
+ * Write the data and OOB data to the device, use ECC with the data,
+ * disable ECC for the OOB data
+ */
+static int lpc32xx_nand_write_page_syndrome(struct nand_chip *chip,
+ const uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
+ struct mtd_oob_region oobregion = { };
+ uint8_t *pb;
+ int error;
+
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
+ /* Write data, calculate ECC on outbound data */
+ error = lpc32xx_xfer(mtd, (uint8_t *)buf, chip->ecc.steps, 0);
+ if (error)
+ return error;
+
+ /*
+ * The calculated ECC needs some manual work done to it before
+ * committing it to NAND. Process the calculated ECC and place
+	 * the resultant values directly into the OOB buffer.
+	 */
+ error = mtd_ooblayout_ecc(mtd, 0, &oobregion);
+ if (error)
+ return error;
+
+ pb = chip->oob_poi + oobregion.offset;
+ lpc32xx_slc_ecc_copy(pb, (uint32_t *)host->ecc_buf, chip->ecc.steps);
+
+ /* Write ECC data to device */
+ chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);
+
+ return nand_prog_page_end_op(chip);
+}
+
+/*
+ * Write the data and OOB data to the device, no ECC correction with the
+ * data or OOB data
+ */
+static int lpc32xx_nand_write_page_raw_syndrome(struct nand_chip *chip,
+ const uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ /* Raw writes can just use the FIFO interface */
+ nand_prog_page_begin_op(chip, page, 0, buf,
+ chip->ecc.size * chip->ecc.steps);
+ chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);
+
+ return nand_prog_page_end_op(chip);
+}
+
+static int lpc32xx_nand_dma_setup(struct lpc32xx_nand_host *host)
+{
+ struct mtd_info *mtd = nand_to_mtd(&host->nand_chip);
+ dma_cap_mask_t mask;
+
+ if (!host->pdata || !host->pdata->dma_filter) {
+ dev_err(mtd->dev.parent, "no DMA platform data\n");
+ return -ENOENT;
+ }
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ host->dma_chan = dma_request_channel(mask, host->pdata->dma_filter,
+ "nand-slc");
+ if (!host->dma_chan) {
+ dev_err(mtd->dev.parent, "Failed to request DMA channel\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static struct lpc32xx_nand_cfg_slc *lpc32xx_parse_dt(struct device *dev)
+{
+ struct lpc32xx_nand_cfg_slc *ncfg;
+ struct device_node *np = dev->of_node;
+
+ ncfg = devm_kzalloc(dev, sizeof(*ncfg), GFP_KERNEL);
+ if (!ncfg)
+ return NULL;
+
+ of_property_read_u32(np, "nxp,wdr-clks", &ncfg->wdr_clks);
+ of_property_read_u32(np, "nxp,wwidth", &ncfg->wwidth);
+ of_property_read_u32(np, "nxp,whold", &ncfg->whold);
+ of_property_read_u32(np, "nxp,wsetup", &ncfg->wsetup);
+ of_property_read_u32(np, "nxp,rdr-clks", &ncfg->rdr_clks);
+ of_property_read_u32(np, "nxp,rwidth", &ncfg->rwidth);
+ of_property_read_u32(np, "nxp,rhold", &ncfg->rhold);
+ of_property_read_u32(np, "nxp,rsetup", &ncfg->rsetup);
+
+ if (!ncfg->wdr_clks || !ncfg->wwidth || !ncfg->whold ||
+ !ncfg->wsetup || !ncfg->rdr_clks || !ncfg->rwidth ||
+ !ncfg->rhold || !ncfg->rsetup) {
+ dev_err(dev, "chip parameters not specified correctly\n");
+ return NULL;
+ }
+
+ ncfg->wp_gpio = of_get_named_gpio(np, "gpios", 0);
+
+ return ncfg;
+}
+
+static int lpc32xx_nand_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
+
+ if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
+ return 0;
+
+ /* OOB and ECC CPU and DMA work areas */
+ host->ecc_buf = (uint32_t *)(host->data_buf + LPC32XX_DMA_DATA_SIZE);
+
+ /*
+ * Small page FLASH has a unique OOB layout, but large and huge
+ * page FLASH use the standard layout. Small page FLASH uses a
+ * custom BBT marker layout.
+ */
+ if (mtd->writesize <= 512)
+ mtd_set_ooblayout(mtd, &lpc32xx_ooblayout_ops);
+
+ chip->ecc.placement = NAND_ECC_PLACEMENT_INTERLEAVED;
+ /* These sizes remain the same regardless of page size */
+ chip->ecc.size = 256;
+ chip->ecc.strength = 1;
+ chip->ecc.bytes = LPC32XX_SLC_DEV_ECC_BYTES;
+ chip->ecc.prepad = 0;
+ chip->ecc.postpad = 0;
+ chip->ecc.read_page_raw = lpc32xx_nand_read_page_raw_syndrome;
+ chip->ecc.read_page = lpc32xx_nand_read_page_syndrome;
+ chip->ecc.write_page_raw = lpc32xx_nand_write_page_raw_syndrome;
+ chip->ecc.write_page = lpc32xx_nand_write_page_syndrome;
+ chip->ecc.write_oob = lpc32xx_nand_write_oob_syndrome;
+ chip->ecc.read_oob = lpc32xx_nand_read_oob_syndrome;
+ chip->ecc.calculate = lpc32xx_nand_ecc_calculate;
+ chip->ecc.correct = nand_correct_data;
+ chip->ecc.hwctl = lpc32xx_nand_ecc_enable;
+
+ /*
+ * Use a custom BBT marker setup for small page FLASH that
+ * won't interfere with the ECC layout. Large and huge page
+ * FLASH use the standard layout.
+ */
+ if ((chip->bbt_options & NAND_BBT_USE_FLASH) &&
+ mtd->writesize <= 512) {
+ chip->bbt_td = &bbt_smallpage_main_descr;
+ chip->bbt_md = &bbt_smallpage_mirror_descr;
+ }
+
+ return 0;
+}
+
+static const struct nand_controller_ops lpc32xx_nand_controller_ops = {
+ .attach_chip = lpc32xx_nand_attach_chip,
+};
+
+/*
+ * Probe for NAND controller
+ */
+static int lpc32xx_nand_probe(struct platform_device *pdev)
+{
+ struct lpc32xx_nand_host *host;
+ struct mtd_info *mtd;
+ struct nand_chip *chip;
+ struct resource *rc;
+ int res;
+
+ /* Allocate memory for the device structure (and zero it) */
+ host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
+
+ rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ host->io_base = devm_ioremap_resource(&pdev->dev, rc);
+ if (IS_ERR(host->io_base))
+ return PTR_ERR(host->io_base);
+
+ host->io_base_dma = rc->start;
+ if (pdev->dev.of_node)
+ host->ncfg = lpc32xx_parse_dt(&pdev->dev);
+ if (!host->ncfg) {
+ dev_err(&pdev->dev,
+ "Missing or bad NAND config from device tree\n");
+ return -ENOENT;
+ }
+ if (host->ncfg->wp_gpio == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ if (gpio_is_valid(host->ncfg->wp_gpio) && devm_gpio_request(&pdev->dev,
+ host->ncfg->wp_gpio, "NAND WP")) {
+ dev_err(&pdev->dev, "GPIO not available\n");
+ return -EBUSY;
+ }
+ lpc32xx_wp_disable(host);
+
+ host->pdata = dev_get_platdata(&pdev->dev);
+
+ chip = &host->nand_chip;
+ mtd = nand_to_mtd(chip);
+ nand_set_controller_data(chip, host);
+ nand_set_flash_node(chip, pdev->dev.of_node);
+ mtd->owner = THIS_MODULE;
+ mtd->dev.parent = &pdev->dev;
+
+ /* Get NAND clock */
+ host->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(host->clk)) {
+ dev_err(&pdev->dev, "Clock failure\n");
+ res = -ENOENT;
+ goto enable_wp;
+ }
+ res = clk_prepare_enable(host->clk);
+ if (res)
+ goto enable_wp;
+
+ /* Set NAND IO addresses and command/ready functions */
+ chip->legacy.IO_ADDR_R = SLC_DATA(host->io_base);
+ chip->legacy.IO_ADDR_W = SLC_DATA(host->io_base);
+ chip->legacy.cmd_ctrl = lpc32xx_nand_cmd_ctrl;
+ chip->legacy.dev_ready = lpc32xx_nand_device_ready;
+ chip->legacy.chip_delay = 20; /* 20us command delay time */
+
+ /* Init NAND controller */
+ lpc32xx_nand_setup(host);
+
+ platform_set_drvdata(pdev, host);
+
+ /* NAND callbacks for LPC32xx SLC hardware */
+ chip->legacy.read_byte = lpc32xx_nand_read_byte;
+ chip->legacy.read_buf = lpc32xx_nand_read_buf;
+ chip->legacy.write_buf = lpc32xx_nand_write_buf;
+
+ /*
+ * Allocate a large enough buffer for a single huge page plus
+ * extra space for the spare area and ECC storage area
+ */
+ host->dma_buf_len = LPC32XX_DMA_DATA_SIZE + LPC32XX_ECC_SAVE_SIZE;
+ host->data_buf = devm_kzalloc(&pdev->dev, host->dma_buf_len,
+ GFP_KERNEL);
+ if (host->data_buf == NULL) {
+ res = -ENOMEM;
+ goto unprepare_clk;
+ }
+
+ res = lpc32xx_nand_dma_setup(host);
+ if (res) {
+ res = -EIO;
+ goto unprepare_clk;
+ }
+
+ /* Find NAND device */
+ chip->legacy.dummy_controller.ops = &lpc32xx_nand_controller_ops;
+ res = nand_scan(chip, 1);
+ if (res)
+ goto release_dma;
+
+ mtd->name = "nxp_lpc3220_slc";
+ res = mtd_device_register(mtd, host->ncfg->parts,
+ host->ncfg->num_parts);
+ if (res)
+ goto cleanup_nand;
+
+ return 0;
+
+cleanup_nand:
+ nand_cleanup(chip);
+release_dma:
+ dma_release_channel(host->dma_chan);
+unprepare_clk:
+ clk_disable_unprepare(host->clk);
+enable_wp:
+ lpc32xx_wp_enable(host);
+
+ return res;
+}
+
+/*
+ * Remove NAND device.
+ */
+static int lpc32xx_nand_remove(struct platform_device *pdev)
+{
+ uint32_t tmp;
+ struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
+ struct nand_chip *chip = &host->nand_chip;
+ int ret;
+
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ dma_release_channel(host->dma_chan);
+
+ /* Force CE high */
+ tmp = readl(SLC_CTRL(host->io_base));
+ tmp &= ~SLCCFG_CE_LOW;
+ writel(tmp, SLC_CTRL(host->io_base));
+
+ clk_disable_unprepare(host->clk);
+ lpc32xx_wp_enable(host);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int lpc32xx_nand_resume(struct platform_device *pdev)
+{
+ struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
+ int ret;
+
+ /* Re-enable NAND clock */
+ ret = clk_prepare_enable(host->clk);
+ if (ret)
+ return ret;
+
+ /* Fresh init of NAND controller */
+ lpc32xx_nand_setup(host);
+
+ /* Disable write protect */
+ lpc32xx_wp_disable(host);
+
+ return 0;
+}
+
+static int lpc32xx_nand_suspend(struct platform_device *pdev, pm_message_t pm)
+{
+ uint32_t tmp;
+ struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
+
+ /* Force CE high */
+ tmp = readl(SLC_CTRL(host->io_base));
+ tmp &= ~SLCCFG_CE_LOW;
+ writel(tmp, SLC_CTRL(host->io_base));
+
+ /* Enable write protect for safety */
+ lpc32xx_wp_enable(host);
+
+ /* Disable clock */
+ clk_disable_unprepare(host->clk);
+
+ return 0;
+}
+
+#else
+#define lpc32xx_nand_resume NULL
+#define lpc32xx_nand_suspend NULL
+#endif
+
+static const struct of_device_id lpc32xx_nand_match[] = {
+ { .compatible = "nxp,lpc3220-slc" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, lpc32xx_nand_match);
+
+static struct platform_driver lpc32xx_nand_driver = {
+ .probe = lpc32xx_nand_probe,
+ .remove = lpc32xx_nand_remove,
+ .resume = lpc32xx_nand_resume,
+ .suspend = lpc32xx_nand_suspend,
+ .driver = {
+ .name = LPC32XX_MODNAME,
+ .of_match_table = lpc32xx_nand_match,
+ },
+};
+
+module_platform_driver(lpc32xx_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Kevin Wells <kevin.wells@nxp.com>");
+MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
+MODULE_DESCRIPTION("NAND driver for the NXP LPC32XX SLC controller");
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
new file mode 100644
index 000000000..9ed3ff7f4
--- /dev/null
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -0,0 +1,3176 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Marvell NAND flash controller driver
+ *
+ * Copyright (C) 2017 Marvell
+ * Author: Miquel RAYNAL <miquel.raynal@free-electrons.com>
+ *
+ * This NAND controller driver handles two versions of the hardware,
+ * one is called NFCv1 and is available on PXA SoCs and the other is
+ * called NFCv2 and is available on Armada SoCs.
+ *
+ * The main visible difference is that NFCv1 only has Hamming ECC
+ * capabilities, while NFCv2 also embeds a BCH ECC engine. Also, DMA
+ * is not used with NFCv2.
+ *
+ * The ECC layouts are depicted in detail in Marvell AN-379, but here
+ * is a brief description.
+ *
+ * When using Hamming, the data is split in 512B chunks (either 1, 2
+ * or 4) and each chunk will have its own ECC "digest" of 6B at the
+ * beginning of the OOB area and eventually the remaining free OOB
+ * bytes (also called "spare" bytes in the driver). This engine
+ * corrects up to 1 bit per chunk and reliably detects an error if
+ * there are at most 2 bitflips. Here is the page layout used by the
+ * controller when Hamming is chosen:
+ *
+ * +-------------------------------------------------------------+
+ * | Data 1 | ... | Data N | ECC 1 | ... | ECCN | Free OOB bytes |
+ * +-------------------------------------------------------------+
+ *
+ * When using the BCH engine, there are N identical (data + free OOB +
+ * ECC) sections and potentially an extra one to deal with
+ * configurations where the chosen (data + free OOB + ECC) sizes do
+ * not align with the page (data + OOB) size. ECC bytes are always
+ * 30B per ECC chunk. Here is the page layout used by the controller
+ * when BCH is chosen:
+ *
+ * +-----------------------------------------
+ * | Data 1 | Free OOB bytes 1 | ECC 1 | ...
+ * +-----------------------------------------
+ *
+ * -------------------------------------------
+ * ... | Data N | Free OOB bytes N | ECC N |
+ * -------------------------------------------
+ *
+ * --------------------------------------------+
+ * Last Data | Last Free OOB bytes | Last ECC |
+ * --------------------------------------------+
+ *
+ * In both cases, the layout seen by the user is always: all data
+ * first, then all free OOB bytes and finally all ECC bytes. With BCH,
+ * ECC bytes are 30B long and are padded with 0xFF to align on 32
+ * bytes.
+ *
+ * The controller has certain limitations that are handled by the
+ * driver:
+ * - It can only read 2k at a time. To overcome this limitation, the
+ * driver issues data cycles on the bus, without issuing new
+ * CMD + ADDR cycles. The Marvell term is "naked" operations.
+ * - The ECC strength in BCH mode cannot be tuned. It is fixed at 16
+ * bits. What can be tuned is the ECC block size as long as it
+ * stays between 512B and 2kiB. It's usually chosen based on the
+ * chip ECC requirements. For instance, using 2kiB ECC chunks
+ * provides 4b/512B correctability.
+ * - The controller will always treat data bytes, free OOB bytes
+ * and ECC bytes in that order, no matter what the real layout is
+ * (which is usually all data then all OOB bytes). The
+ * marvell_nfc_layouts array below contains the currently
+ * supported layouts.
+ * - Because of these weird layouts, the Bad Block Markers can be
+ * located in data section. In this case, the NAND_BBT_NO_OOB_BBM
+ * option must be set to prevent scanning/writing bad block
+ * markers.
+ */
+
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/of_platform.h>
+#include <linux/iopoll.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <asm/unaligned.h>
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma/pxa-dma.h>
+#include <linux/platform_data/mtd-nand-pxa3xx.h>
+
+/* Data FIFO granularity, FIFO reads/writes must be a multiple of this length */
+#define FIFO_DEPTH 8
+#define FIFO_REP(x) (x / sizeof(u32))
+#define BCH_SEQ_READS (32 / FIFO_DEPTH)
+/* NFC does not support transfers of larger chunks at a time */
+#define MAX_CHUNK_SIZE 2112
+/* NFCv1 cannot read more that 7 bytes of ID */
+#define NFCV1_READID_LEN 7
+/* Polling is done at a pace of POLL_PERIOD us until POLL_TIMEOUT is reached */
+#define POLL_PERIOD 0
+#define POLL_TIMEOUT 100000
+/* Interrupt maximum wait period in ms */
+#define IRQ_TIMEOUT 1000
+/* Latency in clock cycles between SoC pins and NFC logic */
+#define MIN_RD_DEL_CNT 3
+/* Maximum number of contiguous address cycles */
+#define MAX_ADDRESS_CYC_NFCV1 5
+#define MAX_ADDRESS_CYC_NFCV2 7
+/* System control registers/bits to enable the NAND controller on some SoCs */
+#define GENCONF_SOC_DEVICE_MUX 0x208
+#define GENCONF_SOC_DEVICE_MUX_NFC_EN BIT(0)
+#define GENCONF_SOC_DEVICE_MUX_ECC_CLK_RST BIT(20)
+#define GENCONF_SOC_DEVICE_MUX_ECC_CORE_RST BIT(21)
+#define GENCONF_SOC_DEVICE_MUX_NFC_INT_EN BIT(25)
+#define GENCONF_CLK_GATING_CTRL 0x220
+#define GENCONF_CLK_GATING_CTRL_ND_GATE BIT(2)
+#define GENCONF_ND_CLK_CTRL 0x700
+#define GENCONF_ND_CLK_CTRL_EN BIT(0)
+
+/* NAND controller data flash control register */
+#define NDCR 0x00
+#define NDCR_ALL_INT GENMASK(11, 0)
+#define NDCR_CS1_CMDDM BIT(7)
+#define NDCR_CS0_CMDDM BIT(8)
+#define NDCR_RDYM BIT(11)
+#define NDCR_ND_ARB_EN BIT(12)
+#define NDCR_RA_START BIT(15)
+#define NDCR_RD_ID_CNT(x) (min_t(unsigned int, x, 0x7) << 16)
+#define NDCR_PAGE_SZ(x) (x >= 2048 ? BIT(24) : 0)
+#define NDCR_DWIDTH_M BIT(26)
+#define NDCR_DWIDTH_C BIT(27)
+#define NDCR_ND_RUN BIT(28)
+#define NDCR_DMA_EN BIT(29)
+#define NDCR_ECC_EN BIT(30)
+#define NDCR_SPARE_EN BIT(31)
+#define NDCR_GENERIC_FIELDS_MASK (~(NDCR_RA_START | NDCR_PAGE_SZ(2048) | \
+ NDCR_DWIDTH_M | NDCR_DWIDTH_C))
+
+/* NAND interface timing parameter 0 register */
+#define NDTR0 0x04
+#define NDTR0_TRP(x) ((min_t(unsigned int, x, 0xF) & 0x7) << 0)
+#define NDTR0_TRH(x) (min_t(unsigned int, x, 0x7) << 3)
+#define NDTR0_ETRP(x) ((min_t(unsigned int, x, 0xF) & 0x8) << 3)
+#define NDTR0_SEL_NRE_EDGE BIT(7)
+#define NDTR0_TWP(x) (min_t(unsigned int, x, 0x7) << 8)
+#define NDTR0_TWH(x) (min_t(unsigned int, x, 0x7) << 11)
+#define NDTR0_TCS(x) (min_t(unsigned int, x, 0x7) << 16)
+#define NDTR0_TCH(x) (min_t(unsigned int, x, 0x7) << 19)
+#define NDTR0_RD_CNT_DEL(x) (min_t(unsigned int, x, 0xF) << 22)
+#define NDTR0_SELCNTR BIT(26)
+#define NDTR0_TADL(x) (min_t(unsigned int, x, 0x1F) << 27)
+
+/* NAND interface timing parameter 1 register */
+#define NDTR1 0x0C
+#define NDTR1_TAR(x) (min_t(unsigned int, x, 0xF) << 0)
+#define NDTR1_TWHR(x) (min_t(unsigned int, x, 0xF) << 4)
+#define NDTR1_TRHW(x) (min_t(unsigned int, x / 16, 0x3) << 8)
+#define NDTR1_PRESCALE BIT(14)
+#define NDTR1_WAIT_MODE BIT(15)
+#define NDTR1_TR(x) (min_t(unsigned int, x, 0xFFFF) << 16)
+
+/* NAND controller status register */
+#define NDSR 0x14
+#define NDSR_WRCMDREQ BIT(0)
+#define NDSR_RDDREQ BIT(1)
+#define NDSR_WRDREQ BIT(2)
+#define NDSR_CORERR BIT(3)
+#define NDSR_UNCERR BIT(4)
+#define NDSR_CMDD(cs) BIT(8 - cs)
+#define NDSR_RDY(rb) BIT(11 + rb)
+#define NDSR_ERRCNT(x) ((x >> 16) & 0x1F)
+
+/* NAND ECC control register */
+#define NDECCCTRL 0x28
+#define NDECCCTRL_BCH_EN BIT(0)
+
+/* NAND controller data buffer register */
+#define NDDB 0x40
+
+/* NAND controller command buffer 0 register */
+#define NDCB0 0x48
+#define NDCB0_CMD1(x) ((x & 0xFF) << 0)
+#define NDCB0_CMD2(x) ((x & 0xFF) << 8)
+#define NDCB0_ADDR_CYC(x) ((x & 0x7) << 16)
+#define NDCB0_ADDR_GET_NUM_CYC(x) (((x) >> 16) & 0x7)
+#define NDCB0_DBC BIT(19)
+#define NDCB0_CMD_TYPE(x) ((x & 0x7) << 21)
+#define NDCB0_CSEL BIT(24)
+#define NDCB0_RDY_BYP BIT(27)
+#define NDCB0_LEN_OVRD BIT(28)
+#define NDCB0_CMD_XTYPE(x) ((x & 0x7) << 29)
+
+/* NAND controller command buffer 1 register */
+#define NDCB1 0x4C
+#define NDCB1_COLS(x) ((x & 0xFFFF) << 0)
+#define NDCB1_ADDRS_PAGE(x) (x << 16)
+
+/* NAND controller command buffer 2 register */
+#define NDCB2 0x50
+#define NDCB2_ADDR5_PAGE(x) (((x >> 16) & 0xFF) << 0)
+#define NDCB2_ADDR5_CYC(x) ((x & 0xFF) << 0)
+
+/* NAND controller command buffer 3 register */
+#define NDCB3 0x54
+#define NDCB3_ADDR6_CYC(x) ((x & 0xFF) << 16)
+#define NDCB3_ADDR7_CYC(x) ((x & 0xFF) << 24)
+
+/* NAND controller command buffer 0 register 'type' and 'xtype' fields */
+#define TYPE_READ 0
+#define TYPE_WRITE 1
+#define TYPE_ERASE 2
+#define TYPE_READ_ID 3
+#define TYPE_STATUS 4
+#define TYPE_RESET 5
+#define TYPE_NAKED_CMD 6
+#define TYPE_NAKED_ADDR 7
+#define TYPE_MASK 7
+#define XTYPE_MONOLITHIC_RW 0
+#define XTYPE_LAST_NAKED_RW 1
+#define XTYPE_FINAL_COMMAND 3
+#define XTYPE_READ 4
+#define XTYPE_WRITE_DISPATCH 4
+#define XTYPE_NAKED_RW 5
+#define XTYPE_COMMAND_DISPATCH 6
+#define XTYPE_MASK 7
+
+/**
+ * struct marvell_hw_ecc_layout - layout of Marvell ECC
+ *
+ * The Marvell ECC engine works differently from the others: in order to limit
+ * the size of the IP, the hardware engineers chose to fix the strength at 16
+ * bits per subpage, and depending on the strength required by the NAND chip,
+ * a particular layout mixing data/spare/ecc is defined, with a possible last
+ * chunk smaller than the others.
+ *
+ * @writesize: Full page size on which the layout applies
+ * @chunk: Desired ECC chunk size on which the layout applies
+ * @strength: Desired ECC strength (per chunk size bytes) on which the
+ * layout applies
+ * @nchunks: Total number of chunks
+ * @full_chunk_cnt: Number of full-sized chunks, which is the number of
+ * repetitions of the pattern:
+ * (data_bytes + spare_bytes + ecc_bytes).
+ * @data_bytes: Number of data bytes per chunk
+ * @spare_bytes: Number of spare bytes per chunk
+ * @ecc_bytes: Number of ecc bytes per chunk
+ * @last_data_bytes: Number of data bytes in the last chunk
+ * @last_spare_bytes: Number of spare bytes in the last chunk
+ * @last_ecc_bytes: Number of ecc bytes in the last chunk
+ */
+struct marvell_hw_ecc_layout {
+ /* Constraints */
+ int writesize;
+ int chunk;
+ int strength;
+ /* Corresponding layout */
+ int nchunks;
+ int full_chunk_cnt;
+ int data_bytes;
+ int spare_bytes;
+ int ecc_bytes;
+ int last_data_bytes;
+ int last_spare_bytes;
+ int last_ecc_bytes;
+};
+
+#define MARVELL_LAYOUT(ws, dc, ds, nc, fcc, db, sb, eb, ldb, lsb, leb) \
+ { \
+ .writesize = ws, \
+ .chunk = dc, \
+ .strength = ds, \
+ .nchunks = nc, \
+ .full_chunk_cnt = fcc, \
+ .data_bytes = db, \
+ .spare_bytes = sb, \
+ .ecc_bytes = eb, \
+ .last_data_bytes = ldb, \
+ .last_spare_bytes = lsb, \
+ .last_ecc_bytes = leb, \
+ }
+
+/* Layouts explained in AN-379_Marvell_SoC_NFC_ECC */
+static const struct marvell_hw_ecc_layout marvell_nfc_layouts[] = {
+ MARVELL_LAYOUT( 512, 512, 1, 1, 1, 512, 8, 8, 0, 0, 0),
+ MARVELL_LAYOUT( 2048, 512, 1, 1, 1, 2048, 40, 24, 0, 0, 0),
+ MARVELL_LAYOUT( 2048, 512, 4, 1, 1, 2048, 32, 30, 0, 0, 0),
+ MARVELL_LAYOUT( 2048, 512, 8, 2, 1, 1024, 0, 30,1024,32, 30),
+ MARVELL_LAYOUT( 4096, 512, 4, 2, 2, 2048, 32, 30, 0, 0, 0),
+ MARVELL_LAYOUT( 4096, 512, 8, 5, 4, 1024, 0, 30, 0, 64, 30),
+ MARVELL_LAYOUT( 8192, 512, 4, 4, 4, 2048, 0, 30, 0, 0, 0),
+ MARVELL_LAYOUT( 8192, 512, 8, 9, 8, 1024, 0, 30, 0, 160, 30),
+};
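+
+/*
+ * Editor's note: each row can be sanity-checked against the page size. For
+ * MARVELL_LAYOUT(2048, 512, 8, 2, 1, 1024, 0, 30, 1024, 32, 30), the data
+ * bytes sum to 1024 + 1024 = 2048 (the writesize), while the spare + ECC
+ * bytes, (0 + 30) + (32 + 30) = 92, must fit within the chip's OOB area.
+ */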
+
+/**
+ * struct marvell_nand_chip_sel - CS line description
+ *
+ * The NAND Flash Controller has up to 4 CE and 2 RB pins. The CE selection
+ * is made by a field in the NDCB0 register, and by another field in the NDCB2
+ * register. The datasheet describes the logic with an error: the ADDR5 field
+ * is declared once at the beginning of NDCB2, and a second time at its end.
+ * Because the ADDR5 field of NDCB2 may be used for other bytes, it would be
+ * more logical to use the last bits of this field instead of the first ones.
+ *
+ * @cs: Wanted CE lane.
+ * @ndcb0_csel: Value of the NDCB0 register with or without the flag
+ * selecting the wanted CE lane. This is set once when
+ * the Device Tree is probed.
+ * @rb: Ready/Busy pin for the flash chip
+ */
+struct marvell_nand_chip_sel {
+ unsigned int cs;
+ u32 ndcb0_csel;
+ unsigned int rb;
+};
+
+/**
+ * struct marvell_nand_chip - stores NAND chip device related information
+ *
+ * @chip: Base NAND chip structure
+ * @node: Used to store NAND chips into a list
+ * @layout: NAND layout when using hardware ECC
+ * @ndcr: Controller register value for this NAND chip
+ * @ndtr0: Timing registers 0 value for this NAND chip
+ * @ndtr1: Timing registers 1 value for this NAND chip
+ * @addr_cyc: Amount of cycles needed to pass column address
+ * @selected_die: Current active CS
+ * @nsels: Number of CS lines required by the NAND chip
+ * @sels: Array of CS lines descriptions
+ */
+struct marvell_nand_chip {
+ struct nand_chip chip;
+ struct list_head node;
+ const struct marvell_hw_ecc_layout *layout;
+ u32 ndcr;
+ u32 ndtr0;
+ u32 ndtr1;
+ int addr_cyc;
+ int selected_die;
+ unsigned int nsels;
+ struct marvell_nand_chip_sel sels[];
+};
+
+static inline struct marvell_nand_chip *to_marvell_nand(struct nand_chip *chip)
+{
+ return container_of(chip, struct marvell_nand_chip, chip);
+}
+
+static inline struct marvell_nand_chip_sel *to_nand_sel(struct marvell_nand_chip
+ *nand)
+{
+ return &nand->sels[nand->selected_die];
+}
+
+/**
+ * struct marvell_nfc_caps - NAND controller capabilities for distinction
+ * between compatible strings
+ *
+ * @max_cs_nb: Number of Chip Select lines available
+ * @max_rb_nb: Number of Ready/Busy lines available
+ * @need_system_controller: Indicates if the SoC needs to have access to the
+ * system controller (ie. to enable the NAND controller)
+ * @legacy_of_bindings: Indicates if DT parsing must be done using the old
+ * fashion way
+ * @is_nfcv2: NFCv2 has numerous enhancements compared to NFCv1, ie.
+ * BCH error detection and correction algorithm,
+ * NDCB3 register has been added
+ * @use_dma: Use dma for data transfers
+ */
+struct marvell_nfc_caps {
+ unsigned int max_cs_nb;
+ unsigned int max_rb_nb;
+ bool need_system_controller;
+ bool legacy_of_bindings;
+ bool is_nfcv2;
+ bool use_dma;
+};
+
+/**
+ * struct marvell_nfc - stores Marvell NAND controller information
+ *
+ * @controller: Base controller structure
+ * @dev: Parent device (used to print error messages)
+ * @regs: NAND controller registers
+ * @core_clk: Core clock
+ * @reg_clk: Registers clock
+ * @complete: Completion object to wait for NAND controller events
+ * @assigned_cs: Bitmask describing already assigned CS lines
+ * @chips: List containing all the NAND chips attached to
+ * this NAND controller
+ * @selected_chip: Currently selected target chip
+ * @caps: NAND controller capabilities for each compatible string
+ * @use_dma:		Whether DMA is used
+ * @dma_chan: DMA channel (NFCv1 only)
+ * @dma_buf: 32-bit aligned buffer for DMA transfers (NFCv1 only)
+ */
+struct marvell_nfc {
+ struct nand_controller controller;
+ struct device *dev;
+ void __iomem *regs;
+ struct clk *core_clk;
+ struct clk *reg_clk;
+ struct completion complete;
+ unsigned long assigned_cs;
+ struct list_head chips;
+ struct nand_chip *selected_chip;
+ const struct marvell_nfc_caps *caps;
+
+ /* DMA (NFCv1 only) */
+ bool use_dma;
+ struct dma_chan *dma_chan;
+ u8 *dma_buf;
+};
+
+static inline struct marvell_nfc *to_marvell_nfc(struct nand_controller *ctrl)
+{
+ return container_of(ctrl, struct marvell_nfc, controller);
+}
+
+/**
+ * struct marvell_nfc_timings - NAND controller timings expressed in NAND
+ * Controller clock cycles
+ *
+ * @tRP: ND_nRE pulse width
+ * @tRH: ND_nRE high duration
+ * @tWP: ND_nWE pulse time
+ * @tWH: ND_nWE high duration
+ * @tCS: Enable signal setup time
+ * @tCH: Enable signal hold time
+ * @tADL: Address to write data delay
+ * @tAR: ND_ALE low to ND_nRE low delay
+ * @tWHR: ND_nWE high to ND_nRE low for status read
+ * @tRHW: ND_nRE high duration, read to write delay
+ * @tR: ND_nWE high to ND_nRE low for read
+ */
+struct marvell_nfc_timings {
+ /* NDTR0 fields */
+ unsigned int tRP;
+ unsigned int tRH;
+ unsigned int tWP;
+ unsigned int tWH;
+ unsigned int tCS;
+ unsigned int tCH;
+ unsigned int tADL;
+ /* NDTR1 fields */
+ unsigned int tAR;
+ unsigned int tWHR;
+ unsigned int tRHW;
+ unsigned int tR;
+};
+
+/**
+ * Derives a duration as a number of clock cycles.
+ *
+ * @ps: Duration in pico-seconds
+ * @period_ns: Clock period in nano-seconds
+ *
+ * Convert the duration in nano-seconds, then divide by the period and
+ * return the number of clock periods.
+ */
+#define TO_CYCLES(ps, period_ns) (DIV_ROUND_UP(ps / 1000, period_ns))
+#define TO_CYCLES64(ps, period_ns) (DIV_ROUND_UP_ULL(div_u64(ps, 1000), \
+ period_ns))
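+
+/*
+ * Editor's worked example: a 25 ns timing (25000 ps) on a 200 MHz NFC clock
+ * (5 ns period) gives TO_CYCLES(25000, 5) = DIV_ROUND_UP(25, 5) = 5 cycles.
+ */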
+
+/**
+ * struct marvell_nfc_op - filled during the parsing of the ->exec_op()
+ * subop subset of instructions.
+ *
+ * @ndcb: Array of values written to NDCBx registers
+ * @cle_ale_delay_ns: Optional delay after the last CMD or ADDR cycle
+ * @rdy_timeout_ms: Timeout for waits on Ready/Busy pin
+ * @rdy_delay_ns: Optional delay after waiting for the RB pin
+ * @data_delay_ns: Optional delay after the data xfer
+ * @data_instr_idx: Index of the data instruction in the subop
+ * @data_instr: Pointer to the data instruction in the subop
+ */
+struct marvell_nfc_op {
+ u32 ndcb[4];
+ unsigned int cle_ale_delay_ns;
+ unsigned int rdy_timeout_ms;
+ unsigned int rdy_delay_ns;
+ unsigned int data_delay_ns;
+ unsigned int data_instr_idx;
+ const struct nand_op_instr *data_instr;
+};
+
+/*
+ * Internal helper to conditionally apply a delay (from the above structure,
+ * most of the time).
+ */
+static void cond_delay(unsigned int ns)
+{
+ if (!ns)
+ return;
+
+ if (ns < 10000)
+ ndelay(ns);
+ else
+ udelay(DIV_ROUND_UP(ns, 1000));
+}
+
+/*
+ * The controller has many flags that could generate interrupts; most of them
+ * are disabled and polling is used instead. For the very slow signals, using
+ * interrupts may reduce the CPU load.
+ */
+static void marvell_nfc_disable_int(struct marvell_nfc *nfc, u32 int_mask)
+{
+ u32 reg;
+
+ /* Writing 1 disables the interrupt */
+ reg = readl_relaxed(nfc->regs + NDCR);
+ writel_relaxed(reg | int_mask, nfc->regs + NDCR);
+}
+
+static void marvell_nfc_enable_int(struct marvell_nfc *nfc, u32 int_mask)
+{
+ u32 reg;
+
+ /* Writing 0 enables the interrupt */
+ reg = readl_relaxed(nfc->regs + NDCR);
+ writel_relaxed(reg & ~int_mask, nfc->regs + NDCR);
+}
+
+static u32 marvell_nfc_clear_int(struct marvell_nfc *nfc, u32 int_mask)
+{
+ u32 reg;
+
+ reg = readl_relaxed(nfc->regs + NDSR);
+ writel_relaxed(int_mask, nfc->regs + NDSR);
+
+ return reg & int_mask;
+}
+
+static void marvell_nfc_force_byte_access(struct nand_chip *chip,
+ bool force_8bit)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ u32 ndcr;
+
+ /*
+ * Callers of this function do not verify if the NAND is using a 16-bit
+	 * or an 8-bit bus for normal operations, so we need to take care of that
+ * here by leaving the configuration unchanged if the NAND does not have
+ * the NAND_BUSWIDTH_16 flag set.
+ */
+ if (!(chip->options & NAND_BUSWIDTH_16))
+ return;
+
+ ndcr = readl_relaxed(nfc->regs + NDCR);
+
+ if (force_8bit)
+ ndcr &= ~(NDCR_DWIDTH_M | NDCR_DWIDTH_C);
+ else
+ ndcr |= NDCR_DWIDTH_M | NDCR_DWIDTH_C;
+
+ writel_relaxed(ndcr, nfc->regs + NDCR);
+}
+
+static int marvell_nfc_wait_ndrun(struct nand_chip *chip)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ u32 val;
+ int ret;
+
+ /*
+ * The command is being processed, wait for the ND_RUN bit to be
+ * cleared by the NFC. If not, we must clear it by hand.
+ */
+ ret = readl_relaxed_poll_timeout(nfc->regs + NDCR, val,
+ (val & NDCR_ND_RUN) == 0,
+ POLL_PERIOD, POLL_TIMEOUT);
+ if (ret) {
+ dev_err(nfc->dev, "Timeout on NAND controller run mode\n");
+ writel_relaxed(readl(nfc->regs + NDCR) & ~NDCR_ND_RUN,
+ nfc->regs + NDCR);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Any time a command has to be sent to the controller, the following sequence
+ * has to be followed:
+ * - call marvell_nfc_prepare_cmd()
+ *    -> activate the ND_RUN bit that will kind of 'start a job'
+ *    -> wait for the signal indicating the NFC is waiting for a command
+ * - send the command (cmd and address cycles)
+ * - eventually send or receive the data
+ * - call marvell_nfc_end_cmd() with the corresponding flag
+ *    -> wait for the flag to be triggered or cancel the job with a timeout
+ *
+ * The following helpers are here to factorize the code a bit so that
+ * specialized functions responsible for executing the actual NAND
+ * operations do not have to replicate the same code blocks.
+ */
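+
+/*
+ * Illustrative call pattern (editor's sketch, mirroring how the helpers
+ * below are combined by the exec_op handlers later in this file):
+ *
+ *	ret = marvell_nfc_prepare_cmd(chip);
+ *	if (ret)
+ *		return ret;
+ *	marvell_nfc_send_cmd(chip, &nfc_op);
+ *	ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ, "RDDREQ");
+ */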
+static int marvell_nfc_prepare_cmd(struct nand_chip *chip)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ u32 ndcr, val;
+ int ret;
+
+ /* Poll ND_RUN and clear NDSR before issuing any command */
+ ret = marvell_nfc_wait_ndrun(chip);
+ if (ret) {
+ dev_err(nfc->dev, "Last operation did not succeed\n");
+ return ret;
+ }
+
+ ndcr = readl_relaxed(nfc->regs + NDCR);
+ writel_relaxed(readl(nfc->regs + NDSR), nfc->regs + NDSR);
+
+ /* Assert ND_RUN bit and wait the NFC to be ready */
+ writel_relaxed(ndcr | NDCR_ND_RUN, nfc->regs + NDCR);
+ ret = readl_relaxed_poll_timeout(nfc->regs + NDSR, val,
+ val & NDSR_WRCMDREQ,
+ POLL_PERIOD, POLL_TIMEOUT);
+ if (ret) {
+		dev_err(nfc->dev, "Timeout on WRCMDREQ\n");
+ return -ETIMEDOUT;
+ }
+
+ /* Command may be written, clear WRCMDREQ status bit */
+ writel_relaxed(NDSR_WRCMDREQ, nfc->regs + NDSR);
+
+ return 0;
+}
+
+static void marvell_nfc_send_cmd(struct nand_chip *chip,
+ struct marvell_nfc_op *nfc_op)
+{
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+
+ dev_dbg(nfc->dev, "\nNDCR: 0x%08x\n"
+ "NDCB0: 0x%08x\nNDCB1: 0x%08x\nNDCB2: 0x%08x\nNDCB3: 0x%08x\n",
+ (u32)readl_relaxed(nfc->regs + NDCR), nfc_op->ndcb[0],
+ nfc_op->ndcb[1], nfc_op->ndcb[2], nfc_op->ndcb[3]);
+
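+ /*
+ * NDCB1, NDCB2 and NDCB3 are pushed sequentially through the same
+ * NDCB0 FIFO address, hence the repeated writes to NDCB0 below.
+ */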
+ writel_relaxed(to_nand_sel(marvell_nand)->ndcb0_csel | nfc_op->ndcb[0],
+ nfc->regs + NDCB0);
+ writel_relaxed(nfc_op->ndcb[1], nfc->regs + NDCB0);
+ writel(nfc_op->ndcb[2], nfc->regs + NDCB0);
+
+ /*
+ * Write NDCB0 four times only if LEN_OVRD is set or if ADDR6 or ADDR7
+ * fields are used (only available on NFCv2).
+ */
+ if (nfc_op->ndcb[0] & NDCB0_LEN_OVRD ||
+ NDCB0_ADDR_GET_NUM_CYC(nfc_op->ndcb[0]) >= 6) {
+ if (!WARN_ON_ONCE(!nfc->caps->is_nfcv2))
+ writel(nfc_op->ndcb[3], nfc->regs + NDCB0);
+ }
+}
+
+static int marvell_nfc_end_cmd(struct nand_chip *chip, int flag,
+ const char *label)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ u32 val;
+ int ret;
+
+ ret = readl_relaxed_poll_timeout(nfc->regs + NDSR, val,
+ val & flag,
+ POLL_PERIOD, POLL_TIMEOUT);
+
+ if (ret) {
+ dev_err(nfc->dev, "Timeout on %s (NDSR: 0x%08x)\n",
+ label, val);
+ if (nfc->dma_chan)
+ dmaengine_terminate_all(nfc->dma_chan);
+ return ret;
+ }
+
+ /*
+ * The DMA function uses this helper to poll on the CMDD bits without
+ * having them cleared.
+ */
+ if (nfc->use_dma && (readl_relaxed(nfc->regs + NDCR) & NDCR_DMA_EN))
+ return 0;
+
+ writel_relaxed(flag, nfc->regs + NDSR);
+
+ return 0;
+}
+
+static int marvell_nfc_wait_cmdd(struct nand_chip *chip)
+{
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ int cs_flag = NDSR_CMDD(to_nand_sel(marvell_nand)->ndcb0_csel);
+
+ return marvell_nfc_end_cmd(chip, cs_flag, "CMDD");
+}
+
+static int marvell_nfc_poll_status(struct marvell_nfc *nfc, u32 mask,
+ u32 expected_val, unsigned long timeout_ms)
+{
+ unsigned long limit;
+ u32 st;
+
+ limit = jiffies + msecs_to_jiffies(timeout_ms);
+ do {
+ st = readl_relaxed(nfc->regs + NDSR);
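+ /*
+ * Fold RDY[cs1/cs3] into RDY[cs0/cs2] so a single mask bit can be
+ * polled for, mirroring what the interrupt handler does.
+ */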
+ if (st & NDSR_RDY(1))
+ st |= NDSR_RDY(0);
+
+ if ((st & mask) == expected_val)
+ return 0;
+
+ cpu_relax();
+ } while (time_after(limit, jiffies));
+
+ return -ETIMEDOUT;
+}
+
+static int marvell_nfc_wait_op(struct nand_chip *chip, unsigned int timeout_ms)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u32 pending;
+ int ret;
+
+ /* Timeout is expressed in ms */
+ if (!timeout_ms)
+ timeout_ms = IRQ_TIMEOUT;
+
+ if (mtd->oops_panic_write) {
+ ret = marvell_nfc_poll_status(nfc, NDSR_RDY(0),
+ NDSR_RDY(0),
+ timeout_ms);
+ } else {
+ init_completion(&nfc->complete);
+
+ marvell_nfc_enable_int(nfc, NDCR_RDYM);
+ ret = wait_for_completion_timeout(&nfc->complete,
+ msecs_to_jiffies(timeout_ms));
+ marvell_nfc_disable_int(nfc, NDCR_RDYM);
+ }
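+
+ /*
+ * Note the differing semantics of ret above: marvell_nfc_poll_status()
+ * returns 0 on success, while wait_for_completion_timeout() returns 0
+ * on timeout and the remaining jiffies otherwise.
+ */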
+ pending = marvell_nfc_clear_int(nfc, NDSR_RDY(0) | NDSR_RDY(1));
+
+ /*
+ * In case the interrupt was not served in the required time frame,
+ * check if the ISR was not served or if something went actually wrong.
+ */
+ if (!ret && !pending) {
+ dev_err(nfc->dev, "Timeout waiting for RB signal\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void marvell_nfc_select_target(struct nand_chip *chip,
+ unsigned int die_nr)
+{
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ u32 ndcr_generic;
+
+ /*
+ * Reset the NDCR register to a clean state for this particular chip,
+ * and also clear the ND_RUN bit.
+ */
+ ndcr_generic = readl_relaxed(nfc->regs + NDCR) &
+ NDCR_GENERIC_FIELDS_MASK & ~NDCR_ND_RUN;
+ writel_relaxed(ndcr_generic | marvell_nand->ndcr, nfc->regs + NDCR);
+
+ /* Also reset the interrupt status register */
+ marvell_nfc_clear_int(nfc, NDCR_ALL_INT);
+
+ if (chip == nfc->selected_chip && die_nr == marvell_nand->selected_die)
+ return;
+
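+ /* Update the controller timing registers with this chip's values */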
+ writel_relaxed(marvell_nand->ndtr0, nfc->regs + NDTR0);
+ writel_relaxed(marvell_nand->ndtr1, nfc->regs + NDTR1);
+
+ nfc->selected_chip = chip;
+ marvell_nand->selected_die = die_nr;
+}
+
+static irqreturn_t marvell_nfc_isr(int irq, void *dev_id)
+{
+ struct marvell_nfc *nfc = dev_id;
+ u32 st = readl_relaxed(nfc->regs + NDSR);
+ u32 ien = (~readl_relaxed(nfc->regs + NDCR)) & NDCR_ALL_INT;
+
+ /*
+ * The RDY interrupt mask is one bit in NDCR while there are two status
+ * bits in NDSR (RDY[cs0/cs2] and RDY[cs1/cs3]).
+ */
+ if (st & NDSR_RDY(1))
+ st |= NDSR_RDY(0);
+
+ if (!(st & ien))
+ return IRQ_NONE;
+
+ marvell_nfc_disable_int(nfc, st & NDCR_ALL_INT);
+
+ if (st & (NDSR_RDY(0) | NDSR_RDY(1)))
+ complete(&nfc->complete);
+
+ return IRQ_HANDLED;
+}
+
+/* HW ECC related functions */
+static void marvell_nfc_enable_hw_ecc(struct nand_chip *chip)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ u32 ndcr = readl_relaxed(nfc->regs + NDCR);
+
+ if (!(ndcr & NDCR_ECC_EN)) {
+ writel_relaxed(ndcr | NDCR_ECC_EN, nfc->regs + NDCR);
+
+ /*
+ * When enabling BCH, set threshold to 0 to always know the
+ * number of corrected bitflips.
+ */
+ if (chip->ecc.algo == NAND_ECC_ALGO_BCH)
+ writel_relaxed(NDECCCTRL_BCH_EN, nfc->regs + NDECCCTRL);
+ }
+}
+
+static void marvell_nfc_disable_hw_ecc(struct nand_chip *chip)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ u32 ndcr = readl_relaxed(nfc->regs + NDCR);
+
+ if (ndcr & NDCR_ECC_EN) {
+ writel_relaxed(ndcr & ~NDCR_ECC_EN, nfc->regs + NDCR);
+ if (chip->ecc.algo == NAND_ECC_ALGO_BCH)
+ writel_relaxed(0, nfc->regs + NDECCCTRL);
+ }
+}
+
+/* DMA related helpers */
+static void marvell_nfc_enable_dma(struct marvell_nfc *nfc)
+{
+ u32 reg;
+
+ reg = readl_relaxed(nfc->regs + NDCR);
+ writel_relaxed(reg | NDCR_DMA_EN, nfc->regs + NDCR);
+}
+
+static void marvell_nfc_disable_dma(struct marvell_nfc *nfc)
+{
+ u32 reg;
+
+ reg = readl_relaxed(nfc->regs + NDCR);
+ writel_relaxed(reg & ~NDCR_DMA_EN, nfc->regs + NDCR);
+}
+
+/* Read/write PIO/DMA accessors */
+static int marvell_nfc_xfer_data_dma(struct marvell_nfc *nfc,
+ enum dma_data_direction direction,
+ unsigned int len)
+{
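+ /* DMA lengths are rounded up to a 32-byte boundary and capped to one chunk */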
+ unsigned int dma_len = min_t(int, ALIGN(len, 32), MAX_CHUNK_SIZE);
+ struct dma_async_tx_descriptor *tx;
+ struct scatterlist sg;
+ dma_cookie_t cookie;
+ int ret;
+
+ marvell_nfc_enable_dma(nfc);
+ /* Prepare the DMA transfer */
+ sg_init_one(&sg, nfc->dma_buf, dma_len);
+ dma_map_sg(nfc->dma_chan->device->dev, &sg, 1, direction);
+ tx = dmaengine_prep_slave_sg(nfc->dma_chan, &sg, 1,
+ direction == DMA_FROM_DEVICE ?
+ DMA_DEV_TO_MEM : DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT);
+ if (!tx) {
+ dev_err(nfc->dev, "Could not prepare DMA S/G list\n");
+ return -ENXIO;
+ }
+
+ /* Do the task and wait for it to finish */
+ cookie = dmaengine_submit(tx);
+ ret = dma_submit_error(cookie);
+ if (ret)
+ return -EIO;
+
+ dma_async_issue_pending(nfc->dma_chan);
+ ret = marvell_nfc_wait_cmdd(nfc->selected_chip);
+ dma_unmap_sg(nfc->dma_chan->device->dev, &sg, 1, direction);
+ marvell_nfc_disable_dma(nfc);
+ if (ret) {
+ dev_err(nfc->dev, "Timeout waiting for DMA (status: %d)\n",
+ dmaengine_tx_status(nfc->dma_chan, cookie, NULL));
+ dmaengine_terminate_all(nfc->dma_chan);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int marvell_nfc_xfer_data_in_pio(struct marvell_nfc *nfc, u8 *in,
+ unsigned int len)
+{
+ unsigned int last_len = len % FIFO_DEPTH;
+ unsigned int last_full_offset = round_down(len, FIFO_DEPTH);
+ int i;
+
+ for (i = 0; i < last_full_offset; i += FIFO_DEPTH)
+ ioread32_rep(nfc->regs + NDDB, in + i, FIFO_REP(FIFO_DEPTH));
+
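+ /*
+ * The FIFO can only be drained in full FIFO_DEPTH granules: read the
+ * trailing bytes into a bounce buffer and copy out only what was
+ * requested.
+ */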
+ if (last_len) {
+ u8 tmp_buf[FIFO_DEPTH];
+
+ ioread32_rep(nfc->regs + NDDB, tmp_buf, FIFO_REP(FIFO_DEPTH));
+ memcpy(in + last_full_offset, tmp_buf, last_len);
+ }
+
+ return 0;
+}
+
+static int marvell_nfc_xfer_data_out_pio(struct marvell_nfc *nfc, const u8 *out,
+ unsigned int len)
+{
+ unsigned int last_len = len % FIFO_DEPTH;
+ unsigned int last_full_offset = round_down(len, FIFO_DEPTH);
+ int i;
+
+ for (i = 0; i < last_full_offset; i += FIFO_DEPTH)
+ iowrite32_rep(nfc->regs + NDDB, out + i, FIFO_REP(FIFO_DEPTH));
+
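+ /*
+ * Same granularity constraint on the write side: pad the trailing
+ * bytes in a bounce buffer so a full FIFO_DEPTH granule is pushed.
+ */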
+ if (last_len) {
+ u8 tmp_buf[FIFO_DEPTH];
+
+ memcpy(tmp_buf, out + last_full_offset, last_len);
+ iowrite32_rep(nfc->regs + NDDB, tmp_buf, FIFO_REP(FIFO_DEPTH));
+ }
+
+ return 0;
+}
+
+static void marvell_nfc_check_empty_chunk(struct nand_chip *chip,
+ u8 *data, int data_len,
+ u8 *spare, int spare_len,
+ u8 *ecc, int ecc_len,
+ unsigned int *max_bitflips)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int bf;
+
+ /*
+ * Blank pages (all 0xFF) that have not been written may be recognized
+ * as bad if bitflips occur, so whenever an uncorrectable error occurs,
+ * check if the entire page (with ECC bytes) is actually blank or not.
+ */
+ if (!data)
+ data_len = 0;
+ if (!spare)
+ spare_len = 0;
+ if (!ecc)
+ ecc_len = 0;
+
+ bf = nand_check_erased_ecc_chunk(data, data_len, ecc, ecc_len,
+ spare, spare_len, chip->ecc.strength);
+ if (bf < 0) {
+ mtd->ecc_stats.failed++;
+ return;
+ }
+
+ /* Update the stats and max_bitflips */
+ mtd->ecc_stats.corrected += bf;
+ *max_bitflips = max_t(unsigned int, *max_bitflips, bf);
+}
+
+/*
+ * Check if a chunk is correct or not according to the hardware ECC engine.
+ * mtd->ecc_stats.corrected is updated, as well as max_bitflips. However,
+ * mtd->ecc_stats.failed is not: the function instead returns a non-zero
+ * value indicating that a check on the emptiness of the subpage must be
+ * performed before actually declaring the subpage as "corrupted".
+ */
+static int marvell_nfc_hw_ecc_check_bitflips(struct nand_chip *chip,
+ unsigned int *max_bitflips)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ int bf = 0;
+ u32 ndsr;
+
+ ndsr = readl_relaxed(nfc->regs + NDSR);
+
+ /* Check uncorrectable error flag */
+ if (ndsr & NDSR_UNCERR) {
+ writel_relaxed(ndsr, nfc->regs + NDSR);
+
+ /*
+ * Do not increment ->ecc_stats.failed now. Instead, return a
+ * non-zero value to indicate that this chunk was apparently
+ * bad and should be checked to see whether it is empty. If
+ * the chunk (with ECC bytes) is not declared empty, the calling
+ * function must increment the failure count.
+ */
+ return -EBADMSG;
+ }
+
+ /* Check correctable error flag */
+ if (ndsr & NDSR_CORERR) {
+ writel_relaxed(ndsr, nfc->regs + NDSR);
+
+ if (chip->ecc.algo == NAND_ECC_ALGO_BCH)
+ bf = NDSR_ERRCNT(ndsr);
+ else
+ bf = 1;
+ }
+
+ /* Update the stats and max_bitflips */
+ mtd->ecc_stats.corrected += bf;
+ *max_bitflips = max_t(unsigned int, *max_bitflips, bf);
+
+ return 0;
+}
+
+/* Hamming read helpers */
+static int marvell_nfc_hw_ecc_hmg_do_read_page(struct nand_chip *chip,
+ u8 *data_buf, u8 *oob_buf,
+ bool raw, int page)
+{
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ struct marvell_nfc_op nfc_op = {
+ .ndcb[0] = NDCB0_CMD_TYPE(TYPE_READ) |
+ NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
+ NDCB0_DBC |
+ NDCB0_CMD1(NAND_CMD_READ0) |
+ NDCB0_CMD2(NAND_CMD_READSTART),
+ .ndcb[1] = NDCB1_ADDRS_PAGE(page),
+ .ndcb[2] = NDCB2_ADDR5_PAGE(page),
+ };
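+ /* In raw mode, the ECC bytes are read as part of the OOB area */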
+ unsigned int oob_bytes = lt->spare_bytes + (raw ? lt->ecc_bytes : 0);
+ int ret;
+
+ /* NFCv2 needs more information about the operation being executed */
+ if (nfc->caps->is_nfcv2)
+ nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW);
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+ ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
+ "RDDREQ while draining FIFO (data/oob)");
+ if (ret)
+ return ret;
+
+ /*
+ * Read the page then the OOB area. Unlike what is shown in current
+ * documentation, spare bytes are protected by the ECC engine, and must
+ * be at the beginning of the OOB area or running this driver on legacy
+ * systems will prevent the discovery of the BBM/BBT.
+ */
+ if (nfc->use_dma) {
+ marvell_nfc_xfer_data_dma(nfc, DMA_FROM_DEVICE,
+ lt->data_bytes + oob_bytes);
+ memcpy(data_buf, nfc->dma_buf, lt->data_bytes);
+ memcpy(oob_buf, nfc->dma_buf + lt->data_bytes, oob_bytes);
+ } else {
+ marvell_nfc_xfer_data_in_pio(nfc, data_buf, lt->data_bytes);
+ marvell_nfc_xfer_data_in_pio(nfc, oob_buf, oob_bytes);
+ }
+
+ ret = marvell_nfc_wait_cmdd(chip);
+ return ret;
+}
+
+static int marvell_nfc_hw_ecc_hmg_read_page_raw(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ marvell_nfc_select_target(chip, chip->cur_cs);
+ return marvell_nfc_hw_ecc_hmg_do_read_page(chip, buf, chip->oob_poi,
+ true, page);
+}
+
+static int marvell_nfc_hw_ecc_hmg_read_page(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ unsigned int full_sz = lt->data_bytes + lt->spare_bytes + lt->ecc_bytes;
+ int max_bitflips = 0, ret;
+ u8 *raw_buf;
+
+ marvell_nfc_select_target(chip, chip->cur_cs);
+ marvell_nfc_enable_hw_ecc(chip);
+ marvell_nfc_hw_ecc_hmg_do_read_page(chip, buf, chip->oob_poi, false,
+ page);
+ ret = marvell_nfc_hw_ecc_check_bitflips(chip, &max_bitflips);
+ marvell_nfc_disable_hw_ecc(chip);
+
+ if (!ret)
+ return max_bitflips;
+
+ /*
+ * When ECC failures are detected, check if the full page has been
+ * written or not. Ignore the failure if it is actually empty.
+ */
+ raw_buf = kmalloc(full_sz, GFP_KERNEL);
+ if (!raw_buf)
+ return -ENOMEM;
+
+ marvell_nfc_hw_ecc_hmg_do_read_page(chip, raw_buf, raw_buf +
+ lt->data_bytes, true, page);
+ marvell_nfc_check_empty_chunk(chip, raw_buf, full_sz, NULL, 0, NULL, 0,
+ &max_bitflips);
+ kfree(raw_buf);
+
+ return max_bitflips;
+}
+
+/*
+ * As the spare area in Hamming layouts is not protected by the ECC engine
+ * (even if it appears before the ECC bytes when reading), the
+ * ->read_oob_raw() function also stands for ->read_oob().
+ */
+static int marvell_nfc_hw_ecc_hmg_read_oob_raw(struct nand_chip *chip, int page)
+{
+ u8 *buf = nand_get_data_buf(chip);
+
+ marvell_nfc_select_target(chip, chip->cur_cs);
+ return marvell_nfc_hw_ecc_hmg_do_read_page(chip, buf, chip->oob_poi,
+ true, page);
+}
+
+/* Hamming write helpers */
+static int marvell_nfc_hw_ecc_hmg_do_write_page(struct nand_chip *chip,
+ const u8 *data_buf,
+ const u8 *oob_buf, bool raw,
+ int page)
+{
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(nand_get_interface_config(chip));
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ struct marvell_nfc_op nfc_op = {
+ .ndcb[0] = NDCB0_CMD_TYPE(TYPE_WRITE) |
+ NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
+ NDCB0_CMD1(NAND_CMD_SEQIN) |
+ NDCB0_CMD2(NAND_CMD_PAGEPROG) |
+ NDCB0_DBC,
+ .ndcb[1] = NDCB1_ADDRS_PAGE(page),
+ .ndcb[2] = NDCB2_ADDR5_PAGE(page),
+ };
+ unsigned int oob_bytes = lt->spare_bytes + (raw ? lt->ecc_bytes : 0);
+ u8 status;
+ int ret;
+
+ /* NFCv2 needs more information about the operation being executed */
+ if (nfc->caps->is_nfcv2)
+ nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW);
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+ ret = marvell_nfc_end_cmd(chip, NDSR_WRDREQ,
+ "WRDREQ while loading FIFO (data)");
+ if (ret)
+ return ret;
+
+ /* Write the page then the OOB area */
+ if (nfc->use_dma) {
+ memcpy(nfc->dma_buf, data_buf, lt->data_bytes);
+ memcpy(nfc->dma_buf + lt->data_bytes, oob_buf, oob_bytes);
+ marvell_nfc_xfer_data_dma(nfc, DMA_TO_DEVICE, lt->data_bytes +
+ lt->ecc_bytes + lt->spare_bytes);
+ } else {
+ marvell_nfc_xfer_data_out_pio(nfc, data_buf, lt->data_bytes);
+ marvell_nfc_xfer_data_out_pio(nfc, oob_buf, oob_bytes);
+ }
+
+ ret = marvell_nfc_wait_cmdd(chip);
+ if (ret)
+ return ret;
+
+ ret = marvell_nfc_wait_op(chip,
+ PSEC_TO_MSEC(sdr->tPROG_max));
+ if (ret)
+ return ret;
+
+ /* Check write status on the chip side */
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
+ return 0;
+}
+
+static int marvell_nfc_hw_ecc_hmg_write_page_raw(struct nand_chip *chip,
+ const u8 *buf,
+ int oob_required, int page)
+{
+ marvell_nfc_select_target(chip, chip->cur_cs);
+ return marvell_nfc_hw_ecc_hmg_do_write_page(chip, buf, chip->oob_poi,
+ true, page);
+}
+
+static int marvell_nfc_hw_ecc_hmg_write_page(struct nand_chip *chip,
+ const u8 *buf,
+ int oob_required, int page)
+{
+ int ret;
+
+ marvell_nfc_select_target(chip, chip->cur_cs);
+ marvell_nfc_enable_hw_ecc(chip);
+ ret = marvell_nfc_hw_ecc_hmg_do_write_page(chip, buf, chip->oob_poi,
+ false, page);
+ marvell_nfc_disable_hw_ecc(chip);
+
+ return ret;
+}
+
+/*
+ * As the spare area in Hamming layouts is not protected by the ECC engine
+ * (even if it appears before the ECC bytes when reading), the
+ * ->write_oob_raw() function also stands for ->write_oob().
+ */
+static int marvell_nfc_hw_ecc_hmg_write_oob_raw(struct nand_chip *chip,
+ int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u8 *buf = nand_get_data_buf(chip);
+
+ memset(buf, 0xFF, mtd->writesize);
+
+ marvell_nfc_select_target(chip, chip->cur_cs);
+ return marvell_nfc_hw_ecc_hmg_do_write_page(chip, buf, chip->oob_poi,
+ true, page);
+}
+
+/* BCH read helpers */
+static int marvell_nfc_hw_ecc_bch_read_page_raw(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ u8 *oob = chip->oob_poi;
+ int chunk_size = lt->data_bytes + lt->spare_bytes + lt->ecc_bytes;
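+ /* ECC bytes are stored after all the spare bytes in the OOB buffer */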
+ int ecc_offset = (lt->full_chunk_cnt * lt->spare_bytes) +
+ lt->last_spare_bytes;
+ int data_len = lt->data_bytes;
+ int spare_len = lt->spare_bytes;
+ int ecc_len = lt->ecc_bytes;
+ int chunk;
+
+ marvell_nfc_select_target(chip, chip->cur_cs);
+
+ if (oob_required)
+ memset(chip->oob_poi, 0xFF, mtd->oobsize);
+
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
+ for (chunk = 0; chunk < lt->nchunks; chunk++) {
+ /* Update last chunk length */
+ if (chunk >= lt->full_chunk_cnt) {
+ data_len = lt->last_data_bytes;
+ spare_len = lt->last_spare_bytes;
+ ecc_len = lt->last_ecc_bytes;
+ }
+
+ /* Read data bytes*/
+ nand_change_read_column_op(chip, chunk * chunk_size,
+ buf + (lt->data_bytes * chunk),
+ data_len, false);
+
+ /* Read spare bytes */
+ nand_read_data_op(chip, oob + (lt->spare_bytes * chunk),
+ spare_len, false, false);
+
+ /* Read ECC bytes */
+ nand_read_data_op(chip, oob + ecc_offset +
+ (ALIGN(lt->ecc_bytes, 32) * chunk),
+ ecc_len, false, false);
+ }
+
+ return 0;
+}
+
+static void marvell_nfc_hw_ecc_bch_read_chunk(struct nand_chip *chip, int chunk,
+ u8 *data, unsigned int data_len,
+ u8 *spare, unsigned int spare_len,
+ int page)
+{
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ int i, ret;
+ struct marvell_nfc_op nfc_op = {
+ .ndcb[0] = NDCB0_CMD_TYPE(TYPE_READ) |
+ NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
+ NDCB0_LEN_OVRD,
+ .ndcb[1] = NDCB1_ADDRS_PAGE(page),
+ .ndcb[2] = NDCB2_ADDR5_PAGE(page),
+ .ndcb[3] = data_len + spare_len,
+ };
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return;
+
+ if (chunk == 0)
+ nfc_op.ndcb[0] |= NDCB0_DBC |
+ NDCB0_CMD1(NAND_CMD_READ0) |
+ NDCB0_CMD2(NAND_CMD_READSTART);
+
+ /*
+ * Trigger the monolithic read on the first chunk, then naked read on
+ * intermediate chunks and finally a last naked read on the last chunk.
+ */
+ if (chunk == 0)
+ nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW);
+ else if (chunk < lt->nchunks - 1)
+ nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_NAKED_RW);
+ else
+ nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW);
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+
+ /*
+ * According to the datasheet, when reading from NDDB
+ * with BCH enabled, after each 32-byte read, we
+ * have to make sure that the NDSR.RDDREQ bit is set.
+ *
+ * Drain the FIFO, 8 32-bit reads at a time, and skip
+ * the polling on the last read.
+ *
+ * Length is a multiple of 32 bytes, hence it is a multiple of 8 too.
+ */
+ for (i = 0; i < data_len; i += FIFO_DEPTH * BCH_SEQ_READS) {
+ marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
+ "RDDREQ while draining FIFO (data)");
+ marvell_nfc_xfer_data_in_pio(nfc, data,
+ FIFO_DEPTH * BCH_SEQ_READS);
+ data += FIFO_DEPTH * BCH_SEQ_READS;
+ }
+
+ for (i = 0; i < spare_len; i += FIFO_DEPTH * BCH_SEQ_READS) {
+ marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
+ "RDDREQ while draining FIFO (OOB)");
+ marvell_nfc_xfer_data_in_pio(nfc, spare,
+ FIFO_DEPTH * BCH_SEQ_READS);
+ spare += FIFO_DEPTH * BCH_SEQ_READS;
+ }
+}
+
+static int marvell_nfc_hw_ecc_bch_read_page(struct nand_chip *chip,
+ u8 *buf, int oob_required,
+ int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ int data_len = lt->data_bytes, spare_len = lt->spare_bytes;
+ u8 *data = buf, *spare = chip->oob_poi;
+ int max_bitflips = 0;
+ u32 failure_mask = 0;
+ int chunk, ret;
+
+ marvell_nfc_select_target(chip, chip->cur_cs);
+
+ /*
+ * With BCH, the OOB area is not fully used (and thus not read
+ * entirely): unexpected bytes could show up at the end of the OOB
+ * buffer if it is not explicitly erased.
+ */
+ if (oob_required)
+ memset(chip->oob_poi, 0xFF, mtd->oobsize);
+
+ marvell_nfc_enable_hw_ecc(chip);
+
+ for (chunk = 0; chunk < lt->nchunks; chunk++) {
+ /* Update length for the last chunk */
+ if (chunk >= lt->full_chunk_cnt) {
+ data_len = lt->last_data_bytes;
+ spare_len = lt->last_spare_bytes;
+ }
+
+ /* Read the chunk and detect number of bitflips */
+ marvell_nfc_hw_ecc_bch_read_chunk(chip, chunk, data, data_len,
+ spare, spare_len, page);
+ ret = marvell_nfc_hw_ecc_check_bitflips(chip, &max_bitflips);
+ if (ret)
+ failure_mask |= BIT(chunk);
+
+ data += data_len;
+ spare += spare_len;
+ }
+
+ marvell_nfc_disable_hw_ecc(chip);
+
+ if (!failure_mask)
+ return max_bitflips;
+
+ /*
+ * Please note that dumping the ECC bytes during a normal read with OOB
+ * area would add a significant overhead as ECC bytes are "consumed" by
+ * the controller in normal mode and must be re-read in raw mode. To
+ * avoid degrading performance, we prefer not to include them. The
+ * user should re-read the page in raw mode if ECC bytes are required.
+ */
+
+ /*
+ * In case there is any subpage read error, we usually re-read only ECC
+ * bytes in raw mode and check if the whole page is empty. In this case,
+ * it is normal that the ECC check failed and we just ignore the error.
+ *
+ * However, it has been empirically observed that for some layouts (e.g.
+ * 2k page, 8b strength per 512B chunk), the controller tries to correct
+ * bits and may itself create bitflips in the erased area. To overcome
+ * this strange behavior, the whole page is re-read in raw mode, not
+ * only the ECC bytes.
+ */
+ for (chunk = 0; chunk < lt->nchunks; chunk++) {
+ int data_off_in_page, spare_off_in_page, ecc_off_in_page;
+ int data_off, spare_off, ecc_off;
+ int data_len, spare_len, ecc_len;
+
+ /* No failure reported for this chunk, move to the next one */
+ if (!(failure_mask & BIT(chunk)))
+ continue;
+
+ data_off_in_page = chunk * (lt->data_bytes + lt->spare_bytes +
+ lt->ecc_bytes);
+ spare_off_in_page = data_off_in_page +
+ (chunk < lt->full_chunk_cnt ? lt->data_bytes :
+ lt->last_data_bytes);
+ ecc_off_in_page = spare_off_in_page +
+ (chunk < lt->full_chunk_cnt ? lt->spare_bytes :
+ lt->last_spare_bytes);
+
+ data_off = chunk * lt->data_bytes;
+ spare_off = chunk * lt->spare_bytes;
+ ecc_off = (lt->full_chunk_cnt * lt->spare_bytes) +
+ lt->last_spare_bytes +
+ (chunk * (lt->ecc_bytes + 2));
+
+ data_len = chunk < lt->full_chunk_cnt ? lt->data_bytes :
+ lt->last_data_bytes;
+ spare_len = chunk < lt->full_chunk_cnt ? lt->spare_bytes :
+ lt->last_spare_bytes;
+ ecc_len = chunk < lt->full_chunk_cnt ? lt->ecc_bytes :
+ lt->last_ecc_bytes;
+
+ /*
+ * Only re-read the ECC bytes, unless we are using the 2k/8b
+ * layout which is buggy in the sense that the ECC engine will
+ * try to correct data bytes anyway, creating bitflips. In this
+ * case, re-read the entire page.
+ */
+ if (lt->writesize == 2048 && lt->strength == 8) {
+ nand_change_read_column_op(chip, data_off_in_page,
+ buf + data_off, data_len,
+ false);
+ nand_change_read_column_op(chip, spare_off_in_page,
+ chip->oob_poi + spare_off, spare_len,
+ false);
+ }
+
+ nand_change_read_column_op(chip, ecc_off_in_page,
+ chip->oob_poi + ecc_off, ecc_len,
+ false);
+
+ /* Check the entire chunk (data + spare + ecc) for emptyness */
+ marvell_nfc_check_empty_chunk(chip, buf + data_off, data_len,
+ chip->oob_poi + spare_off, spare_len,
+ chip->oob_poi + ecc_off, ecc_len,
+ &max_bitflips);
+ }
+
+ return max_bitflips;
+}
+
+static int marvell_nfc_hw_ecc_bch_read_oob_raw(struct nand_chip *chip, int page)
+{
+ u8 *buf = nand_get_data_buf(chip);
+
+ return chip->ecc.read_page_raw(chip, buf, true, page);
+}
+
+static int marvell_nfc_hw_ecc_bch_read_oob(struct nand_chip *chip, int page)
+{
+ u8 *buf = nand_get_data_buf(chip);
+
+ return chip->ecc.read_page(chip, buf, true, page);
+}
+
+/* BCH write helpers */
+static int marvell_nfc_hw_ecc_bch_write_page_raw(struct nand_chip *chip,
+ const u8 *buf,
+ int oob_required, int page)
+{
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ int full_chunk_size = lt->data_bytes + lt->spare_bytes + lt->ecc_bytes;
+ int data_len = lt->data_bytes;
+ int spare_len = lt->spare_bytes;
+ int ecc_len = lt->ecc_bytes;
+ int spare_offset = 0;
+ int ecc_offset = (lt->full_chunk_cnt * lt->spare_bytes) +
+ lt->last_spare_bytes;
+ int chunk;
+
+ marvell_nfc_select_target(chip, chip->cur_cs);
+
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
+ for (chunk = 0; chunk < lt->nchunks; chunk++) {
+ if (chunk >= lt->full_chunk_cnt) {
+ data_len = lt->last_data_bytes;
+ spare_len = lt->last_spare_bytes;
+ ecc_len = lt->last_ecc_bytes;
+ }
+
+ /* Point to the column of the next chunk */
+ nand_change_write_column_op(chip, chunk * full_chunk_size,
+ NULL, 0, false);
+
+ /* Write the data */
+ nand_write_data_op(chip, buf + (chunk * lt->data_bytes),
+ data_len, false);
+
+ if (!oob_required)
+ continue;
+
+ /* Write the spare bytes */
+ if (spare_len)
+ nand_write_data_op(chip, chip->oob_poi + spare_offset,
+ spare_len, false);
+
+ /* Write the ECC bytes */
+ if (ecc_len)
+ nand_write_data_op(chip, chip->oob_poi + ecc_offset,
+ ecc_len, false);
+
+ spare_offset += spare_len;
+ ecc_offset += ALIGN(ecc_len, 32);
+ }
+
+ return nand_prog_page_end_op(chip);
+}
+
+static int
+marvell_nfc_hw_ecc_bch_write_chunk(struct nand_chip *chip, int chunk,
+ const u8 *data, unsigned int data_len,
+ const u8 *spare, unsigned int spare_len,
+ int page)
+{
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ u32 xtype;
+ int ret;
+ struct marvell_nfc_op nfc_op = {
+ .ndcb[0] = NDCB0_CMD_TYPE(TYPE_WRITE) | NDCB0_LEN_OVRD,
+ .ndcb[3] = data_len + spare_len,
+ };
+
+ /*
+ * The first operation dispatches the SEQIN command, issues the address
+ * cycles and asks for the first chunk of data.
+ * All operations in the middle (if any) issue a naked write and
+ * also ask for data.
+ * The last operation (if any) asks for the last chunk of data through
+ * a last naked write.
+ */
+ if (chunk == 0) {
+ if (lt->nchunks == 1)
+ xtype = XTYPE_MONOLITHIC_RW;
+ else
+ xtype = XTYPE_WRITE_DISPATCH;
+
+ nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(xtype) |
+ NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
+ NDCB0_CMD1(NAND_CMD_SEQIN);
+ nfc_op.ndcb[1] |= NDCB1_ADDRS_PAGE(page);
+ nfc_op.ndcb[2] |= NDCB2_ADDR5_PAGE(page);
+ } else if (chunk < lt->nchunks - 1) {
+ nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_NAKED_RW);
+ } else {
+ nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW);
+ }
+
+ /* Always dispatch the PAGEPROG command on the last chunk */
+ if (chunk == lt->nchunks - 1)
+ nfc_op.ndcb[0] |= NDCB0_CMD2(NAND_CMD_PAGEPROG) | NDCB0_DBC;
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+ ret = marvell_nfc_end_cmd(chip, NDSR_WRDREQ,
+ "WRDREQ while loading FIFO (data)");
+ if (ret)
+ return ret;
+
+ /* Transfer the contents */
+ iowrite32_rep(nfc->regs + NDDB, data, FIFO_REP(data_len));
+ iowrite32_rep(nfc->regs + NDDB, spare, FIFO_REP(spare_len));
+
+ return 0;
+}
+
+static int marvell_nfc_hw_ecc_bch_write_page(struct nand_chip *chip,
+ const u8 *buf,
+ int oob_required, int page)
+{
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(nand_get_interface_config(chip));
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ const u8 *data = buf;
+ const u8 *spare = chip->oob_poi;
+ int data_len = lt->data_bytes;
+ int spare_len = lt->spare_bytes;
+ int chunk, ret;
+ u8 status;
+
+ marvell_nfc_select_target(chip, chip->cur_cs);
+
+ /* Spare data will be written anyway, so clear it to avoid garbage */
+ if (!oob_required)
+ memset(chip->oob_poi, 0xFF, mtd->oobsize);
+
+ marvell_nfc_enable_hw_ecc(chip);
+
+ for (chunk = 0; chunk < lt->nchunks; chunk++) {
+ if (chunk >= lt->full_chunk_cnt) {
+ data_len = lt->last_data_bytes;
+ spare_len = lt->last_spare_bytes;
+ }
+
+ marvell_nfc_hw_ecc_bch_write_chunk(chip, chunk, data, data_len,
+ spare, spare_len, page);
+ data += data_len;
+ spare += spare_len;
+
+ /*
+ * Waiting only for CMDD or PAGED is not enough: the ECC bytes
+ * would only be partially written. No flag is set once the
+ * operation is really finished, but the ND_RUN bit is cleared,
+ * so wait for it before stepping into the next command.
+ */
+ marvell_nfc_wait_ndrun(chip);
+ }
+
+ ret = marvell_nfc_wait_op(chip, PSEC_TO_MSEC(sdr->tPROG_max));
+
+ marvell_nfc_disable_hw_ecc(chip);
+
+ if (ret)
+ return ret;
+
+ /* Check write status on the chip side */
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
+ return 0;
+}
+
+static int marvell_nfc_hw_ecc_bch_write_oob_raw(struct nand_chip *chip,
+ int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u8 *buf = nand_get_data_buf(chip);
+
+ memset(buf, 0xFF, mtd->writesize);
+
+ return chip->ecc.write_page_raw(chip, buf, true, page);
+}
+
+static int marvell_nfc_hw_ecc_bch_write_oob(struct nand_chip *chip, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u8 *buf = nand_get_data_buf(chip);
+
+ memset(buf, 0xFF, mtd->writesize);
+
+ return chip->ecc.write_page(chip, buf, true, page);
+}
+
+/* NAND framework ->exec_op() hooks and related helpers */
+static void marvell_nfc_parse_instructions(struct nand_chip *chip,
+ const struct nand_subop *subop,
+ struct marvell_nfc_op *nfc_op)
+{
+ const struct nand_op_instr *instr = NULL;
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ bool first_cmd = true;
+ unsigned int op_id;
+ int i;
+
+ /* Reset the input structure as most of its fields will be OR'ed */
+ memset(nfc_op, 0, sizeof(struct marvell_nfc_op));
+
+ for (op_id = 0; op_id < subop->ninstrs; op_id++) {
+ unsigned int offset, naddrs;
+ const u8 *addrs;
+ int len;
+
+ instr = &subop->instrs[op_id];
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ if (first_cmd)
+ nfc_op->ndcb[0] |=
+ NDCB0_CMD1(instr->ctx.cmd.opcode);
+ else
+ nfc_op->ndcb[0] |=
+ NDCB0_CMD2(instr->ctx.cmd.opcode) |
+ NDCB0_DBC;
+
+ nfc_op->cle_ale_delay_ns = instr->delay_ns;
+ first_cmd = false;
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ offset = nand_subop_get_addr_start_off(subop, op_id);
+ naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
+ addrs = &instr->ctx.addr.addrs[offset];
+
+ nfc_op->ndcb[0] |= NDCB0_ADDR_CYC(naddrs);
+
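+ /*
+ * Address cycles 1-4 go into NDCB1, cycle 5 into NDCB2 and
+ * cycles 6-7 into NDCB3 (NFCv2 only).
+ */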
+ for (i = 0; i < min_t(unsigned int, 4, naddrs); i++)
+ nfc_op->ndcb[1] |= addrs[i] << (8 * i);
+
+ if (naddrs >= 5)
+ nfc_op->ndcb[2] |= NDCB2_ADDR5_CYC(addrs[4]);
+ if (naddrs >= 6)
+ nfc_op->ndcb[3] |= NDCB3_ADDR6_CYC(addrs[5]);
+ if (naddrs == 7)
+ nfc_op->ndcb[3] |= NDCB3_ADDR7_CYC(addrs[6]);
+
+ nfc_op->cle_ale_delay_ns = instr->delay_ns;
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ nfc_op->data_instr = instr;
+ nfc_op->data_instr_idx = op_id;
+ nfc_op->ndcb[0] |= NDCB0_CMD_TYPE(TYPE_READ);
+ if (nfc->caps->is_nfcv2) {
+ nfc_op->ndcb[0] |=
+ NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) |
+ NDCB0_LEN_OVRD;
+ len = nand_subop_get_data_len(subop, op_id);
+ nfc_op->ndcb[3] |= round_up(len, FIFO_DEPTH);
+ }
+ nfc_op->data_delay_ns = instr->delay_ns;
+ break;
+
+ case NAND_OP_DATA_OUT_INSTR:
+ nfc_op->data_instr = instr;
+ nfc_op->data_instr_idx = op_id;
+ nfc_op->ndcb[0] |= NDCB0_CMD_TYPE(TYPE_WRITE);
+ if (nfc->caps->is_nfcv2) {
+ nfc_op->ndcb[0] |=
+ NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) |
+ NDCB0_LEN_OVRD;
+ len = nand_subop_get_data_len(subop, op_id);
+ nfc_op->ndcb[3] |= round_up(len, FIFO_DEPTH);
+ }
+ nfc_op->data_delay_ns = instr->delay_ns;
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ nfc_op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms;
+ nfc_op->rdy_delay_ns = instr->delay_ns;
+ break;
+ }
+ }
+}
+
+static int marvell_nfc_xfer_data_pio(struct nand_chip *chip,
+ const struct nand_subop *subop,
+ struct marvell_nfc_op *nfc_op)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ const struct nand_op_instr *instr = nfc_op->data_instr;
+ unsigned int op_id = nfc_op->data_instr_idx;
+ unsigned int len = nand_subop_get_data_len(subop, op_id);
+ unsigned int offset = nand_subop_get_data_start_off(subop, op_id);
+ bool reading = (instr->type == NAND_OP_DATA_IN_INSTR);
+ int ret;
+
+ if (instr->ctx.data.force_8bit)
+ marvell_nfc_force_byte_access(chip, true);
+
+ if (reading) {
+ u8 *in = instr->ctx.data.buf.in + offset;
+
+ ret = marvell_nfc_xfer_data_in_pio(nfc, in, len);
+ } else {
+ const u8 *out = instr->ctx.data.buf.out + offset;
+
+ ret = marvell_nfc_xfer_data_out_pio(nfc, out, len);
+ }
+
+ if (instr->ctx.data.force_8bit)
+ marvell_nfc_force_byte_access(chip, false);
+
+ return ret;
+}
+
+static int marvell_nfc_monolithic_access_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct marvell_nfc_op nfc_op;
+ bool reading;
+ int ret;
+
+ marvell_nfc_parse_instructions(chip, subop, &nfc_op);
+ reading = (nfc_op.data_instr->type == NAND_OP_DATA_IN_INSTR);
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+ ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ | NDSR_WRDREQ,
+ "RDDREQ/WRDREQ while draining raw data");
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.cle_ale_delay_ns);
+
+ if (reading) {
+ if (nfc_op.rdy_timeout_ms) {
+ ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
+ if (ret)
+ return ret;
+ }
+
+ cond_delay(nfc_op.rdy_delay_ns);
+ }
+
+ marvell_nfc_xfer_data_pio(chip, subop, &nfc_op);
+ ret = marvell_nfc_wait_cmdd(chip);
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.data_delay_ns);
+
+ if (!reading) {
+ if (nfc_op.rdy_timeout_ms) {
+ ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
+ if (ret)
+ return ret;
+ }
+
+ cond_delay(nfc_op.rdy_delay_ns);
+ }
+
+ /*
+ * NDCR ND_RUN bit should be cleared automatically at the end of each
+ * operation but experience shows that the behavior is buggy when it
+ * comes to writes (with LEN_OVRD). Clear it by hand in this case.
+ */
+ if (!reading) {
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+
+ writel_relaxed(readl(nfc->regs + NDCR) & ~NDCR_ND_RUN,
+ nfc->regs + NDCR);
+ }
+
+ return 0;
+}
+
+static int marvell_nfc_naked_access_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct marvell_nfc_op nfc_op;
+ int ret;
+
+ marvell_nfc_parse_instructions(chip, subop, &nfc_op);
+
+ /*
+ * Naked accesses are different in that they need to be flagged as naked
+ * by the controller. Reset the controller registers fields that inform
+ * on the type and refill them according to the ongoing operation.
+ */
+ nfc_op.ndcb[0] &= ~(NDCB0_CMD_TYPE(TYPE_MASK) |
+ NDCB0_CMD_XTYPE(XTYPE_MASK));
+ switch (subop->instrs[0].type) {
+ case NAND_OP_CMD_INSTR:
+ nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_NAKED_CMD);
+ break;
+ case NAND_OP_ADDR_INSTR:
+ nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_NAKED_ADDR);
+ break;
+ case NAND_OP_DATA_IN_INSTR:
+ nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_READ) |
+ NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW);
+ break;
+ case NAND_OP_DATA_OUT_INSTR:
+ nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_WRITE) |
+ NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW);
+ break;
+ default:
+ /* This should never happen */
+ break;
+ }
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+
+ if (!nfc_op.data_instr) {
+ ret = marvell_nfc_wait_cmdd(chip);
+ cond_delay(nfc_op.cle_ale_delay_ns);
+ return ret;
+ }
+
+ ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ | NDSR_WRDREQ,
+ "RDDREQ/WRDREQ while draining raw data");
+ if (ret)
+ return ret;
+
+ marvell_nfc_xfer_data_pio(chip, subop, &nfc_op);
+ ret = marvell_nfc_wait_cmdd(chip);
+ if (ret)
+ return ret;
+
+ /*
+ * NDCR ND_RUN bit should be cleared automatically at the end of each
+ * operation but experience shows that the behavior is buggy when it
+ * comes to writes (with LEN_OVRD). Clear it by hand in this case.
+ */
+ if (subop->instrs[0].type == NAND_OP_DATA_OUT_INSTR) {
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+
+ writel_relaxed(readl(nfc->regs + NDCR) & ~NDCR_ND_RUN,
+ nfc->regs + NDCR);
+ }
+
+ return 0;
+}
+
+static int marvell_nfc_naked_waitrdy_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct marvell_nfc_op nfc_op;
+ int ret;
+
+ marvell_nfc_parse_instructions(chip, subop, &nfc_op);
+
+ ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
+ cond_delay(nfc_op.rdy_delay_ns);
+
+ return ret;
+}
+
+static int marvell_nfc_read_id_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct marvell_nfc_op nfc_op;
+ int ret;
+
+ marvell_nfc_parse_instructions(chip, subop, &nfc_op);
+ nfc_op.ndcb[0] &= ~NDCB0_CMD_TYPE(TYPE_READ);
+ nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_READ_ID);
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+ ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
+ "RDDREQ while reading ID");
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.cle_ale_delay_ns);
+
+ if (nfc_op.rdy_timeout_ms) {
+ ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
+ if (ret)
+ return ret;
+ }
+
+ cond_delay(nfc_op.rdy_delay_ns);
+
+ marvell_nfc_xfer_data_pio(chip, subop, &nfc_op);
+ ret = marvell_nfc_wait_cmdd(chip);
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.data_delay_ns);
+
+ return 0;
+}
+
+static int marvell_nfc_read_status_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct marvell_nfc_op nfc_op;
+ int ret;
+
+ marvell_nfc_parse_instructions(chip, subop, &nfc_op);
+ nfc_op.ndcb[0] &= ~NDCB0_CMD_TYPE(TYPE_READ);
+ nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_STATUS);
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+ ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
+ "RDDREQ while reading status");
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.cle_ale_delay_ns);
+
+ if (nfc_op.rdy_timeout_ms) {
+ ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
+ if (ret)
+ return ret;
+ }
+
+ cond_delay(nfc_op.rdy_delay_ns);
+
+ marvell_nfc_xfer_data_pio(chip, subop, &nfc_op);
+ ret = marvell_nfc_wait_cmdd(chip);
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.data_delay_ns);
+
+ return 0;
+}
+
+static int marvell_nfc_reset_cmd_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct marvell_nfc_op nfc_op;
+ int ret;
+
+ marvell_nfc_parse_instructions(chip, subop, &nfc_op);
+ nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_RESET);
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+ ret = marvell_nfc_wait_cmdd(chip);
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.cle_ale_delay_ns);
+
+ ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.rdy_delay_ns);
+
+ return 0;
+}
+
+static int marvell_nfc_erase_cmd_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct marvell_nfc_op nfc_op;
+ int ret;
+
+ marvell_nfc_parse_instructions(chip, subop, &nfc_op);
+ nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_ERASE);
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+ ret = marvell_nfc_wait_cmdd(chip);
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.cle_ale_delay_ns);
+
+ ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.rdy_delay_ns);
+
+ return 0;
+}
+
+static const struct nand_op_parser marvell_nfcv2_op_parser = NAND_OP_PARSER(
+ /* Monolithic reads/writes */
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_monolithic_access_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(true, MAX_ADDRESS_CYC_NFCV2),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, MAX_CHUNK_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_monolithic_access_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC_NFCV2),
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, MAX_CHUNK_SIZE),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
+ /* Naked commands */
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_naked_access_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_naked_access_exec,
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC_NFCV2)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_naked_access_exec,
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, MAX_CHUNK_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_naked_access_exec,
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, MAX_CHUNK_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_naked_waitrdy_exec,
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ );
+
+static const struct nand_op_parser marvell_nfcv1_op_parser = NAND_OP_PARSER(
+ /* Naked commands not supported, use a function for each pattern */
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_read_id_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC_NFCV1),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 8)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_erase_cmd_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC_NFCV1),
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_read_status_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 1)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_reset_cmd_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_naked_waitrdy_exec,
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ );
+
+static int marvell_nfc_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+
+ if (!check_only)
+ marvell_nfc_select_target(chip, op->cs);
+
+ if (nfc->caps->is_nfcv2)
+ return nand_op_parser_exec_op(chip, &marvell_nfcv2_op_parser,
+ op, check_only);
+ else
+ return nand_op_parser_exec_op(chip, &marvell_nfcv1_op_parser,
+ op, check_only);
+}
+
+/*
+ * Layouts were broken in the old pxa3xx_nand driver; these are supposed
+ * to be usable.
+ */
+static int marvell_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->length = (lt->full_chunk_cnt * lt->ecc_bytes) +
+ lt->last_ecc_bytes;
+ oobregion->offset = mtd->oobsize - oobregion->length;
+
+ return 0;
+}
+
+static int marvell_nand_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+
+ if (section)
+ return -ERANGE;
+
+ /*
+ * Bootrom looks in bytes 0 & 5 for bad blocks for the
+ * 4KB page / 4bit BCH combination.
+ */
+ if (mtd->writesize == SZ_4K && lt->data_bytes == SZ_2K)
+ oobregion->offset = 6;
+ else
+ oobregion->offset = 2;
+
+ oobregion->length = (lt->full_chunk_cnt * lt->spare_bytes) +
+ lt->last_spare_bytes - oobregion->offset;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops marvell_nand_ooblayout_ops = {
+ .ecc = marvell_nand_ooblayout_ecc,
+ .free = marvell_nand_ooblayout_free,
+};
+
+static int marvell_nand_hw_ecc_controller_init(struct mtd_info *mtd,
+ struct nand_ecc_ctrl *ecc)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ const struct marvell_hw_ecc_layout *l;
+ int i;
+
+ if (!nfc->caps->is_nfcv2 &&
+ (mtd->writesize + mtd->oobsize > MAX_CHUNK_SIZE)) {
+ dev_err(nfc->dev,
+ "NFCv1: writesize (%d) cannot be bigger than a chunk (%d)\n",
+ mtd->writesize, MAX_CHUNK_SIZE - mtd->oobsize);
+ return -ENOTSUPP;
+ }
+
+ to_marvell_nand(chip)->layout = NULL;
+ for (i = 0; i < ARRAY_SIZE(marvell_nfc_layouts); i++) {
+ l = &marvell_nfc_layouts[i];
+ if (mtd->writesize == l->writesize &&
+ ecc->size == l->chunk && ecc->strength == l->strength) {
+ to_marvell_nand(chip)->layout = l;
+ break;
+ }
+ }
+
+ if (!to_marvell_nand(chip)->layout ||
+ (!nfc->caps->is_nfcv2 && ecc->strength > 1)) {
+ dev_err(nfc->dev,
+ "ECC strength %d at page size %d is not supported\n",
+ ecc->strength, mtd->writesize);
+ return -ENOTSUPP;
+ }
+
+ /* Special care for the layout 2k/8-bit/512B */
+ if (l->writesize == 2048 && l->strength == 8) {
+ if (mtd->oobsize < 128) {
+ dev_err(nfc->dev, "Requested layout needs at least 128 OOB bytes\n");
+ return -ENOTSUPP;
+ } else {
+ chip->bbt_options |= NAND_BBT_NO_OOB_BBM;
+ }
+ }
+
+ mtd_set_ooblayout(mtd, &marvell_nand_ooblayout_ops);
+ ecc->steps = l->nchunks;
+ ecc->size = l->data_bytes;
+
+ if (ecc->strength == 1) {
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+ ecc->read_page_raw = marvell_nfc_hw_ecc_hmg_read_page_raw;
+ ecc->read_page = marvell_nfc_hw_ecc_hmg_read_page;
+ ecc->read_oob_raw = marvell_nfc_hw_ecc_hmg_read_oob_raw;
+ ecc->read_oob = ecc->read_oob_raw;
+ ecc->write_page_raw = marvell_nfc_hw_ecc_hmg_write_page_raw;
+ ecc->write_page = marvell_nfc_hw_ecc_hmg_write_page;
+ ecc->write_oob_raw = marvell_nfc_hw_ecc_hmg_write_oob_raw;
+ ecc->write_oob = ecc->write_oob_raw;
+ } else {
+ chip->ecc.algo = NAND_ECC_ALGO_BCH;
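+ /*
+ * The BCH engine always corrects up to 16 bits per chunk; layouts
+ * provide other equivalent strengths by adjusting the chunk size,
+ * so report the strength the engine actually applies.
+ */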
+ ecc->strength = 16;
+ ecc->read_page_raw = marvell_nfc_hw_ecc_bch_read_page_raw;
+ ecc->read_page = marvell_nfc_hw_ecc_bch_read_page;
+ ecc->read_oob_raw = marvell_nfc_hw_ecc_bch_read_oob_raw;
+ ecc->read_oob = marvell_nfc_hw_ecc_bch_read_oob;
+ ecc->write_page_raw = marvell_nfc_hw_ecc_bch_write_page_raw;
+ ecc->write_page = marvell_nfc_hw_ecc_bch_write_page;
+ ecc->write_oob_raw = marvell_nfc_hw_ecc_bch_write_oob_raw;
+ ecc->write_oob = marvell_nfc_hw_ecc_bch_write_oob;
+ }
+
+ return 0;
+}
+
+static int marvell_nand_ecc_init(struct mtd_info *mtd,
+ struct nand_ecc_ctrl *ecc)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ const struct nand_ecc_props *requirements =
+ nanddev_get_ecc_requirements(&chip->base);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ int ret;
+
+ if (ecc->engine_type != NAND_ECC_ENGINE_TYPE_NONE &&
+ (!ecc->size || !ecc->strength)) {
+ if (requirements->step_size && requirements->strength) {
+ ecc->size = requirements->step_size;
+ ecc->strength = requirements->strength;
+ } else {
+ dev_info(nfc->dev,
+ "No minimum ECC strength, using 1b/512B\n");
+ ecc->size = 512;
+ ecc->strength = 1;
+ }
+ }
+
+ switch (ecc->engine_type) {
+ case NAND_ECC_ENGINE_TYPE_ON_HOST:
+ ret = marvell_nand_hw_ecc_controller_init(mtd, ecc);
+ if (ret)
+ return ret;
+ break;
+ case NAND_ECC_ENGINE_TYPE_NONE:
+ case NAND_ECC_ENGINE_TYPE_SOFT:
+ case NAND_ECC_ENGINE_TYPE_ON_DIE:
+ if (!nfc->caps->is_nfcv2 && mtd->writesize != SZ_512 &&
+ mtd->writesize != SZ_2K) {
+ dev_err(nfc->dev, "NFCv1 cannot write %d bytes pages\n",
+ mtd->writesize);
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
+static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
+
+static struct nand_bbt_descr bbt_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
+ NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 8,
+ .len = 6,
+ .veroffs = 14,
+ .maxblocks = 8, /* Last 8 blocks in each chip */
+ .pattern = bbt_pattern
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
+ NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 8,
+ .len = 6,
+ .veroffs = 14,
+ .maxblocks = 8, /* Last 8 blocks in each chip */
+ .pattern = bbt_mirror_pattern
+};
+
+static int marvell_nfc_setup_interface(struct nand_chip *chip, int chipnr,
+ const struct nand_interface_config *conf)
+{
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ unsigned int period_ns = 1000000000 / clk_get_rate(nfc->core_clk) * 2;
+ const struct nand_sdr_timings *sdr;
+ struct marvell_nfc_timings nfc_tmg;
+ int read_delay;
+
+ sdr = nand_get_sdr_timings(conf);
+ if (IS_ERR(sdr))
+ return PTR_ERR(sdr);
+
+ /*
+ * SDR timings are given in picoseconds while NFC timings must be
+ * expressed in NAND controller clock cycles, which is half of the
+ * frequency of the accessible ECC clock retrieved by clk_get_rate().
+ * This is not written anywhere in the datasheet but was observed
+ * with an oscilloscope.
+ *
+ * The NFC datasheet gives equations from which those calculations
+ * are derived. They tend to be slightly more restrictive than the
+ * given core timings and may improve the overall speed.
+ */
+ nfc_tmg.tRP = TO_CYCLES(DIV_ROUND_UP(sdr->tRC_min, 2), period_ns) - 1;
+ nfc_tmg.tRH = nfc_tmg.tRP;
+ nfc_tmg.tWP = TO_CYCLES(DIV_ROUND_UP(sdr->tWC_min, 2), period_ns) - 1;
+ nfc_tmg.tWH = nfc_tmg.tWP;
+ nfc_tmg.tCS = TO_CYCLES(sdr->tCS_min, period_ns);
+ nfc_tmg.tCH = TO_CYCLES(sdr->tCH_min, period_ns) - 1;
+ nfc_tmg.tADL = TO_CYCLES(sdr->tADL_min, period_ns);
+ /*
+ * Read delay is the time of propagation from SoC pins to NFC internal
+ * logic. With non-EDO timings, this is MIN_RD_DEL_CNT clock cycles. In
+ * EDO mode, an additional delay of tRH must be taken into account so
+ * the data is sampled on the falling edge instead of the rising edge.
+ */
+ read_delay = sdr->tRC_min >= 30000 ?
+ MIN_RD_DEL_CNT : MIN_RD_DEL_CNT + nfc_tmg.tRH;
+
+ nfc_tmg.tAR = TO_CYCLES(sdr->tAR_min, period_ns);
+ /*
+ * tWHR and tRHW are supposed to be read to write delays (and vice
+ * versa) but in some cases, i.e. when doing a change column, they must
+ * be greater than that to be sure the tCCS delay is respected.
+ */
+ nfc_tmg.tWHR = TO_CYCLES(max_t(int, sdr->tWHR_min, sdr->tCCS_min),
+ period_ns) - 2;
+ nfc_tmg.tRHW = TO_CYCLES(max_t(int, sdr->tRHW_min, sdr->tCCS_min),
+ period_ns);
+
+ /*
+ * NFCv2: Use WAIT_MODE (wait for RB line), do not rely only on delays.
+ * NFCv1: No WAIT_MODE, tR must be maximal.
+ */
+ if (nfc->caps->is_nfcv2) {
+ nfc_tmg.tR = TO_CYCLES(sdr->tWB_max, period_ns);
+ } else {
+ nfc_tmg.tR = TO_CYCLES64(sdr->tWB_max + sdr->tR_max,
+ period_ns);
+ if (nfc_tmg.tR + 3 > nfc_tmg.tCH)
+ nfc_tmg.tR = nfc_tmg.tCH - 3;
+ else
+ nfc_tmg.tR = 0;
+ }
+
+ if (chipnr < 0)
+ return 0;
+
+ marvell_nand->ndtr0 =
+ NDTR0_TRP(nfc_tmg.tRP) |
+ NDTR0_TRH(nfc_tmg.tRH) |
+ NDTR0_ETRP(nfc_tmg.tRP) |
+ NDTR0_TWP(nfc_tmg.tWP) |
+ NDTR0_TWH(nfc_tmg.tWH) |
+ NDTR0_TCS(nfc_tmg.tCS) |
+ NDTR0_TCH(nfc_tmg.tCH);
+
+ marvell_nand->ndtr1 =
+ NDTR1_TAR(nfc_tmg.tAR) |
+ NDTR1_TWHR(nfc_tmg.tWHR) |
+ NDTR1_TR(nfc_tmg.tR);
+
+ if (nfc->caps->is_nfcv2) {
+ marvell_nand->ndtr0 |=
+ NDTR0_RD_CNT_DEL(read_delay) |
+ NDTR0_SELCNTR |
+ NDTR0_TADL(nfc_tmg.tADL);
+
+ marvell_nand->ndtr1 |=
+ NDTR1_TRHW(nfc_tmg.tRHW) |
+ NDTR1_WAIT_MODE;
+ }
+
+ /*
+ * Reset nfc->selected_chip so the next command will cause the timing
+ * registers to be updated in marvell_nfc_select_target().
+ */
+ nfc->selected_chip = NULL;
+
+ return 0;
+}
+
+static int marvell_nand_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(nfc->dev);
+ int ret;
+
+ if (pdata && pdata->flash_bbt)
+ chip->bbt_options |= NAND_BBT_USE_FLASH;
+
+ if (chip->bbt_options & NAND_BBT_USE_FLASH) {
+ /*
+ * We'll use a bad block table stored in-flash and don't
+ * allow writing the bad block marker to the flash.
+ */
+ chip->bbt_options |= NAND_BBT_NO_OOB_BBM;
+ chip->bbt_td = &bbt_main_descr;
+ chip->bbt_md = &bbt_mirror_descr;
+ }
+
+ /* Save the chip-specific fields of NDCR */
+ marvell_nand->ndcr = NDCR_PAGE_SZ(mtd->writesize);
+ if (chip->options & NAND_BUSWIDTH_16)
+ marvell_nand->ndcr |= NDCR_DWIDTH_M | NDCR_DWIDTH_C;
+
+ /*
+ * On small page NANDs, only one cycle is needed to pass the
+ * column address.
+ */
+ if (mtd->writesize <= 512) {
+ marvell_nand->addr_cyc = 1;
+ } else {
+ marvell_nand->addr_cyc = 2;
+ marvell_nand->ndcr |= NDCR_RA_START;
+ }
+
+ /*
+ * Now add the number of cycles needed to pass the row
+ * address.
+ *
+ * Addressing a chip using CS 2 or 3 should also need the third row
+ * cycle, but due to inconsistencies in the documentation and a lack of
+ * hardware to test this situation, this case is not supported.
+ */
+ if (chip->options & NAND_ROW_ADDR_3)
+ marvell_nand->addr_cyc += 3;
+ else
+ marvell_nand->addr_cyc += 2;
+
+ if (pdata) {
+ chip->ecc.size = pdata->ecc_step_size;
+ chip->ecc.strength = pdata->ecc_strength;
+ }
+
+ ret = marvell_nand_ecc_init(mtd, &chip->ecc);
+ if (ret) {
+ dev_err(nfc->dev, "ECC init failed: %d\n", ret);
+ return ret;
+ }
+
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST) {
+ /*
+ * Subpage write is not available with hardware ECC; also
+ * prohibit subpage read since, otherwise, subpage access from
+ * userspace would still be allowed and subpage writes, if
+ * used, would lead to numerous uncorrectable ECC errors.
+ */
+ chip->options |= NAND_NO_SUBPAGE_WRITE;
+ }
+
+ if (pdata || nfc->caps->legacy_of_bindings) {
+ /*
+ * We keep the MTD name unchanged to avoid breaking platforms
+ * where the MTD cmdline parser is used and the bootloader
+ * has not been updated to use the new naming scheme.
+ */
+ mtd->name = "pxa3xx_nand-0";
+ } else if (!mtd->name) {
+ /*
+ * If the new bindings are used and the bootloader has not been
+ * updated to pass a new mtdparts parameter on the cmdline, you
+ * should define the following property in your NAND node, ie:
+ *
+ * label = "main-storage";
+ *
+ * This way, mtd->name will be set by the core when
+ * nand_set_flash_node() is called.
+ */
+ mtd->name = devm_kasprintf(nfc->dev, GFP_KERNEL,
+ "%s:nand.%d", dev_name(nfc->dev),
+ marvell_nand->sels[0].cs);
+ if (!mtd->name) {
+ dev_err(nfc->dev, "Failed to allocate mtd->name\n");
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static const struct nand_controller_ops marvell_nand_controller_ops = {
+ .attach_chip = marvell_nand_attach_chip,
+ .exec_op = marvell_nfc_exec_op,
+ .setup_interface = marvell_nfc_setup_interface,
+};
+
+static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc,
+ struct device_node *np)
+{
+ struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(dev);
+ struct marvell_nand_chip *marvell_nand;
+ struct mtd_info *mtd;
+ struct nand_chip *chip;
+ int nsels, ret, i;
+ u32 cs, rb;
+
+ /*
+ * The legacy "num-cs" property indicates the number of CS on the only
+ * chip connected to the controller (legacy bindings do not support
+ * more than one chip). The CS and RB pins are always the #0.
+ *
+ * When not using legacy bindings, a couple of "reg" and "nand-rb"
+ * properties must be filled. For each chip, expressed as a subnode,
+ * "reg" points to the CS lines and "nand-rb" to the RB line.
+ */
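+ /*
+ * For instance, a (hypothetical) subnode using the new bindings and
+ * wired to CS 0 and RB 0 would look like:
+ *
+ * nand@0 {
+ * reg = <0>;
+ * nand-rb = <0>;
+ * };
+ */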
+ if (pdata || nfc->caps->legacy_of_bindings) {
+ nsels = 1;
+ } else {
+ nsels = of_property_count_elems_of_size(np, "reg", sizeof(u32));
+ if (nsels <= 0) {
+ dev_err(dev, "missing/invalid reg property\n");
+ return -EINVAL;
+ }
+ }
+
+	/* Allocate the NAND chip structure */
+ marvell_nand = devm_kzalloc(dev,
+ struct_size(marvell_nand, sels, nsels),
+ GFP_KERNEL);
+ if (!marvell_nand) {
+ dev_err(dev, "could not allocate chip structure\n");
+ return -ENOMEM;
+ }
+
+ marvell_nand->nsels = nsels;
+ marvell_nand->selected_die = -1;
+
+ for (i = 0; i < nsels; i++) {
+ if (pdata || nfc->caps->legacy_of_bindings) {
+ /*
+ * Legacy bindings use the CS lines in natural
+ * order (0, 1, ...)
+ */
+ cs = i;
+ } else {
+ /* Retrieve CS id */
+ ret = of_property_read_u32_index(np, "reg", i, &cs);
+ if (ret) {
+ dev_err(dev, "could not retrieve reg property: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ if (cs >= nfc->caps->max_cs_nb) {
+ dev_err(dev, "invalid reg value: %u (max CS = %d)\n",
+ cs, nfc->caps->max_cs_nb);
+ return -EINVAL;
+ }
+
+ if (test_and_set_bit(cs, &nfc->assigned_cs)) {
+ dev_err(dev, "CS %d already assigned\n", cs);
+ return -EINVAL;
+ }
+
+ /*
+ * The cs variable represents the chip select id, which must be
+ * converted in bit fields for NDCB0 and NDCB2 to select the
+		 * right chip. Unfortunately, due to a lack of information on
+		 * the subject and inconsistent documentation, the user should
+		 * not use CS1 and CS3 at all, as asserting them is not
+		 * supported in a reliable way (due to multiplexing inside the
+		 * ADDR5 field).
+ */
+ marvell_nand->sels[i].cs = cs;
+ switch (cs) {
+ case 0:
+ case 2:
+ marvell_nand->sels[i].ndcb0_csel = 0;
+ break;
+ case 1:
+ case 3:
+ marvell_nand->sels[i].ndcb0_csel = NDCB0_CSEL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Retrieve RB id */
+ if (pdata || nfc->caps->legacy_of_bindings) {
+ /* Legacy bindings always use RB #0 */
+ rb = 0;
+ } else {
+ ret = of_property_read_u32_index(np, "nand-rb", i,
+ &rb);
+ if (ret) {
+ dev_err(dev,
+ "could not retrieve RB property: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ if (rb >= nfc->caps->max_rb_nb) {
+			dev_err(dev, "invalid nand-rb value: %u (max RB = %d)\n",
+ rb, nfc->caps->max_rb_nb);
+ return -EINVAL;
+ }
+
+ marvell_nand->sels[i].rb = rb;
+ }
+
+ chip = &marvell_nand->chip;
+ chip->controller = &nfc->controller;
+ nand_set_flash_node(chip, np);
+
+ if (of_property_read_bool(np, "marvell,nand-keep-config"))
+ chip->options |= NAND_KEEP_TIMINGS;
+
+ mtd = nand_to_mtd(chip);
+ mtd->dev.parent = dev;
+
+ /*
+ * Default to HW ECC engine mode. If the nand-ecc-mode property is given
+ * in the DT node, this entry will be overwritten in nand_scan_ident().
+ */
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+
+ /*
+ * Save a reference value for timing registers before
+ * ->setup_interface() is called.
+ */
+ marvell_nand->ndtr0 = readl_relaxed(nfc->regs + NDTR0);
+ marvell_nand->ndtr1 = readl_relaxed(nfc->regs + NDTR1);
+
+ chip->options |= NAND_BUSWIDTH_AUTO;
+
+ ret = nand_scan(chip, marvell_nand->nsels);
+ if (ret) {
+ dev_err(dev, "could not scan the nand chip\n");
+ return ret;
+ }
+
+ if (pdata)
+ /* Legacy bindings support only one chip */
+ ret = mtd_device_register(mtd, pdata->parts, pdata->nr_parts);
+ else
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret) {
+ dev_err(dev, "failed to register mtd device: %d\n", ret);
+ nand_cleanup(chip);
+ return ret;
+ }
+
+ list_add_tail(&marvell_nand->node, &nfc->chips);
+
+ return 0;
+}
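+
+/*
+ * For reference (assumed example, not copied from the bindings document):
+ * with the non-legacy bindings, each chip is described by a subnode
+ * carrying the "reg" (CS lines) and "nand-rb" (RB line) properties parsed
+ * above, roughly:
+ *
+ *	nand@0 {
+ *		reg = <0>;
+ *		nand-rb = <0>;
+ *	};
+ */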
+
+static void marvell_nand_chips_cleanup(struct marvell_nfc *nfc)
+{
+ struct marvell_nand_chip *entry, *temp;
+ struct nand_chip *chip;
+ int ret;
+
+ list_for_each_entry_safe(entry, temp, &nfc->chips, node) {
+ chip = &entry->chip;
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ list_del(&entry->node);
+ }
+}
+
+static int marvell_nand_chips_init(struct device *dev, struct marvell_nfc *nfc)
+{
+ struct device_node *np = dev->of_node;
+ struct device_node *nand_np;
+ int max_cs = nfc->caps->max_cs_nb;
+ int nchips;
+ int ret;
+
+ if (!np)
+ nchips = 1;
+ else
+ nchips = of_get_child_count(np);
+
+ if (nchips > max_cs) {
+ dev_err(dev, "too many NAND chips: %d (max = %d CS)\n", nchips,
+ max_cs);
+ return -EINVAL;
+ }
+
+ /*
+	 * Legacy bindings do not use child nodes to expose NAND chip
+ * properties and layout. Instead, NAND properties are mixed with the
+ * controller ones, and partitions are defined as direct subnodes of the
+ * NAND controller node.
+ */
+ if (nfc->caps->legacy_of_bindings) {
+ ret = marvell_nand_chip_init(dev, nfc, np);
+ return ret;
+ }
+
+ for_each_child_of_node(np, nand_np) {
+ ret = marvell_nand_chip_init(dev, nfc, nand_np);
+ if (ret) {
+ of_node_put(nand_np);
+ goto cleanup_chips;
+ }
+ }
+
+ return 0;
+
+cleanup_chips:
+ marvell_nand_chips_cleanup(nfc);
+
+ return ret;
+}
+
+static int marvell_nfc_init_dma(struct marvell_nfc *nfc)
+{
+ struct platform_device *pdev = container_of(nfc->dev,
+ struct platform_device,
+ dev);
+ struct dma_slave_config config = {};
+ struct resource *r;
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_PXA_DMA)) {
+ dev_warn(nfc->dev,
+ "DMA not enabled in configuration\n");
+ return -ENOTSUPP;
+ }
+
+ ret = dma_set_mask_and_coherent(nfc->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ nfc->dma_chan = dma_request_chan(nfc->dev, "data");
+ if (IS_ERR(nfc->dma_chan)) {
+ ret = PTR_ERR(nfc->dma_chan);
+ nfc->dma_chan = NULL;
+ return dev_err_probe(nfc->dev, ret, "DMA channel request failed\n");
+ }
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ ret = -ENXIO;
+ goto release_channel;
+ }
+
+ config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ config.src_addr = r->start + NDDB;
+ config.dst_addr = r->start + NDDB;
+ config.src_maxburst = 32;
+ config.dst_maxburst = 32;
+ ret = dmaengine_slave_config(nfc->dma_chan, &config);
+ if (ret < 0) {
+ dev_err(nfc->dev, "Failed to configure DMA channel\n");
+ goto release_channel;
+ }
+
+ /*
+	 * DMA must act on a length that is a multiple of 32, and this length
+	 * may be bigger than the destination buffer. Use this bounce buffer
+	 * for DMA transfers instead, then copy the desired amount of data to
+	 * the provided buffer.
+ */
+ nfc->dma_buf = kmalloc(MAX_CHUNK_SIZE, GFP_KERNEL | GFP_DMA);
+ if (!nfc->dma_buf) {
+ ret = -ENOMEM;
+ goto release_channel;
+ }
+
+ nfc->use_dma = true;
+
+ return 0;
+
+release_channel:
+ dma_release_channel(nfc->dma_chan);
+ nfc->dma_chan = NULL;
+
+ return ret;
+}
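+
+/*
+ * Sketch of the bounce-buffer pattern set up above (illustration only;
+ * real_len and user_buf are hypothetical names): a transfer is rounded up
+ * to a multiple of 32 bytes for the DMA engine, then trimmed on copy-out:
+ *
+ *	size_t dma_len = ALIGN(real_len, 32);
+ *	// the DMA engine fills nfc->dma_buf[0..dma_len)
+ *	memcpy(user_buf, nfc->dma_buf, real_len);
+ */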
+
+static void marvell_nfc_reset(struct marvell_nfc *nfc)
+{
+ /*
+	 * ECC operations and interrupts are only enabled when specifically
+	 * needed. ECC must not be activated in the early stages or probe
+	 * fails. The arbiter flag, even if marked as "reserved", must be set
+	 * (empirically determined). The SPARE_EN bit must always be set or
+	 * the ECC bytes will not be at the expected offset in the read page,
+	 * which breaks the protection.
+ */
+ writel_relaxed(NDCR_ALL_INT | NDCR_ND_ARB_EN | NDCR_SPARE_EN |
+ NDCR_RD_ID_CNT(NFCV1_READID_LEN), nfc->regs + NDCR);
+ writel_relaxed(0xFFFFFFFF, nfc->regs + NDSR);
+ writel_relaxed(0, nfc->regs + NDECCCTRL);
+}
+
+static int marvell_nfc_init(struct marvell_nfc *nfc)
+{
+ struct device_node *np = nfc->dev->of_node;
+
+ /*
+	 * Some SoCs, like A7k/A8k, need the NAND controller, gated clocks
+	 * and reset bits to be enabled manually to avoid depending on the
+	 * bootloader. This is done through the System Functions registers.
+ */
+ if (nfc->caps->need_system_controller) {
+ struct regmap *sysctrl_base =
+ syscon_regmap_lookup_by_phandle(np,
+ "marvell,system-controller");
+
+ if (IS_ERR(sysctrl_base))
+ return PTR_ERR(sysctrl_base);
+
+ regmap_write(sysctrl_base, GENCONF_SOC_DEVICE_MUX,
+ GENCONF_SOC_DEVICE_MUX_NFC_EN |
+ GENCONF_SOC_DEVICE_MUX_ECC_CLK_RST |
+ GENCONF_SOC_DEVICE_MUX_ECC_CORE_RST |
+ GENCONF_SOC_DEVICE_MUX_NFC_INT_EN);
+
+ regmap_update_bits(sysctrl_base, GENCONF_CLK_GATING_CTRL,
+ GENCONF_CLK_GATING_CTRL_ND_GATE,
+ GENCONF_CLK_GATING_CTRL_ND_GATE);
+ }
+
+ /* Configure the DMA if appropriate */
+ if (!nfc->caps->is_nfcv2)
+ marvell_nfc_init_dma(nfc);
+
+ marvell_nfc_reset(nfc);
+
+ return 0;
+}
+
+static int marvell_nfc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct marvell_nfc *nfc;
+ int ret;
+ int irq;
+
+ nfc = devm_kzalloc(&pdev->dev, sizeof(struct marvell_nfc),
+ GFP_KERNEL);
+ if (!nfc)
+ return -ENOMEM;
+
+ nfc->dev = dev;
+ nand_controller_init(&nfc->controller);
+ nfc->controller.ops = &marvell_nand_controller_ops;
+ INIT_LIST_HEAD(&nfc->chips);
+
+ nfc->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(nfc->regs))
+ return PTR_ERR(nfc->regs);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ nfc->core_clk = devm_clk_get(&pdev->dev, "core");
+
+	/* Manage the legacy case (when the first clock was not named) */
+ if (nfc->core_clk == ERR_PTR(-ENOENT))
+ nfc->core_clk = devm_clk_get(&pdev->dev, NULL);
+
+ if (IS_ERR(nfc->core_clk))
+ return PTR_ERR(nfc->core_clk);
+
+ ret = clk_prepare_enable(nfc->core_clk);
+ if (ret)
+ return ret;
+
+ nfc->reg_clk = devm_clk_get(&pdev->dev, "reg");
+ if (IS_ERR(nfc->reg_clk)) {
+ if (PTR_ERR(nfc->reg_clk) != -ENOENT) {
+ ret = PTR_ERR(nfc->reg_clk);
+ goto unprepare_core_clk;
+ }
+
+ nfc->reg_clk = NULL;
+ }
+
+ ret = clk_prepare_enable(nfc->reg_clk);
+ if (ret)
+ goto unprepare_core_clk;
+
+ marvell_nfc_disable_int(nfc, NDCR_ALL_INT);
+ marvell_nfc_clear_int(nfc, NDCR_ALL_INT);
+ ret = devm_request_irq(dev, irq, marvell_nfc_isr,
+ 0, "marvell-nfc", nfc);
+ if (ret)
+ goto unprepare_reg_clk;
+
+ /* Get NAND controller capabilities */
+ if (pdev->id_entry)
+ nfc->caps = (void *)pdev->id_entry->driver_data;
+ else
+ nfc->caps = of_device_get_match_data(&pdev->dev);
+
+ if (!nfc->caps) {
+ dev_err(dev, "Could not retrieve NFC caps\n");
+ ret = -EINVAL;
+ goto unprepare_reg_clk;
+ }
+
+ /* Init the controller and then probe the chips */
+ ret = marvell_nfc_init(nfc);
+ if (ret)
+ goto unprepare_reg_clk;
+
+ platform_set_drvdata(pdev, nfc);
+
+ ret = marvell_nand_chips_init(dev, nfc);
+ if (ret)
+ goto release_dma;
+
+ return 0;
+
+release_dma:
+ if (nfc->use_dma)
+ dma_release_channel(nfc->dma_chan);
+unprepare_reg_clk:
+ clk_disable_unprepare(nfc->reg_clk);
+unprepare_core_clk:
+ clk_disable_unprepare(nfc->core_clk);
+
+ return ret;
+}
+
+static int marvell_nfc_remove(struct platform_device *pdev)
+{
+ struct marvell_nfc *nfc = platform_get_drvdata(pdev);
+
+ marvell_nand_chips_cleanup(nfc);
+
+ if (nfc->use_dma) {
+ dmaengine_terminate_all(nfc->dma_chan);
+ dma_release_channel(nfc->dma_chan);
+ }
+
+ clk_disable_unprepare(nfc->reg_clk);
+ clk_disable_unprepare(nfc->core_clk);
+
+ return 0;
+}
+
+static int __maybe_unused marvell_nfc_suspend(struct device *dev)
+{
+ struct marvell_nfc *nfc = dev_get_drvdata(dev);
+ struct marvell_nand_chip *chip;
+
+ list_for_each_entry(chip, &nfc->chips, node)
+ marvell_nfc_wait_ndrun(&chip->chip);
+
+ clk_disable_unprepare(nfc->reg_clk);
+ clk_disable_unprepare(nfc->core_clk);
+
+ return 0;
+}
+
+static int __maybe_unused marvell_nfc_resume(struct device *dev)
+{
+ struct marvell_nfc *nfc = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(nfc->core_clk);
+ if (ret < 0)
+ return ret;
+
+ ret = clk_prepare_enable(nfc->reg_clk);
+ if (ret < 0) {
+ clk_disable_unprepare(nfc->core_clk);
+ return ret;
+ }
+
+ /*
+ * Reset nfc->selected_chip so the next command will cause the timing
+ * registers to be restored in marvell_nfc_select_target().
+ */
+ nfc->selected_chip = NULL;
+
+ /* Reset registers that have lost their contents */
+ marvell_nfc_reset(nfc);
+
+ return 0;
+}
+
+static const struct dev_pm_ops marvell_nfc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(marvell_nfc_suspend, marvell_nfc_resume)
+};
+
+static const struct marvell_nfc_caps marvell_armada_8k_nfc_caps = {
+ .max_cs_nb = 4,
+ .max_rb_nb = 2,
+ .need_system_controller = true,
+ .is_nfcv2 = true,
+};
+
+static const struct marvell_nfc_caps marvell_armada370_nfc_caps = {
+ .max_cs_nb = 4,
+ .max_rb_nb = 2,
+ .is_nfcv2 = true,
+};
+
+static const struct marvell_nfc_caps marvell_pxa3xx_nfc_caps = {
+ .max_cs_nb = 2,
+ .max_rb_nb = 1,
+ .use_dma = true,
+};
+
+static const struct marvell_nfc_caps marvell_armada_8k_nfc_legacy_caps = {
+ .max_cs_nb = 4,
+ .max_rb_nb = 2,
+ .need_system_controller = true,
+ .legacy_of_bindings = true,
+ .is_nfcv2 = true,
+};
+
+static const struct marvell_nfc_caps marvell_armada370_nfc_legacy_caps = {
+ .max_cs_nb = 4,
+ .max_rb_nb = 2,
+ .legacy_of_bindings = true,
+ .is_nfcv2 = true,
+};
+
+static const struct marvell_nfc_caps marvell_pxa3xx_nfc_legacy_caps = {
+ .max_cs_nb = 2,
+ .max_rb_nb = 1,
+ .legacy_of_bindings = true,
+ .use_dma = true,
+};
+
+static const struct platform_device_id marvell_nfc_platform_ids[] = {
+ {
+ .name = "pxa3xx-nand",
+ .driver_data = (kernel_ulong_t)&marvell_pxa3xx_nfc_legacy_caps,
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(platform, marvell_nfc_platform_ids);
+
+static const struct of_device_id marvell_nfc_of_ids[] = {
+ {
+ .compatible = "marvell,armada-8k-nand-controller",
+ .data = &marvell_armada_8k_nfc_caps,
+ },
+ {
+ .compatible = "marvell,armada370-nand-controller",
+ .data = &marvell_armada370_nfc_caps,
+ },
+ {
+ .compatible = "marvell,pxa3xx-nand-controller",
+ .data = &marvell_pxa3xx_nfc_caps,
+ },
+ /* Support for old/deprecated bindings: */
+ {
+ .compatible = "marvell,armada-8k-nand",
+ .data = &marvell_armada_8k_nfc_legacy_caps,
+ },
+ {
+ .compatible = "marvell,armada370-nand",
+ .data = &marvell_armada370_nfc_legacy_caps,
+ },
+ {
+ .compatible = "marvell,pxa3xx-nand",
+ .data = &marvell_pxa3xx_nfc_legacy_caps,
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, marvell_nfc_of_ids);
+
+static struct platform_driver marvell_nfc_driver = {
+ .driver = {
+ .name = "marvell-nfc",
+ .of_match_table = marvell_nfc_of_ids,
+ .pm = &marvell_nfc_pm_ops,
+ },
+ .id_table = marvell_nfc_platform_ids,
+ .probe = marvell_nfc_probe,
+ .remove = marvell_nfc_remove,
+};
+module_platform_driver(marvell_nfc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Marvell NAND controller driver");
diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c
new file mode 100644
index 000000000..6bb0fca4a
--- /dev/null
+++ b/drivers/mtd/nand/raw/meson_nand.c
@@ -0,0 +1,1481 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Amlogic Meson Nand Flash Controller Driver
+ *
+ * Copyright (c) 2018 Amlogic, inc.
+ * Author: Liang Yang <liang.yang@amlogic.com>
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/iopoll.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/sched/task_stack.h>
+
+#define NFC_REG_CMD 0x00
+#define NFC_CMD_IDLE (0xc << 14)
+#define NFC_CMD_CLE (0x5 << 14)
+#define NFC_CMD_ALE (0x6 << 14)
+#define NFC_CMD_ADL ((0 << 16) | (3 << 20))
+#define NFC_CMD_ADH ((1 << 16) | (3 << 20))
+#define NFC_CMD_AIL ((2 << 16) | (3 << 20))
+#define NFC_CMD_AIH ((3 << 16) | (3 << 20))
+#define NFC_CMD_SEED ((8 << 16) | (3 << 20))
+#define NFC_CMD_M2N ((0 << 17) | (2 << 20))
+#define NFC_CMD_N2M ((1 << 17) | (2 << 20))
+#define NFC_CMD_RB BIT(20)
+#define NFC_CMD_SCRAMBLER_ENABLE BIT(19)
+#define NFC_CMD_SCRAMBLER_DISABLE 0
+#define NFC_CMD_SHORTMODE_DISABLE 0
+#define NFC_CMD_RB_INT BIT(14)
+
+#define NFC_CMD_GET_SIZE(x) (((x) >> 22) & GENMASK(4, 0))
+
+#define NFC_REG_CFG 0x04
+#define NFC_REG_DADR 0x08
+#define NFC_REG_IADR 0x0c
+#define NFC_REG_BUF 0x10
+#define NFC_REG_INFO 0x14
+#define NFC_REG_DC 0x18
+#define NFC_REG_ADR 0x1c
+#define NFC_REG_DL 0x20
+#define NFC_REG_DH 0x24
+#define NFC_REG_CADR 0x28
+#define NFC_REG_SADR 0x2c
+#define NFC_REG_PINS 0x30
+#define NFC_REG_VER 0x38
+
+#define NFC_RB_IRQ_EN BIT(21)
+
+#define CMDRWGEN(cmd_dir, ran, bch, short_mode, page_size, pages) \
+ ( \
+ (cmd_dir) | \
+ ((ran) << 19) | \
+ ((bch) << 14) | \
+ ((short_mode) << 13) | \
+ (((page_size) & 0x7f) << 6) | \
+ ((pages) & 0x3f) \
+ )
+
+#define GENCMDDADDRL(adl, addr) ((adl) | ((addr) & 0xffff))
+#define GENCMDDADDRH(adh, addr) ((adh) | (((addr) >> 16) & 0xffff))
+#define GENCMDIADDRL(ail, addr) ((ail) | ((addr) & 0xffff))
+#define GENCMDIADDRH(aih, addr) ((aih) | (((addr) >> 16) & 0xffff))
+
+#define DMA_DIR(dir) ((dir) ? NFC_CMD_N2M : NFC_CMD_M2N)
+#define DMA_ADDR_ALIGN 8
+
+#define ECC_CHECK_RETURN_FF (-1)
+
+#define NAND_CE0 (0xe << 10)
+#define NAND_CE1 (0xd << 10)
+
+#define DMA_BUSY_TIMEOUT 0x100000
+#define CMD_FIFO_EMPTY_TIMEOUT 1000
+
+#define MAX_CE_NUM 2
+
+/* eMMC clock register, misc control */
+#define CLK_SELECT_NAND BIT(31)
+
+#define NFC_CLK_CYCLE 6
+
+/* NAND flash controller delay of 3 ns, expressed in ps */
+#define NFC_DEFAULT_DELAY 3000
+
+#define ROW_ADDER(page, index) (((page) >> (8 * (index))) & 0xff)
+#define MAX_CYCLE_ADDRS 5
+#define DIRREAD 1
+#define DIRWRITE 0
+
+#define ECC_PARITY_BCH8_512B 14
+#define ECC_COMPLETE BIT(31)
+#define ECC_ERR_CNT(x) (((x) >> 24) & GENMASK(5, 0))
+#define ECC_ZERO_CNT(x) (((x) >> 16) & GENMASK(5, 0))
+#define ECC_UNCORRECTABLE 0x3f
+
+#define PER_INFO_BYTE 8
+
+struct meson_nfc_nand_chip {
+ struct list_head node;
+ struct nand_chip nand;
+ unsigned long clk_rate;
+ unsigned long level1_divider;
+ u32 bus_timing;
+ u32 twb;
+ u32 tadl;
+ u32 tbers_max;
+
+ u32 bch_mode;
+ u8 *data_buf;
+ __le64 *info_buf;
+ u32 nsels;
+ u8 sels[];
+};
+
+struct meson_nand_ecc {
+ u32 bch;
+ u32 strength;
+};
+
+struct meson_nfc_data {
+ const struct nand_ecc_caps *ecc_caps;
+};
+
+struct meson_nfc_param {
+ u32 chip_select;
+ u32 rb_select;
+};
+
+struct nand_rw_cmd {
+ u32 cmd0;
+ u32 addrs[MAX_CYCLE_ADDRS];
+ u32 cmd1;
+};
+
+struct nand_timing {
+ u32 twb;
+ u32 tadl;
+ u32 tbers_max;
+};
+
+struct meson_nfc {
+ struct nand_controller controller;
+ struct clk *core_clk;
+ struct clk *device_clk;
+ struct clk *phase_tx;
+ struct clk *phase_rx;
+
+ unsigned long clk_rate;
+ u32 bus_timing;
+
+ struct device *dev;
+ void __iomem *reg_base;
+ struct regmap *reg_clk;
+ struct completion completion;
+ struct list_head chips;
+ const struct meson_nfc_data *data;
+ struct meson_nfc_param param;
+ struct nand_timing timing;
+ union {
+ int cmd[32];
+ struct nand_rw_cmd rw;
+ } cmdfifo;
+
+ dma_addr_t daddr;
+ dma_addr_t iaddr;
+ u32 info_bytes;
+
+ unsigned long assigned_cs;
+};
+
+enum {
+ NFC_ECC_BCH8_1K = 2,
+ NFC_ECC_BCH24_1K,
+ NFC_ECC_BCH30_1K,
+ NFC_ECC_BCH40_1K,
+ NFC_ECC_BCH50_1K,
+ NFC_ECC_BCH60_1K,
+};
+
+#define MESON_ECC_DATA(b, s) { .bch = (b), .strength = (s)}
+
+static struct meson_nand_ecc meson_ecc[] = {
+ MESON_ECC_DATA(NFC_ECC_BCH8_1K, 8),
+ MESON_ECC_DATA(NFC_ECC_BCH24_1K, 24),
+ MESON_ECC_DATA(NFC_ECC_BCH30_1K, 30),
+ MESON_ECC_DATA(NFC_ECC_BCH40_1K, 40),
+ MESON_ECC_DATA(NFC_ECC_BCH50_1K, 50),
+ MESON_ECC_DATA(NFC_ECC_BCH60_1K, 60),
+};
+
+static int meson_nand_calc_ecc_bytes(int step_size, int strength)
+{
+ int ecc_bytes;
+
+ if (step_size == 512 && strength == 8)
+ return ECC_PARITY_BCH8_512B;
+
+ ecc_bytes = DIV_ROUND_UP(strength * fls(step_size * 8), 8);
+ ecc_bytes = ALIGN(ecc_bytes, 2);
+
+ return ecc_bytes;
+}
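+
+/*
+ * Worked example for the formula above (hand-computed, for illustration):
+ * with a 1024 B step, fls(1024 * 8) = fls(8192) = 14, so each bit of
+ * strength costs 14 parity bits:
+ *
+ *	// strength 8:  DIV_ROUND_UP(8 * 14, 8)  = 14  -> ALIGN(14, 2)  = 14
+ *	// strength 60: DIV_ROUND_UP(60 * 14, 8) = 105 -> ALIGN(105, 2) = 106
+ *
+ * The 512 B/strength-8 case bypasses the formula and uses the fixed
+ * ECC_PARITY_BCH8_512B value (14) instead.
+ */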
+
+NAND_ECC_CAPS_SINGLE(meson_gxl_ecc_caps,
+ meson_nand_calc_ecc_bytes, 1024, 8, 24, 30, 40, 50, 60);
+NAND_ECC_CAPS_SINGLE(meson_axg_ecc_caps,
+ meson_nand_calc_ecc_bytes, 1024, 8);
+
+static struct meson_nfc_nand_chip *to_meson_nand(struct nand_chip *nand)
+{
+ return container_of(nand, struct meson_nfc_nand_chip, nand);
+}
+
+static void meson_nfc_select_chip(struct nand_chip *nand, int chip)
+{
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ struct meson_nfc *nfc = nand_get_controller_data(nand);
+ int ret, value;
+
+ if (chip < 0 || WARN_ON_ONCE(chip >= meson_chip->nsels))
+ return;
+
+ nfc->param.chip_select = meson_chip->sels[chip] ? NAND_CE1 : NAND_CE0;
+ nfc->param.rb_select = nfc->param.chip_select;
+ nfc->timing.twb = meson_chip->twb;
+ nfc->timing.tadl = meson_chip->tadl;
+ nfc->timing.tbers_max = meson_chip->tbers_max;
+
+ if (nfc->clk_rate != meson_chip->clk_rate) {
+ ret = clk_set_rate(nfc->device_clk, meson_chip->clk_rate);
+ if (ret) {
+ dev_err(nfc->dev, "failed to set clock rate\n");
+ return;
+ }
+ nfc->clk_rate = meson_chip->clk_rate;
+ }
+ if (nfc->bus_timing != meson_chip->bus_timing) {
+ value = (NFC_CLK_CYCLE - 1) | (meson_chip->bus_timing << 5);
+ writel(value, nfc->reg_base + NFC_REG_CFG);
+ writel((1 << 31), nfc->reg_base + NFC_REG_CMD);
+ nfc->bus_timing = meson_chip->bus_timing;
+ }
+}
+
+static void meson_nfc_cmd_idle(struct meson_nfc *nfc, u32 time)
+{
+ writel(nfc->param.chip_select | NFC_CMD_IDLE | (time & 0x3ff),
+ nfc->reg_base + NFC_REG_CMD);
+}
+
+static void meson_nfc_cmd_seed(struct meson_nfc *nfc, u32 seed)
+{
+ writel(NFC_CMD_SEED | (0xc2 + (seed & 0x7fff)),
+ nfc->reg_base + NFC_REG_CMD);
+}
+
+static void meson_nfc_cmd_access(struct nand_chip *nand, int raw, bool dir,
+ int scrambler)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct meson_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd));
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ u32 bch = meson_chip->bch_mode, cmd;
+ int len = mtd->writesize, pagesize, pages;
+
+ pagesize = nand->ecc.size;
+
+ if (raw) {
+ len = mtd->writesize + mtd->oobsize;
+ cmd = (len & GENMASK(13, 0)) | scrambler | DMA_DIR(dir);
+ writel(cmd, nfc->reg_base + NFC_REG_CMD);
+ return;
+ }
+
+ pages = len / nand->ecc.size;
+
+ cmd = CMDRWGEN(DMA_DIR(dir), scrambler, bch,
+ NFC_CMD_SHORTMODE_DISABLE, pagesize, pages);
+
+ writel(cmd, nfc->reg_base + NFC_REG_CMD);
+}
+
+static void meson_nfc_drain_cmd(struct meson_nfc *nfc)
+{
+ /*
+ * Insert two commands to make sure all valid commands are finished.
+ *
+	 * The NAND flash controller is designed as a two-stage pipeline -
+	 * a) fetch and b) execute.
+	 * There may be cases where the driver sees the command queue as empty,
+	 * but the NAND flash controller still has two commands buffered:
+	 * one fetched into the NFC request queue (ready to run) and another
+	 * actively executing. Pushing two "IDLE" commands guarantees that
+	 * the pipeline is emptied.
+ */
+ meson_nfc_cmd_idle(nfc, 0);
+ meson_nfc_cmd_idle(nfc, 0);
+}
+
+static int meson_nfc_wait_cmd_finish(struct meson_nfc *nfc,
+ unsigned int timeout_ms)
+{
+ u32 cmd_size = 0;
+ int ret;
+
+	/* wait until the CMD FIFO is empty */
+ ret = readl_relaxed_poll_timeout(nfc->reg_base + NFC_REG_CMD, cmd_size,
+ !NFC_CMD_GET_SIZE(cmd_size),
+ 10, timeout_ms * 1000);
+ if (ret)
+		dev_err(nfc->dev, "timed out waiting for empty CMD FIFO\n");
+
+ return ret;
+}
+
+static int meson_nfc_wait_dma_finish(struct meson_nfc *nfc)
+{
+ meson_nfc_drain_cmd(nfc);
+
+ return meson_nfc_wait_cmd_finish(nfc, DMA_BUSY_TIMEOUT);
+}
+
+static u8 *meson_nfc_oob_ptr(struct nand_chip *nand, int i)
+{
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ int len;
+
+ len = nand->ecc.size * (i + 1) + (nand->ecc.bytes + 2) * i;
+
+ return meson_chip->data_buf + len;
+}
+
+static u8 *meson_nfc_data_ptr(struct nand_chip *nand, int i)
+{
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ int len, temp;
+
+ temp = nand->ecc.size + nand->ecc.bytes;
+ len = (temp + 2) * i;
+
+ return meson_chip->data_buf + len;
+}
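+
+/*
+ * Buffer layout sketch (illustration only): data_buf interleaves each ECC
+ * step's data with its OOB bytes, e.g. for ecc.size = 1024, ecc.bytes = 14:
+ *
+ *	// data of step i starts at i * (1024 + 14 + 2)
+ *	// OOB  of step i starts at (i + 1) * 1024 + i * (14 + 2)
+ *
+ * i.e. [data0][oob0][data1][oob1]..., with 2 user bytes preceding the
+ * parity bytes in each OOB chunk.
+ */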
+
+static void meson_nfc_get_data_oob(struct nand_chip *nand,
+ u8 *buf, u8 *oobbuf)
+{
+ int i, oob_len = 0;
+ u8 *dsrc, *osrc;
+
+ oob_len = nand->ecc.bytes + 2;
+ for (i = 0; i < nand->ecc.steps; i++) {
+ if (buf) {
+ dsrc = meson_nfc_data_ptr(nand, i);
+ memcpy(buf, dsrc, nand->ecc.size);
+ buf += nand->ecc.size;
+ }
+ osrc = meson_nfc_oob_ptr(nand, i);
+ memcpy(oobbuf, osrc, oob_len);
+ oobbuf += oob_len;
+ }
+}
+
+static void meson_nfc_set_data_oob(struct nand_chip *nand,
+ const u8 *buf, u8 *oobbuf)
+{
+ int i, oob_len = 0;
+ u8 *dsrc, *osrc;
+
+ oob_len = nand->ecc.bytes + 2;
+ for (i = 0; i < nand->ecc.steps; i++) {
+ if (buf) {
+ dsrc = meson_nfc_data_ptr(nand, i);
+ memcpy(dsrc, buf, nand->ecc.size);
+ buf += nand->ecc.size;
+ }
+ osrc = meson_nfc_oob_ptr(nand, i);
+ memcpy(osrc, oobbuf, oob_len);
+ oobbuf += oob_len;
+ }
+}
+
+static int meson_nfc_queue_rb(struct meson_nfc *nfc, int timeout_ms)
+{
+ u32 cmd, cfg;
+ int ret = 0;
+
+ meson_nfc_cmd_idle(nfc, nfc->timing.twb);
+ meson_nfc_drain_cmd(nfc);
+ meson_nfc_wait_cmd_finish(nfc, CMD_FIFO_EMPTY_TIMEOUT);
+
+ cfg = readl(nfc->reg_base + NFC_REG_CFG);
+ cfg |= NFC_RB_IRQ_EN;
+ writel(cfg, nfc->reg_base + NFC_REG_CFG);
+
+ reinit_completion(&nfc->completion);
+
+	/* use the max erase time as the upper bound when waiting for R/B */
+ cmd = NFC_CMD_RB | NFC_CMD_RB_INT
+ | nfc->param.chip_select | nfc->timing.tbers_max;
+ writel(cmd, nfc->reg_base + NFC_REG_CMD);
+
+ ret = wait_for_completion_timeout(&nfc->completion,
+ msecs_to_jiffies(timeout_ms));
+	if (ret == 0)
+		ret = -ETIMEDOUT;
+
+ return ret;
+}
+
+static void meson_nfc_set_user_byte(struct nand_chip *nand, u8 *oob_buf)
+{
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ __le64 *info;
+ int i, count;
+
+ for (i = 0, count = 0; i < nand->ecc.steps; i++, count += 2) {
+ info = &meson_chip->info_buf[i];
+ *info |= oob_buf[count];
+ *info |= oob_buf[count + 1] << 8;
+ }
+}
+
+static void meson_nfc_get_user_byte(struct nand_chip *nand, u8 *oob_buf)
+{
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ __le64 *info;
+ int i, count;
+
+ for (i = 0, count = 0; i < nand->ecc.steps; i++, count += 2) {
+ info = &meson_chip->info_buf[i];
+ oob_buf[count] = *info;
+ oob_buf[count + 1] = *info >> 8;
+ }
+}
+
+static int meson_nfc_ecc_correct(struct nand_chip *nand, u32 *bitflips,
+ u64 *correct_bitmap)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ __le64 *info;
+ int ret = 0, i;
+
+ for (i = 0; i < nand->ecc.steps; i++) {
+ info = &meson_chip->info_buf[i];
+ if (ECC_ERR_CNT(*info) != ECC_UNCORRECTABLE) {
+ mtd->ecc_stats.corrected += ECC_ERR_CNT(*info);
+ *bitflips = max_t(u32, *bitflips, ECC_ERR_CNT(*info));
+ *correct_bitmap |= BIT_ULL(i);
+ continue;
+ }
+ if ((nand->options & NAND_NEED_SCRAMBLING) &&
+ ECC_ZERO_CNT(*info) < nand->ecc.strength) {
+ mtd->ecc_stats.corrected += ECC_ZERO_CNT(*info);
+ *bitflips = max_t(u32, *bitflips,
+ ECC_ZERO_CNT(*info));
+ ret = ECC_CHECK_RETURN_FF;
+ } else {
+ ret = -EBADMSG;
+ }
+ }
+ return ret;
+}
+
+static int meson_nfc_dma_buffer_setup(struct nand_chip *nand, void *databuf,
+ int datalen, void *infobuf, int infolen,
+ enum dma_data_direction dir)
+{
+ struct meson_nfc *nfc = nand_get_controller_data(nand);
+ u32 cmd;
+ int ret = 0;
+
+ nfc->daddr = dma_map_single(nfc->dev, databuf, datalen, dir);
+ ret = dma_mapping_error(nfc->dev, nfc->daddr);
+ if (ret) {
+ dev_err(nfc->dev, "DMA mapping error\n");
+ return ret;
+ }
+ cmd = GENCMDDADDRL(NFC_CMD_ADL, nfc->daddr);
+ writel(cmd, nfc->reg_base + NFC_REG_CMD);
+
+ cmd = GENCMDDADDRH(NFC_CMD_ADH, nfc->daddr);
+ writel(cmd, nfc->reg_base + NFC_REG_CMD);
+
+ if (infobuf) {
+ nfc->iaddr = dma_map_single(nfc->dev, infobuf, infolen, dir);
+ ret = dma_mapping_error(nfc->dev, nfc->iaddr);
+ if (ret) {
+ dev_err(nfc->dev, "DMA mapping error\n");
+ dma_unmap_single(nfc->dev,
+ nfc->daddr, datalen, dir);
+ return ret;
+ }
+ nfc->info_bytes = infolen;
+ cmd = GENCMDIADDRL(NFC_CMD_AIL, nfc->iaddr);
+ writel(cmd, nfc->reg_base + NFC_REG_CMD);
+
+ cmd = GENCMDIADDRH(NFC_CMD_AIH, nfc->iaddr);
+ writel(cmd, nfc->reg_base + NFC_REG_CMD);
+ }
+
+ return ret;
+}
+
+static void meson_nfc_dma_buffer_release(struct nand_chip *nand,
+ int datalen, int infolen,
+ enum dma_data_direction dir)
+{
+ struct meson_nfc *nfc = nand_get_controller_data(nand);
+
+ dma_unmap_single(nfc->dev, nfc->daddr, datalen, dir);
+ if (infolen) {
+ dma_unmap_single(nfc->dev, nfc->iaddr, infolen, dir);
+ nfc->info_bytes = 0;
+ }
+}
+
+static int meson_nfc_read_buf(struct nand_chip *nand, u8 *buf, int len)
+{
+ struct meson_nfc *nfc = nand_get_controller_data(nand);
+ int ret = 0;
+ u32 cmd;
+ u8 *info;
+
+ info = kzalloc(PER_INFO_BYTE, GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ ret = meson_nfc_dma_buffer_setup(nand, buf, len, info,
+ PER_INFO_BYTE, DMA_FROM_DEVICE);
+ if (ret)
+ goto out;
+
+ cmd = NFC_CMD_N2M | (len & GENMASK(13, 0));
+ writel(cmd, nfc->reg_base + NFC_REG_CMD);
+
+ meson_nfc_drain_cmd(nfc);
+ meson_nfc_wait_cmd_finish(nfc, 1000);
+ meson_nfc_dma_buffer_release(nand, len, PER_INFO_BYTE, DMA_FROM_DEVICE);
+
+out:
+ kfree(info);
+
+ return ret;
+}
+
+static int meson_nfc_write_buf(struct nand_chip *nand, u8 *buf, int len)
+{
+ struct meson_nfc *nfc = nand_get_controller_data(nand);
+ int ret = 0;
+ u32 cmd;
+
+ ret = meson_nfc_dma_buffer_setup(nand, buf, len, NULL,
+ 0, DMA_TO_DEVICE);
+ if (ret)
+ return ret;
+
+ cmd = NFC_CMD_M2N | (len & GENMASK(13, 0));
+ writel(cmd, nfc->reg_base + NFC_REG_CMD);
+
+ meson_nfc_drain_cmd(nfc);
+ meson_nfc_wait_cmd_finish(nfc, 1000);
+ meson_nfc_dma_buffer_release(nand, len, 0, DMA_TO_DEVICE);
+
+ return ret;
+}
+
+static int meson_nfc_rw_cmd_prepare_and_execute(struct nand_chip *nand,
+ int page, bool in)
+{
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(nand_get_interface_config(nand));
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct meson_nfc *nfc = nand_get_controller_data(nand);
+ u32 *addrs = nfc->cmdfifo.rw.addrs;
+ u32 cs = nfc->param.chip_select;
+ u32 cmd0, cmd_num, row_start;
+ int ret = 0, i;
+
+ cmd_num = sizeof(struct nand_rw_cmd) / sizeof(int);
+
+ cmd0 = in ? NAND_CMD_READ0 : NAND_CMD_SEQIN;
+ nfc->cmdfifo.rw.cmd0 = cs | NFC_CMD_CLE | cmd0;
+
+ addrs[0] = cs | NFC_CMD_ALE | 0;
+ if (mtd->writesize <= 512) {
+ cmd_num--;
+ row_start = 1;
+ } else {
+ addrs[1] = cs | NFC_CMD_ALE | 0;
+ row_start = 2;
+ }
+
+ addrs[row_start] = cs | NFC_CMD_ALE | ROW_ADDER(page, 0);
+ addrs[row_start + 1] = cs | NFC_CMD_ALE | ROW_ADDER(page, 1);
+
+ if (nand->options & NAND_ROW_ADDR_3)
+ addrs[row_start + 2] =
+ cs | NFC_CMD_ALE | ROW_ADDER(page, 2);
+ else
+ cmd_num--;
+
+ /* subtract cmd1 */
+ cmd_num--;
+
+ for (i = 0; i < cmd_num; i++)
+ writel_relaxed(nfc->cmdfifo.cmd[i],
+ nfc->reg_base + NFC_REG_CMD);
+
+ if (in) {
+ nfc->cmdfifo.rw.cmd1 = cs | NFC_CMD_CLE | NAND_CMD_READSTART;
+ writel(nfc->cmdfifo.rw.cmd1, nfc->reg_base + NFC_REG_CMD);
+ meson_nfc_queue_rb(nfc, PSEC_TO_MSEC(sdr->tR_max));
+ } else {
+ meson_nfc_cmd_idle(nfc, nfc->timing.tadl);
+ }
+
+ return ret;
+}
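+
+/*
+ * Example trace (illustrative, page number chosen arbitrarily): for a
+ * large-page chip with NAND_ROW_ADDR_3 and page 0x12345, the command FIFO
+ * receives CLE(READ0 or SEQIN), then ALE cycles 0x00, 0x00 (column) and
+ * 0x45, 0x23, 0x01 (row); a read appends CLE(READSTART) and waits for R/B.
+ */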
+
+static int meson_nfc_write_page_sub(struct nand_chip *nand,
+ int page, int raw)
+{
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(nand_get_interface_config(nand));
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ struct meson_nfc *nfc = nand_get_controller_data(nand);
+ int data_len, info_len;
+ u32 cmd;
+ int ret;
+
+ meson_nfc_select_chip(nand, nand->cur_cs);
+
+ data_len = mtd->writesize + mtd->oobsize;
+ info_len = nand->ecc.steps * PER_INFO_BYTE;
+
+ ret = meson_nfc_rw_cmd_prepare_and_execute(nand, page, DIRWRITE);
+ if (ret)
+ return ret;
+
+ ret = meson_nfc_dma_buffer_setup(nand, meson_chip->data_buf,
+ data_len, meson_chip->info_buf,
+ info_len, DMA_TO_DEVICE);
+ if (ret)
+ return ret;
+
+ if (nand->options & NAND_NEED_SCRAMBLING) {
+ meson_nfc_cmd_seed(nfc, page);
+ meson_nfc_cmd_access(nand, raw, DIRWRITE,
+ NFC_CMD_SCRAMBLER_ENABLE);
+ } else {
+ meson_nfc_cmd_access(nand, raw, DIRWRITE,
+ NFC_CMD_SCRAMBLER_DISABLE);
+ }
+
+ cmd = nfc->param.chip_select | NFC_CMD_CLE | NAND_CMD_PAGEPROG;
+ writel(cmd, nfc->reg_base + NFC_REG_CMD);
+ meson_nfc_queue_rb(nfc, PSEC_TO_MSEC(sdr->tPROG_max));
+
+ meson_nfc_dma_buffer_release(nand, data_len, info_len, DMA_TO_DEVICE);
+
+ return ret;
+}
+
+static int meson_nfc_write_page_raw(struct nand_chip *nand, const u8 *buf,
+ int oob_required, int page)
+{
+ u8 *oob_buf = nand->oob_poi;
+
+ meson_nfc_set_data_oob(nand, buf, oob_buf);
+
+ return meson_nfc_write_page_sub(nand, page, 1);
+}
+
+static int meson_nfc_write_page_hwecc(struct nand_chip *nand,
+ const u8 *buf, int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ u8 *oob_buf = nand->oob_poi;
+
+ memcpy(meson_chip->data_buf, buf, mtd->writesize);
+ memset(meson_chip->info_buf, 0, nand->ecc.steps * PER_INFO_BYTE);
+ meson_nfc_set_user_byte(nand, oob_buf);
+
+ return meson_nfc_write_page_sub(nand, page, 0);
+}
+
+static void meson_nfc_check_ecc_pages_valid(struct meson_nfc *nfc,
+ struct nand_chip *nand, int raw)
+{
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ __le64 *info;
+ u32 neccpages;
+ int ret;
+
+ neccpages = raw ? 1 : nand->ecc.steps;
+ info = &meson_chip->info_buf[neccpages - 1];
+ do {
+ usleep_range(10, 15);
+		/* info is updated by the NFC DMA engine */
+ smp_rmb();
+ dma_sync_single_for_cpu(nfc->dev, nfc->iaddr, nfc->info_bytes,
+ DMA_FROM_DEVICE);
+ ret = *info & ECC_COMPLETE;
+ } while (!ret);
+}
+
+static int meson_nfc_read_page_sub(struct nand_chip *nand,
+ int page, int raw)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct meson_nfc *nfc = nand_get_controller_data(nand);
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ int data_len, info_len;
+ int ret;
+
+ meson_nfc_select_chip(nand, nand->cur_cs);
+
+ data_len = mtd->writesize + mtd->oobsize;
+ info_len = nand->ecc.steps * PER_INFO_BYTE;
+
+ ret = meson_nfc_rw_cmd_prepare_and_execute(nand, page, DIRREAD);
+ if (ret)
+ return ret;
+
+ ret = meson_nfc_dma_buffer_setup(nand, meson_chip->data_buf,
+ data_len, meson_chip->info_buf,
+ info_len, DMA_FROM_DEVICE);
+ if (ret)
+ return ret;
+
+ if (nand->options & NAND_NEED_SCRAMBLING) {
+ meson_nfc_cmd_seed(nfc, page);
+ meson_nfc_cmd_access(nand, raw, DIRREAD,
+ NFC_CMD_SCRAMBLER_ENABLE);
+ } else {
+ meson_nfc_cmd_access(nand, raw, DIRREAD,
+ NFC_CMD_SCRAMBLER_DISABLE);
+ }
+
+ ret = meson_nfc_wait_dma_finish(nfc);
+ meson_nfc_check_ecc_pages_valid(nfc, nand, raw);
+
+ meson_nfc_dma_buffer_release(nand, data_len, info_len, DMA_FROM_DEVICE);
+
+ return ret;
+}
+
+static int meson_nfc_read_page_raw(struct nand_chip *nand, u8 *buf,
+ int oob_required, int page)
+{
+ u8 *oob_buf = nand->oob_poi;
+ int ret;
+
+ ret = meson_nfc_read_page_sub(nand, page, 1);
+ if (ret)
+ return ret;
+
+ meson_nfc_get_data_oob(nand, buf, oob_buf);
+
+ return 0;
+}
+
+static int meson_nfc_read_page_hwecc(struct nand_chip *nand, u8 *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ struct nand_ecc_ctrl *ecc = &nand->ecc;
+ u64 correct_bitmap = 0;
+ u32 bitflips = 0;
+ u8 *oob_buf = nand->oob_poi;
+ int ret, i;
+
+ ret = meson_nfc_read_page_sub(nand, page, 0);
+ if (ret)
+ return ret;
+
+ meson_nfc_get_user_byte(nand, oob_buf);
+ ret = meson_nfc_ecc_correct(nand, &bitflips, &correct_bitmap);
+ if (ret == ECC_CHECK_RETURN_FF) {
+ if (buf)
+ memset(buf, 0xff, mtd->writesize);
+ memset(oob_buf, 0xff, mtd->oobsize);
+ } else if (ret < 0) {
+ if ((nand->options & NAND_NEED_SCRAMBLING) || !buf) {
+ mtd->ecc_stats.failed++;
+ return bitflips;
+ }
+ ret = meson_nfc_read_page_raw(nand, buf, 0, page);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < nand->ecc.steps ; i++) {
+ u8 *data = buf + i * ecc->size;
+ u8 *oob = nand->oob_poi + i * (ecc->bytes + 2);
+
+ if (correct_bitmap & BIT_ULL(i))
+ continue;
+ ret = nand_check_erased_ecc_chunk(data, ecc->size,
+ oob, ecc->bytes + 2,
+ NULL, 0,
+ ecc->strength);
+ if (ret < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += ret;
+ bitflips = max_t(u32, bitflips, ret);
+ }
+ }
+ } else if (buf && buf != meson_chip->data_buf) {
+ memcpy(buf, meson_chip->data_buf, mtd->writesize);
+ }
+
+ return bitflips;
+}
+
+static int meson_nfc_read_oob_raw(struct nand_chip *nand, int page)
+{
+ return meson_nfc_read_page_raw(nand, NULL, 1, page);
+}
+
+static int meson_nfc_read_oob(struct nand_chip *nand, int page)
+{
+ return meson_nfc_read_page_hwecc(nand, NULL, 1, page);
+}
+
+static bool meson_nfc_is_buffer_dma_safe(const void *buffer)
+{
+ if ((uintptr_t)buffer % DMA_ADDR_ALIGN)
+ return false;
+
+ if (virt_addr_valid(buffer) && (!object_is_on_stack(buffer)))
+ return true;
+ return false;
+}
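+
+/*
+ * Sketch (illustration only): buffers that fail the check above are
+ * bounced through a heap copy by the helpers below, e.g.:
+ *
+ *	u8 stack_buf[16];			// on-stack: not DMA safe
+ *	u8 *heap_buf = kmalloc(16, GFP_KERNEL);	// DMA safe if non-NULL
+ */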
+
+static void *
+meson_nand_op_get_dma_safe_input_buf(const struct nand_op_instr *instr)
+{
+ if (WARN_ON(instr->type != NAND_OP_DATA_IN_INSTR))
+ return NULL;
+
+ if (meson_nfc_is_buffer_dma_safe(instr->ctx.data.buf.in))
+ return instr->ctx.data.buf.in;
+
+ return kzalloc(instr->ctx.data.len, GFP_KERNEL);
+}
+
+static void
+meson_nand_op_put_dma_safe_input_buf(const struct nand_op_instr *instr,
+ void *buf)
+{
+ if (WARN_ON(instr->type != NAND_OP_DATA_IN_INSTR) ||
+ WARN_ON(!buf))
+ return;
+
+ if (buf == instr->ctx.data.buf.in)
+ return;
+
+ memcpy(instr->ctx.data.buf.in, buf, instr->ctx.data.len);
+ kfree(buf);
+}
+
+static void *
+meson_nand_op_get_dma_safe_output_buf(const struct nand_op_instr *instr)
+{
+ if (WARN_ON(instr->type != NAND_OP_DATA_OUT_INSTR))
+ return NULL;
+
+ if (meson_nfc_is_buffer_dma_safe(instr->ctx.data.buf.out))
+ return (void *)instr->ctx.data.buf.out;
+
+ return kmemdup(instr->ctx.data.buf.out,
+ instr->ctx.data.len, GFP_KERNEL);
+}
+
+static void
+meson_nand_op_put_dma_safe_output_buf(const struct nand_op_instr *instr,
+ const void *buf)
+{
+ if (WARN_ON(instr->type != NAND_OP_DATA_OUT_INSTR) ||
+ WARN_ON(!buf))
+ return;
+
+ if (buf != instr->ctx.data.buf.out)
+ kfree(buf);
+}
+
+static int meson_nfc_exec_op(struct nand_chip *nand,
+ const struct nand_operation *op, bool check_only)
+{
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ struct meson_nfc *nfc = nand_get_controller_data(nand);
+ const struct nand_op_instr *instr = NULL;
+ void *buf;
+ u32 op_id, delay_idle, cmd;
+ int i;
+
+ if (check_only)
+ return 0;
+
+ meson_nfc_select_chip(nand, op->cs);
+ for (op_id = 0; op_id < op->ninstrs; op_id++) {
+ instr = &op->instrs[op_id];
+ delay_idle = DIV_ROUND_UP(PSEC_TO_NSEC(instr->delay_ns),
+ meson_chip->level1_divider *
+ NFC_CLK_CYCLE);
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ cmd = nfc->param.chip_select | NFC_CMD_CLE;
+ cmd |= instr->ctx.cmd.opcode & 0xff;
+ writel(cmd, nfc->reg_base + NFC_REG_CMD);
+ meson_nfc_cmd_idle(nfc, delay_idle);
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ for (i = 0; i < instr->ctx.addr.naddrs; i++) {
+ cmd = nfc->param.chip_select | NFC_CMD_ALE;
+ cmd |= instr->ctx.addr.addrs[i] & 0xff;
+ writel(cmd, nfc->reg_base + NFC_REG_CMD);
+ }
+ meson_nfc_cmd_idle(nfc, delay_idle);
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ buf = meson_nand_op_get_dma_safe_input_buf(instr);
+ if (!buf)
+ return -ENOMEM;
+ meson_nfc_read_buf(nand, buf, instr->ctx.data.len);
+ meson_nand_op_put_dma_safe_input_buf(instr, buf);
+ break;
+
+ case NAND_OP_DATA_OUT_INSTR:
+ buf = meson_nand_op_get_dma_safe_output_buf(instr);
+ if (!buf)
+ return -ENOMEM;
+ meson_nfc_write_buf(nand, buf, instr->ctx.data.len);
+ meson_nand_op_put_dma_safe_output_buf(instr, buf);
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ meson_nfc_queue_rb(nfc, instr->ctx.waitrdy.timeout_ms);
+ if (instr->delay_ns)
+ meson_nfc_cmd_idle(nfc, delay_idle);
+ break;
+ }
+ }
+ meson_nfc_wait_cmd_finish(nfc, 1000);
+ return 0;
+}
+
+static int meson_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+
+ if (section >= nand->ecc.steps)
+ return -ERANGE;
+
+ oobregion->offset = 2 + (section * (2 + nand->ecc.bytes));
+ oobregion->length = nand->ecc.bytes;
+
+ return 0;
+}
+
+static int meson_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+
+ if (section >= nand->ecc.steps)
+ return -ERANGE;
+
+ oobregion->offset = section * (2 + nand->ecc.bytes);
+ oobregion->length = 2;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops meson_ooblayout_ops = {
+ .ecc = meson_ooblayout_ecc,
+ .free = meson_ooblayout_free,
+};
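+
+/*
+ * Resulting OOB map (illustrative, assuming ecc.bytes = 14): each section
+ * holds 2 free user bytes followed by the ECC bytes:
+ *
+ *	section 0: free @ bytes 0-1,   ECC @ bytes 2-15
+ *	section 1: free @ bytes 16-17, ECC @ bytes 18-31
+ */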
+
+static int meson_nfc_clk_init(struct meson_nfc *nfc)
+{
+ int ret;
+
+ /* request core clock */
+ nfc->core_clk = devm_clk_get(nfc->dev, "core");
+ if (IS_ERR(nfc->core_clk)) {
+ dev_err(nfc->dev, "failed to get core clock\n");
+ return PTR_ERR(nfc->core_clk);
+ }
+
+ nfc->device_clk = devm_clk_get(nfc->dev, "device");
+ if (IS_ERR(nfc->device_clk)) {
+ dev_err(nfc->dev, "failed to get device clock\n");
+ return PTR_ERR(nfc->device_clk);
+ }
+
+ nfc->phase_tx = devm_clk_get(nfc->dev, "tx");
+ if (IS_ERR(nfc->phase_tx)) {
+ dev_err(nfc->dev, "failed to get TX clk\n");
+ return PTR_ERR(nfc->phase_tx);
+ }
+
+ nfc->phase_rx = devm_clk_get(nfc->dev, "rx");
+ if (IS_ERR(nfc->phase_rx)) {
+ dev_err(nfc->dev, "failed to get RX clk\n");
+ return PTR_ERR(nfc->phase_rx);
+ }
+
+ /* init SD_EMMC_CLOCK to sane defaults w/min clock rate */
+ regmap_update_bits(nfc->reg_clk,
+ 0, CLK_SELECT_NAND, CLK_SELECT_NAND);
+
+ ret = clk_prepare_enable(nfc->core_clk);
+ if (ret) {
+ dev_err(nfc->dev, "failed to enable core clock\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(nfc->device_clk);
+ if (ret) {
+ dev_err(nfc->dev, "failed to enable device clock\n");
+ goto err_device_clk;
+ }
+
+ ret = clk_prepare_enable(nfc->phase_tx);
+ if (ret) {
+ dev_err(nfc->dev, "failed to enable TX clock\n");
+ goto err_phase_tx;
+ }
+
+ ret = clk_prepare_enable(nfc->phase_rx);
+ if (ret) {
+ dev_err(nfc->dev, "failed to enable RX clock\n");
+ goto err_phase_rx;
+ }
+
+ ret = clk_set_rate(nfc->device_clk, 24000000);
+ if (ret)
+ goto err_disable_rx;
+
+ return 0;
+
+err_disable_rx:
+ clk_disable_unprepare(nfc->phase_rx);
+err_phase_rx:
+ clk_disable_unprepare(nfc->phase_tx);
+err_phase_tx:
+ clk_disable_unprepare(nfc->device_clk);
+err_device_clk:
+ clk_disable_unprepare(nfc->core_clk);
+ return ret;
+}
+
+static void meson_nfc_disable_clk(struct meson_nfc *nfc)
+{
+ clk_disable_unprepare(nfc->phase_rx);
+ clk_disable_unprepare(nfc->phase_tx);
+ clk_disable_unprepare(nfc->device_clk);
+ clk_disable_unprepare(nfc->core_clk);
+}
+
+static void meson_nfc_free_buffer(struct nand_chip *nand)
+{
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+
+ kfree(meson_chip->info_buf);
+ kfree(meson_chip->data_buf);
+}
+
+static int meson_chip_buffer_init(struct nand_chip *nand)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ u32 page_bytes, info_bytes, nsectors;
+
+ nsectors = mtd->writesize / nand->ecc.size;
+
+ page_bytes = mtd->writesize + mtd->oobsize;
+ info_bytes = nsectors * PER_INFO_BYTE;
+
+ meson_chip->data_buf = kmalloc(page_bytes, GFP_KERNEL);
+ if (!meson_chip->data_buf)
+ return -ENOMEM;
+
+ meson_chip->info_buf = kmalloc(info_bytes, GFP_KERNEL);
+ if (!meson_chip->info_buf) {
+ kfree(meson_chip->data_buf);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static
+int meson_nfc_setup_interface(struct nand_chip *nand, int csline,
+ const struct nand_interface_config *conf)
+{
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ const struct nand_sdr_timings *timings;
+ u32 div, bt_min, bt_max, tbers_clocks;
+
+ timings = nand_get_sdr_timings(conf);
+ if (IS_ERR(timings))
+ return -ENOTSUPP;
+
+ if (csline == NAND_DATA_IFACE_CHECK_ONLY)
+ return 0;
+
+ div = DIV_ROUND_UP((timings->tRC_min / 1000), NFC_CLK_CYCLE);
+ bt_min = (timings->tREA_max + NFC_DEFAULT_DELAY) / div;
+ bt_max = (NFC_DEFAULT_DELAY + timings->tRHOH_min +
+ timings->tRC_min / 2) / div;
+
+ meson_chip->twb = DIV_ROUND_UP(PSEC_TO_NSEC(timings->tWB_max),
+ div * NFC_CLK_CYCLE);
+ meson_chip->tadl = DIV_ROUND_UP(PSEC_TO_NSEC(timings->tADL_min),
+ div * NFC_CLK_CYCLE);
+ tbers_clocks = DIV_ROUND_UP_ULL(PSEC_TO_NSEC(timings->tBERS_max),
+ div * NFC_CLK_CYCLE);
+ meson_chip->tbers_max = ilog2(tbers_clocks);
+ if (!is_power_of_2(tbers_clocks))
+ meson_chip->tbers_max++;
+
+ bt_min = DIV_ROUND_UP(bt_min, 1000);
+ bt_max = DIV_ROUND_UP(bt_max, 1000);
+
+ if (bt_max < bt_min)
+ return -EINVAL;
+
+ meson_chip->level1_divider = div;
+ meson_chip->clk_rate = 1000000000 / meson_chip->level1_divider;
+ meson_chip->bus_timing = (bt_min + bt_max) / 2 + 1;
+
+ return 0;
+}
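+
+/*
+ * Numeric sketch (hand-computed, for illustration): with typical SDR
+ * timing mode 0 values, tRC_min = 100000 ps, so
+ *
+ *	div      = DIV_ROUND_UP(100000 / 1000, 6) = 17
+ *	clk_rate = 1000000000 / 17 ~= 58.8 MHz
+ *
+ * bus_timing then lands midway between the bt_min and bt_max windows,
+ * plus one cycle of margin.
+ */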
+
+static int meson_nand_bch_mode(struct nand_chip *nand)
+{
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ int i;
+
+ if (nand->ecc.strength > 60 || nand->ecc.strength < 8)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(meson_ecc); i++) {
+ if (meson_ecc[i].strength == nand->ecc.strength) {
+ meson_chip->bch_mode = meson_ecc[i].bch;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static void meson_nand_detach_chip(struct nand_chip *nand)
+{
+ meson_nfc_free_buffer(nand);
+}
+
+static int meson_nand_attach_chip(struct nand_chip *nand)
+{
+ struct meson_nfc *nfc = nand_get_controller_data(nand);
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ int ret;
+
+ if (!mtd->name) {
+ mtd->name = devm_kasprintf(nfc->dev, GFP_KERNEL,
+ "%s:nand%d",
+ dev_name(nfc->dev),
+ meson_chip->sels[0]);
+ if (!mtd->name)
+ return -ENOMEM;
+ }
+
+ if (nand->bbt_options & NAND_BBT_USE_FLASH)
+ nand->bbt_options |= NAND_BBT_NO_OOB;
+
+ nand->options |= NAND_NO_SUBPAGE_WRITE;
+
+ ret = nand_ecc_choose_conf(nand, nfc->data->ecc_caps,
+ mtd->oobsize - 2);
+ if (ret) {
+		dev_err(nfc->dev, "failed to init ECC\n");
+ return -EINVAL;
+ }
+
+ mtd_set_ooblayout(mtd, &meson_ooblayout_ops);
+
+ ret = meson_nand_bch_mode(nand);
+ if (ret)
+ return -EINVAL;
+
+ nand->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+ nand->ecc.write_page_raw = meson_nfc_write_page_raw;
+ nand->ecc.write_page = meson_nfc_write_page_hwecc;
+ nand->ecc.write_oob_raw = nand_write_oob_std;
+ nand->ecc.write_oob = nand_write_oob_std;
+
+ nand->ecc.read_page_raw = meson_nfc_read_page_raw;
+ nand->ecc.read_page = meson_nfc_read_page_hwecc;
+ nand->ecc.read_oob_raw = meson_nfc_read_oob_raw;
+ nand->ecc.read_oob = meson_nfc_read_oob;
+
+ if (nand->options & NAND_BUSWIDTH_16) {
+		dev_err(nfc->dev, "16-bit bus width not supported");
+ return -EINVAL;
+ }
+ ret = meson_chip_buffer_init(nand);
+ if (ret)
+ return -ENOMEM;
+
+ return ret;
+}
+
+static const struct nand_controller_ops meson_nand_controller_ops = {
+ .attach_chip = meson_nand_attach_chip,
+ .detach_chip = meson_nand_detach_chip,
+ .setup_interface = meson_nfc_setup_interface,
+ .exec_op = meson_nfc_exec_op,
+};
+
+static int
+meson_nfc_nand_chip_init(struct device *dev,
+ struct meson_nfc *nfc, struct device_node *np)
+{
+ struct meson_nfc_nand_chip *meson_chip;
+ struct nand_chip *nand;
+ struct mtd_info *mtd;
+ int ret, i;
+ u32 tmp, nsels;
+
+ nsels = of_property_count_elems_of_size(np, "reg", sizeof(u32));
+ if (!nsels || nsels > MAX_CE_NUM) {
+ dev_err(dev, "invalid register property size\n");
+ return -EINVAL;
+ }
+
+ meson_chip = devm_kzalloc(dev, struct_size(meson_chip, sels, nsels),
+ GFP_KERNEL);
+ if (!meson_chip)
+ return -ENOMEM;
+
+ meson_chip->nsels = nsels;
+
+ for (i = 0; i < nsels; i++) {
+ ret = of_property_read_u32_index(np, "reg", i, &tmp);
+ if (ret) {
+ dev_err(dev, "could not retrieve register property: %d\n",
+ ret);
+ return ret;
+ }
+
+ if (test_and_set_bit(tmp, &nfc->assigned_cs)) {
+ dev_err(dev, "CS %d already assigned\n", tmp);
+ return -EINVAL;
+ }
+ }
+
+ nand = &meson_chip->nand;
+ nand->controller = &nfc->controller;
+ nand->controller->ops = &meson_nand_controller_ops;
+ nand_set_flash_node(nand, np);
+ nand_set_controller_data(nand, nfc);
+
+ nand->options |= NAND_USES_DMA;
+ mtd = nand_to_mtd(nand);
+ mtd->owner = THIS_MODULE;
+ mtd->dev.parent = dev;
+
+ ret = nand_scan(nand, nsels);
+ if (ret)
+ return ret;
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret) {
+ dev_err(dev, "failed to register MTD device: %d\n", ret);
+ nand_cleanup(nand);
+ return ret;
+ }
+
+ list_add_tail(&meson_chip->node, &nfc->chips);
+
+ return 0;
+}
+
+static int meson_nfc_nand_chip_cleanup(struct meson_nfc *nfc)
+{
+ struct meson_nfc_nand_chip *meson_chip;
+ struct mtd_info *mtd;
+ int ret;
+
+ while (!list_empty(&nfc->chips)) {
+ meson_chip = list_first_entry(&nfc->chips,
+ struct meson_nfc_nand_chip, node);
+ mtd = nand_to_mtd(&meson_chip->nand);
+ ret = mtd_device_unregister(mtd);
+ if (ret)
+ return ret;
+
+ nand_cleanup(&meson_chip->nand);
+ list_del(&meson_chip->node);
+ }
+
+ return 0;
+}
+
+static int meson_nfc_nand_chips_init(struct device *dev,
+ struct meson_nfc *nfc)
+{
+ struct device_node *np = dev->of_node;
+ struct device_node *nand_np;
+ int ret;
+
+ for_each_child_of_node(np, nand_np) {
+ ret = meson_nfc_nand_chip_init(dev, nfc, nand_np);
+ if (ret) {
+ meson_nfc_nand_chip_cleanup(nfc);
+ of_node_put(nand_np);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static irqreturn_t meson_nfc_irq(int irq, void *id)
+{
+ struct meson_nfc *nfc = id;
+ u32 cfg;
+
+ cfg = readl(nfc->reg_base + NFC_REG_CFG);
+ if (!(cfg & NFC_RB_IRQ_EN))
+ return IRQ_NONE;
+
+ cfg &= ~(NFC_RB_IRQ_EN);
+ writel(cfg, nfc->reg_base + NFC_REG_CFG);
+
+ complete(&nfc->completion);
+ return IRQ_HANDLED;
+}
+
+static const struct meson_nfc_data meson_gxl_data = {
+ .ecc_caps = &meson_gxl_ecc_caps,
+};
+
+static const struct meson_nfc_data meson_axg_data = {
+ .ecc_caps = &meson_axg_ecc_caps,
+};
+
+static const struct of_device_id meson_nfc_id_table[] = {
+ {
+ .compatible = "amlogic,meson-gxl-nfc",
+ .data = &meson_gxl_data,
+ }, {
+ .compatible = "amlogic,meson-axg-nfc",
+ .data = &meson_axg_data,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, meson_nfc_id_table);
+
+static int meson_nfc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct meson_nfc *nfc;
+ struct resource *res;
+ int ret, irq;
+
+ nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL);
+ if (!nfc)
+ return -ENOMEM;
+
+ nfc->data = of_device_get_match_data(&pdev->dev);
+ if (!nfc->data)
+ return -ENODEV;
+
+ nand_controller_init(&nfc->controller);
+ INIT_LIST_HEAD(&nfc->chips);
+ init_completion(&nfc->completion);
+
+ nfc->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ nfc->reg_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(nfc->reg_base))
+ return PTR_ERR(nfc->reg_base);
+
+ nfc->reg_clk =
+ syscon_regmap_lookup_by_phandle(dev->of_node,
+ "amlogic,mmc-syscon");
+ if (IS_ERR(nfc->reg_clk)) {
+		dev_err(dev, "Failed to look up clock base\n");
+ return PTR_ERR(nfc->reg_clk);
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return -EINVAL;
+
+ ret = meson_nfc_clk_init(nfc);
+ if (ret) {
+ dev_err(dev, "failed to initialize NAND clock\n");
+ return ret;
+ }
+
+ writel(0, nfc->reg_base + NFC_REG_CFG);
+ ret = devm_request_irq(dev, irq, meson_nfc_irq, 0, dev_name(dev), nfc);
+ if (ret) {
+ dev_err(dev, "failed to request NFC IRQ\n");
+ ret = -EINVAL;
+ goto err_clk;
+ }
+
+ ret = dma_set_mask(dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(dev, "failed to set DMA mask\n");
+ goto err_clk;
+ }
+
+ platform_set_drvdata(pdev, nfc);
+
+ ret = meson_nfc_nand_chips_init(dev, nfc);
+ if (ret) {
+ dev_err(dev, "failed to init NAND chips\n");
+ goto err_clk;
+ }
+
+ return 0;
+err_clk:
+ meson_nfc_disable_clk(nfc);
+ return ret;
+}
+
+static int meson_nfc_remove(struct platform_device *pdev)
+{
+ struct meson_nfc *nfc = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = meson_nfc_nand_chip_cleanup(nfc);
+ if (ret)
+ return ret;
+
+ meson_nfc_disable_clk(nfc);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver meson_nfc_driver = {
+ .probe = meson_nfc_probe,
+ .remove = meson_nfc_remove,
+ .driver = {
+ .name = "meson-nand",
+ .of_match_table = meson_nfc_id_table,
+ },
+};
+module_platform_driver(meson_nfc_driver);
+
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_AUTHOR("Liang Yang <liang.yang@amlogic.com>");
+MODULE_DESCRIPTION("Amlogic's Meson NAND Flash Controller driver");
diff --git a/drivers/mtd/nand/raw/mpc5121_nfc.c b/drivers/mtd/nand/raw/mpc5121_nfc.c
new file mode 100644
index 000000000..5b9271b9c
--- /dev/null
+++ b/drivers/mtd/nand/raw/mpc5121_nfc.c
@@ -0,0 +1,859 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2004-2008 Freescale Semiconductor, Inc.
+ * Copyright 2009 Semihalf.
+ *
+ * Approved as OSADL project by a majority of OSADL members and funded
+ * by OSADL membership fees in 2009; for details see www.osadl.org.
+ *
+ * Based on original driver from Freescale Semiconductor
+ * written by John Rigby <jrigby@freescale.com> on basis of mxc_nand.c.
+ * Reworked and extended by Piotr Ziecik <kosmo@semihalf.com>.
+ */
+
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/gfp.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+
+#include <asm/mpc5121.h>
+
+/* Addresses for NFC MAIN RAM BUFFER areas */
+#define NFC_MAIN_AREA(n) ((n) * 0x200)
+
+/* Addresses for NFC SPARE BUFFER areas */
+#define NFC_SPARE_BUFFERS 8
+#define NFC_SPARE_LEN 0x40
+#define NFC_SPARE_AREA(n) (0x1000 + ((n) * NFC_SPARE_LEN))
+
+/* MPC5121 NFC registers */
+#define NFC_BUF_ADDR 0x1E04
+#define NFC_FLASH_ADDR 0x1E06
+#define NFC_FLASH_CMD 0x1E08
+#define NFC_CONFIG 0x1E0A
+#define NFC_ECC_STATUS1 0x1E0C
+#define NFC_ECC_STATUS2 0x1E0E
+#define NFC_SPAS 0x1E10
+#define NFC_WRPROT 0x1E12
+#define NFC_NF_WRPRST 0x1E18
+#define NFC_CONFIG1 0x1E1A
+#define NFC_CONFIG2 0x1E1C
+#define NFC_UNLOCKSTART_BLK0 0x1E20
+#define NFC_UNLOCKEND_BLK0 0x1E22
+#define NFC_UNLOCKSTART_BLK1 0x1E24
+#define NFC_UNLOCKEND_BLK1 0x1E26
+#define NFC_UNLOCKSTART_BLK2 0x1E28
+#define NFC_UNLOCKEND_BLK2 0x1E2A
+#define NFC_UNLOCKSTART_BLK3 0x1E2C
+#define NFC_UNLOCKEND_BLK3 0x1E2E
+
+/* Bit Definitions: NFC_BUF_ADDR */
+#define NFC_RBA_MASK (7 << 0)
+#define NFC_ACTIVE_CS_SHIFT 5
+#define NFC_ACTIVE_CS_MASK (3 << NFC_ACTIVE_CS_SHIFT)
+
+/* Bit Definitions: NFC_CONFIG */
+#define NFC_BLS_UNLOCKED (1 << 1)
+
+/* Bit Definitions: NFC_CONFIG1 */
+#define NFC_ECC_4BIT (1 << 0)
+#define NFC_FULL_PAGE_DMA (1 << 1)
+#define NFC_SPARE_ONLY (1 << 2)
+#define NFC_ECC_ENABLE (1 << 3)
+#define NFC_INT_MASK (1 << 4)
+#define NFC_BIG_ENDIAN (1 << 5)
+#define NFC_RESET (1 << 6)
+#define NFC_CE (1 << 7)
+#define NFC_ONE_CYCLE (1 << 8)
+#define NFC_PPB_32 (0 << 9)
+#define NFC_PPB_64 (1 << 9)
+#define NFC_PPB_128 (2 << 9)
+#define NFC_PPB_256 (3 << 9)
+#define NFC_PPB_MASK (3 << 9)
+#define NFC_FULL_PAGE_INT (1 << 11)
+
+/* Bit Definitions: NFC_CONFIG2 */
+#define NFC_COMMAND (1 << 0)
+#define NFC_ADDRESS (1 << 1)
+#define NFC_INPUT (1 << 2)
+#define NFC_OUTPUT (1 << 3)
+#define NFC_ID (1 << 4)
+#define NFC_STATUS (1 << 5)
+#define NFC_CMD_FAIL (1 << 15)
+#define NFC_INT (1 << 15)
+
+/* Bit Definitions: NFC_WRPROT */
+#define NFC_WPC_LOCK_TIGHT (1 << 0)
+#define NFC_WPC_LOCK (1 << 1)
+#define NFC_WPC_UNLOCK (1 << 2)
+
+#define DRV_NAME "mpc5121_nfc"
+
+/* Timeouts */
+#define NFC_RESET_TIMEOUT 1000 /* 1 ms */
+#define NFC_TIMEOUT (HZ / 10) /* 1/10 s */
+
+struct mpc5121_nfc_prv {
+ struct nand_controller controller;
+ struct nand_chip chip;
+ int irq;
+ void __iomem *regs;
+ struct clk *clk;
+ wait_queue_head_t irq_waitq;
+ uint column;
+ int spareonly;
+ void __iomem *csreg;
+ struct device *dev;
+};
+
+static void mpc5121_nfc_done(struct mtd_info *mtd);
+
+/* Read NFC register */
+static inline u16 nfc_read(struct mtd_info *mtd, uint reg)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mpc5121_nfc_prv *prv = nand_get_controller_data(chip);
+
+ return in_be16(prv->regs + reg);
+}
+
+/* Write NFC register */
+static inline void nfc_write(struct mtd_info *mtd, uint reg, u16 val)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mpc5121_nfc_prv *prv = nand_get_controller_data(chip);
+
+ out_be16(prv->regs + reg, val);
+}
+
+/* Set bits in NFC register */
+static inline void nfc_set(struct mtd_info *mtd, uint reg, u16 bits)
+{
+ nfc_write(mtd, reg, nfc_read(mtd, reg) | bits);
+}
+
+/* Clear bits in NFC register */
+static inline void nfc_clear(struct mtd_info *mtd, uint reg, u16 bits)
+{
+ nfc_write(mtd, reg, nfc_read(mtd, reg) & ~bits);
+}
+
+/* Invoke address cycle */
+static inline void mpc5121_nfc_send_addr(struct mtd_info *mtd, u16 addr)
+{
+ nfc_write(mtd, NFC_FLASH_ADDR, addr);
+ nfc_write(mtd, NFC_CONFIG2, NFC_ADDRESS);
+ mpc5121_nfc_done(mtd);
+}
+
+/* Invoke command cycle */
+static inline void mpc5121_nfc_send_cmd(struct mtd_info *mtd, u16 cmd)
+{
+ nfc_write(mtd, NFC_FLASH_CMD, cmd);
+ nfc_write(mtd, NFC_CONFIG2, NFC_COMMAND);
+ mpc5121_nfc_done(mtd);
+}
+
+/* Send data from NFC buffers to NAND flash */
+static inline void mpc5121_nfc_send_prog_page(struct mtd_info *mtd)
+{
+ nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
+ nfc_write(mtd, NFC_CONFIG2, NFC_INPUT);
+ mpc5121_nfc_done(mtd);
+}
+
+/* Receive data from NAND flash */
+static inline void mpc5121_nfc_send_read_page(struct mtd_info *mtd)
+{
+ nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
+ nfc_write(mtd, NFC_CONFIG2, NFC_OUTPUT);
+ mpc5121_nfc_done(mtd);
+}
+
+/* Receive ID from NAND flash */
+static inline void mpc5121_nfc_send_read_id(struct mtd_info *mtd)
+{
+ nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
+ nfc_write(mtd, NFC_CONFIG2, NFC_ID);
+ mpc5121_nfc_done(mtd);
+}
+
+/* Receive status from NAND flash */
+static inline void mpc5121_nfc_send_read_status(struct mtd_info *mtd)
+{
+ nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
+ nfc_write(mtd, NFC_CONFIG2, NFC_STATUS);
+ mpc5121_nfc_done(mtd);
+}
+
+/* NFC interrupt handler */
+static irqreturn_t mpc5121_nfc_irq(int irq, void *data)
+{
+ struct mtd_info *mtd = data;
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mpc5121_nfc_prv *prv = nand_get_controller_data(chip);
+
+ nfc_set(mtd, NFC_CONFIG1, NFC_INT_MASK);
+ wake_up(&prv->irq_waitq);
+
+ return IRQ_HANDLED;
+}
+
+/* Wait for operation complete */
+static void mpc5121_nfc_done(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mpc5121_nfc_prv *prv = nand_get_controller_data(chip);
+ int rv;
+
+ if ((nfc_read(mtd, NFC_CONFIG2) & NFC_INT) == 0) {
+ nfc_clear(mtd, NFC_CONFIG1, NFC_INT_MASK);
+ rv = wait_event_timeout(prv->irq_waitq,
+ (nfc_read(mtd, NFC_CONFIG2) & NFC_INT), NFC_TIMEOUT);
+
+ if (!rv)
+ dev_warn(prv->dev,
+ "Timeout while waiting for interrupt.\n");
+ }
+
+ nfc_clear(mtd, NFC_CONFIG2, NFC_INT);
+}
+
+/* Do address cycle(s) */
+static void mpc5121_nfc_addr_cycle(struct mtd_info *mtd, int column, int page)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ u32 pagemask = chip->pagemask;
+
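+	/*
+	 * Example (typical large-page chip, values assumed): a 2048-byte
+	 * page needs two column address cycles; a chip with pagemask
+	 * 0xFFFF then needs two row address cycles.
+	 */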
+ if (column != -1) {
+ mpc5121_nfc_send_addr(mtd, column);
+ if (mtd->writesize > 512)
+ mpc5121_nfc_send_addr(mtd, column >> 8);
+ }
+
+ if (page != -1) {
+ do {
+ mpc5121_nfc_send_addr(mtd, page & 0xFF);
+ page >>= 8;
+ pagemask >>= 8;
+ } while (pagemask);
+ }
+}
+
+/* Control chip select signals */
+static void mpc5121_nfc_select_chip(struct nand_chip *nand, int chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand);
+
+ if (chip < 0) {
+ nfc_clear(mtd, NFC_CONFIG1, NFC_CE);
+ return;
+ }
+
+ nfc_clear(mtd, NFC_BUF_ADDR, NFC_ACTIVE_CS_MASK);
+ nfc_set(mtd, NFC_BUF_ADDR, (chip << NFC_ACTIVE_CS_SHIFT) &
+ NFC_ACTIVE_CS_MASK);
+ nfc_set(mtd, NFC_CONFIG1, NFC_CE);
+}
+
+/* Init external chip select logic on ADS5121 board */
+static int ads5121_chipselect_init(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mpc5121_nfc_prv *prv = nand_get_controller_data(chip);
+ struct device_node *dn;
+
+ dn = of_find_compatible_node(NULL, NULL, "fsl,mpc5121ads-cpld");
+ if (dn) {
+ prv->csreg = of_iomap(dn, 0);
+ of_node_put(dn);
+ if (!prv->csreg)
+ return -ENOMEM;
+
+ /* CPLD Register 9 controls NAND /CE Lines */
+ prv->csreg += 9;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/* Control chip select signals on ADS5121 board */
+static void ads5121_select_chip(struct nand_chip *nand, int chip)
+{
+ struct mpc5121_nfc_prv *prv = nand_get_controller_data(nand);
+ u8 v;
+
+ v = in_8(prv->csreg);
+ v |= 0x0F;
+
+ if (chip >= 0) {
+ mpc5121_nfc_select_chip(nand, 0);
+ v &= ~(1 << chip);
+	} else {
+		mpc5121_nfc_select_chip(nand, -1);
+	}
+
+ out_8(prv->csreg, v);
+}
+
+/* Read NAND Ready/Busy signal */
+static int mpc5121_nfc_dev_ready(struct nand_chip *nand)
+{
+ /*
+	 * The NFC handles the ready/busy signal internally, so this
+	 * function always reports the device as ready.
+ */
+ return 1;
+}
+
+/* Write command to NAND flash */
+static void mpc5121_nfc_command(struct nand_chip *chip, unsigned command,
+ int column, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct mpc5121_nfc_prv *prv = nand_get_controller_data(chip);
+
+ prv->column = (column >= 0) ? column : 0;
+ prv->spareonly = 0;
+
+ switch (command) {
+ case NAND_CMD_PAGEPROG:
+ mpc5121_nfc_send_prog_page(mtd);
+ break;
+ /*
+ * NFC does not support sub-page reads and writes,
+ * so emulate them using full page transfers.
+ */
+ case NAND_CMD_READ0:
+ column = 0;
+ break;
+
+ case NAND_CMD_READ1:
+ prv->column += 256;
+ command = NAND_CMD_READ0;
+ column = 0;
+ break;
+
+ case NAND_CMD_READOOB:
+ prv->spareonly = 1;
+ command = NAND_CMD_READ0;
+ column = 0;
+ break;
+
+ case NAND_CMD_SEQIN:
+ mpc5121_nfc_command(chip, NAND_CMD_READ0, column, page);
+ column = 0;
+ break;
+
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ case NAND_CMD_READID:
+ case NAND_CMD_STATUS:
+ break;
+
+ default:
+ return;
+ }
+
+ mpc5121_nfc_send_cmd(mtd, command);
+ mpc5121_nfc_addr_cycle(mtd, column, page);
+
+ switch (command) {
+ case NAND_CMD_READ0:
+ if (mtd->writesize > 512)
+ mpc5121_nfc_send_cmd(mtd, NAND_CMD_READSTART);
+ mpc5121_nfc_send_read_page(mtd);
+ break;
+
+ case NAND_CMD_READID:
+ mpc5121_nfc_send_read_id(mtd);
+ break;
+
+ case NAND_CMD_STATUS:
+ mpc5121_nfc_send_read_status(mtd);
+ if (chip->options & NAND_BUSWIDTH_16)
+ prv->column = 1;
+ else
+ prv->column = 0;
+ break;
+ }
+}
+
+/* Copy data from/to NFC spare buffers. */
+static void mpc5121_nfc_copy_spare(struct mtd_info *mtd, uint offset,
+ u8 *buffer, uint size, int wr)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct mpc5121_nfc_prv *prv = nand_get_controller_data(nand);
+ uint o, s, sbsize, blksize;
+
+ /*
+ * NAND spare area is available through NFC spare buffers.
+ * The NFC divides spare area into (page_size / 512) chunks.
+ * Each chunk is placed into separate spare memory area, using
+ * first (spare_size / num_of_chunks) bytes of the buffer.
+ *
+	 * For NAND devices whose spare area is not evenly divisible by
+	 * the number of chunks, the number of used bytes in each spare
+	 * buffer is rounded down to the nearest even number of bytes,
+	 * and all remaining bytes are added to the last used spare area.
+ *
+ * For more information read section 26.6.10 of MPC5121e
+ * Microcontroller Reference Manual, Rev. 3.
+ */
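+	/*
+	 * Worked example (values assumed): a 2048-byte page with a
+	 * 64-byte spare area yields 2048 / 512 = 4 chunks, so sbsize =
+	 * (64 / 4) & ~1 = 16 bytes per spare buffer; a request at
+	 * offset 20 then lands in spare buffer 1 at offset 4.
+	 */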
+
+ /* Calculate number of valid bytes in each spare buffer */
+ sbsize = (mtd->oobsize / (mtd->writesize / 512)) & ~1;
+
+ while (size) {
+ /* Calculate spare buffer number */
+ s = offset / sbsize;
+ if (s > NFC_SPARE_BUFFERS - 1)
+ s = NFC_SPARE_BUFFERS - 1;
+
+ /*
+ * Calculate offset to requested data block in selected spare
+ * buffer and its size.
+ */
+ o = offset - (s * sbsize);
+ blksize = min(sbsize - o, size);
+
+ if (wr)
+ memcpy_toio(prv->regs + NFC_SPARE_AREA(s) + o,
+ buffer, blksize);
+ else
+ memcpy_fromio(buffer,
+ prv->regs + NFC_SPARE_AREA(s) + o, blksize);
+
+ buffer += blksize;
+ offset += blksize;
+ size -= blksize;
+ }
+}
+
+/* Copy data from/to NFC main and spare buffers */
+static void mpc5121_nfc_buf_copy(struct mtd_info *mtd, u_char *buf, int len,
+ int wr)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mpc5121_nfc_prv *prv = nand_get_controller_data(chip);
+ uint c = prv->column;
+ uint l;
+
+ /* Handle spare area access */
+ if (prv->spareonly || c >= mtd->writesize) {
+ /* Calculate offset from beginning of spare area */
+ if (c >= mtd->writesize)
+ c -= mtd->writesize;
+
+ prv->column += len;
+ mpc5121_nfc_copy_spare(mtd, c, buf, len, wr);
+ return;
+ }
+
+ /*
+ * Handle main area access - limit copy length to prevent
+ * crossing main/spare boundary.
+ */
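+	/*
+	 * e.g. (2048-byte page, assumed): a 64-byte copy starting at
+	 * column 2040 takes 8 bytes from the main buffer, then recurses
+	 * to move the remaining 56 bytes via the spare buffers.
+	 */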
+ l = min((uint)len, mtd->writesize - c);
+ prv->column += l;
+
+ if (wr)
+ memcpy_toio(prv->regs + NFC_MAIN_AREA(0) + c, buf, l);
+ else
+ memcpy_fromio(buf, prv->regs + NFC_MAIN_AREA(0) + c, l);
+
+ /* Handle crossing main/spare boundary */
+ if (l != len) {
+ buf += l;
+ len -= l;
+ mpc5121_nfc_buf_copy(mtd, buf, len, wr);
+ }
+}
+
+/* Read data from NFC buffers */
+static void mpc5121_nfc_read_buf(struct nand_chip *chip, u_char *buf, int len)
+{
+ mpc5121_nfc_buf_copy(nand_to_mtd(chip), buf, len, 0);
+}
+
+/* Write data to NFC buffers */
+static void mpc5121_nfc_write_buf(struct nand_chip *chip, const u_char *buf,
+ int len)
+{
+ mpc5121_nfc_buf_copy(nand_to_mtd(chip), (u_char *)buf, len, 1);
+}
+
+/* Read byte from NFC buffers */
+static u8 mpc5121_nfc_read_byte(struct nand_chip *chip)
+{
+ u8 tmp;
+
+ mpc5121_nfc_read_buf(chip, &tmp, sizeof(tmp));
+
+ return tmp;
+}
+
+/*
+ * Read NFC configuration from Reset Config Word
+ *
+ * The NFC is configured during reset on the basis of information
+ * stored in the Reset Config Word. There is no other way to set the
+ * NAND page size, spare size and bus width.
+ */
+static int mpc5121_nfc_read_hw_config(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mpc5121_nfc_prv *prv = nand_get_controller_data(chip);
+ struct mpc512x_reset_module *rm;
+ struct device_node *rmnode;
+ uint rcw_pagesize = 0;
+ uint rcw_sparesize = 0;
+ uint rcw_width;
+ uint rcwh;
+ uint romloc, ps;
+ int ret = 0;
+
+ rmnode = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-reset");
+ if (!rmnode) {
+		dev_err(prv->dev,
+			"Missing 'fsl,mpc5121-reset' node in device tree!\n");
+ return -ENODEV;
+ }
+
+ rm = of_iomap(rmnode, 0);
+ if (!rm) {
+ dev_err(prv->dev, "Error mapping reset module node!\n");
+ ret = -EBUSY;
+ goto out;
+ }
+
+ rcwh = in_be32(&rm->rcwhr);
+
+ /* Bit 6: NFC bus width */
+ rcw_width = ((rcwh >> 6) & 0x1) ? 2 : 1;
+
+ /* Bit 7: NFC Page/Spare size */
+ ps = (rcwh >> 7) & 0x1;
+
+ /* Bits [22:21]: ROM Location */
+ romloc = (rcwh >> 21) & 0x3;
+
+ /* Decode RCW bits */
+ switch ((ps << 2) | romloc) {
+ case 0x00:
+ case 0x01:
+ rcw_pagesize = 512;
+ rcw_sparesize = 16;
+ break;
+ case 0x02:
+ case 0x03:
+ rcw_pagesize = 4096;
+ rcw_sparesize = 128;
+ break;
+ case 0x04:
+ case 0x05:
+ rcw_pagesize = 2048;
+ rcw_sparesize = 64;
+ break;
+ case 0x06:
+ case 0x07:
+ rcw_pagesize = 4096;
+ rcw_sparesize = 218;
+ break;
+ }
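+	/*
+	 * Example (illustrative): ps = 1, romloc = 0 selects case 0x04,
+	 * i.e. a 2048-byte page with a 64-byte spare area.
+	 */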
+
+ mtd->writesize = rcw_pagesize;
+ mtd->oobsize = rcw_sparesize;
+ if (rcw_width == 2)
+ chip->options |= NAND_BUSWIDTH_16;
+
+	dev_notice(prv->dev,
+		   "Configured for %u-bit NAND, page size %u with %u spare.\n",
+		   rcw_width * 8, rcw_pagesize, rcw_sparesize);
+ iounmap(rm);
+out:
+ of_node_put(rmnode);
+ return ret;
+}
+
+/* Free driver resources */
+static void mpc5121_nfc_free(struct device *dev, struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mpc5121_nfc_prv *prv = nand_get_controller_data(chip);
+
+ if (prv->clk)
+ clk_disable_unprepare(prv->clk);
+
+ if (prv->csreg)
+ iounmap(prv->csreg);
+}
+
+static int mpc5121_nfc_attach_chip(struct nand_chip *chip)
+{
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
+ chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN)
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+
+ return 0;
+}
+
+static const struct nand_controller_ops mpc5121_nfc_ops = {
+ .attach_chip = mpc5121_nfc_attach_chip,
+};
+
+static int mpc5121_nfc_probe(struct platform_device *op)
+{
+ struct device_node *dn = op->dev.of_node;
+ struct clk *clk;
+ struct device *dev = &op->dev;
+ struct mpc5121_nfc_prv *prv;
+ struct resource res;
+ struct mtd_info *mtd;
+ struct nand_chip *chip;
+ unsigned long regs_paddr, regs_size;
+ const __be32 *chips_no;
+ int resettime = 0;
+ int retval = 0;
+ int rev, len;
+
+ /*
+ * Check SoC revision. This driver supports only NFC
+ * in MPC5121 revision 2 and MPC5123 revision 3.
+ */
+ rev = (mfspr(SPRN_SVR) >> 4) & 0xF;
+ if ((rev != 2) && (rev != 3)) {
+ dev_err(dev, "SoC revision %u is not supported!\n", rev);
+ return -ENXIO;
+ }
+
+ prv = devm_kzalloc(dev, sizeof(*prv), GFP_KERNEL);
+ if (!prv)
+ return -ENOMEM;
+
+ chip = &prv->chip;
+ mtd = nand_to_mtd(chip);
+
+ nand_controller_init(&prv->controller);
+ prv->controller.ops = &mpc5121_nfc_ops;
+ chip->controller = &prv->controller;
+
+ mtd->dev.parent = dev;
+ nand_set_controller_data(chip, prv);
+ nand_set_flash_node(chip, dn);
+ prv->dev = dev;
+
+ /* Read NFC configuration from Reset Config Word */
+ retval = mpc5121_nfc_read_hw_config(mtd);
+ if (retval) {
+ dev_err(dev, "Unable to read NFC config!\n");
+ return retval;
+ }
+
+ prv->irq = irq_of_parse_and_map(dn, 0);
+ if (prv->irq == NO_IRQ) {
+ dev_err(dev, "Error mapping IRQ!\n");
+ return -EINVAL;
+ }
+
+ retval = of_address_to_resource(dn, 0, &res);
+ if (retval) {
+ dev_err(dev, "Error parsing memory region!\n");
+ return retval;
+ }
+
+ chips_no = of_get_property(dn, "chips", &len);
+ if (!chips_no || len != sizeof(*chips_no)) {
+ dev_err(dev, "Invalid/missing 'chips' property!\n");
+ return -EINVAL;
+ }
+
+ regs_paddr = res.start;
+ regs_size = resource_size(&res);
+
+ if (!devm_request_mem_region(dev, regs_paddr, regs_size, DRV_NAME)) {
+ dev_err(dev, "Error requesting memory region!\n");
+ return -EBUSY;
+ }
+
+ prv->regs = devm_ioremap(dev, regs_paddr, regs_size);
+ if (!prv->regs) {
+ dev_err(dev, "Error mapping memory region!\n");
+ return -ENOMEM;
+ }
+
+ mtd->name = "MPC5121 NAND";
+ chip->legacy.dev_ready = mpc5121_nfc_dev_ready;
+ chip->legacy.cmdfunc = mpc5121_nfc_command;
+ chip->legacy.read_byte = mpc5121_nfc_read_byte;
+ chip->legacy.read_buf = mpc5121_nfc_read_buf;
+ chip->legacy.write_buf = mpc5121_nfc_write_buf;
+ chip->legacy.select_chip = mpc5121_nfc_select_chip;
+ chip->legacy.set_features = nand_get_set_features_notsupp;
+ chip->legacy.get_features = nand_get_set_features_notsupp;
+ chip->bbt_options = NAND_BBT_USE_FLASH;
+
+ /* Support external chip-select logic on ADS5121 board */
+ if (of_machine_is_compatible("fsl,mpc5121ads")) {
+ retval = ads5121_chipselect_init(mtd);
+ if (retval) {
+ dev_err(dev, "Chipselect init error!\n");
+ return retval;
+ }
+
+ chip->legacy.select_chip = ads5121_select_chip;
+ }
+
+ /* Enable NFC clock */
+ clk = devm_clk_get(dev, "ipg");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "Unable to acquire NFC clock!\n");
+ retval = PTR_ERR(clk);
+ goto error;
+ }
+ retval = clk_prepare_enable(clk);
+ if (retval) {
+ dev_err(dev, "Unable to enable NFC clock!\n");
+ goto error;
+ }
+ prv->clk = clk;
+
+ /* Reset NAND Flash controller */
+ nfc_set(mtd, NFC_CONFIG1, NFC_RESET);
+ while (nfc_read(mtd, NFC_CONFIG1) & NFC_RESET) {
+ if (resettime++ >= NFC_RESET_TIMEOUT) {
+ dev_err(dev, "Timeout while resetting NFC!\n");
+ retval = -EINVAL;
+ goto error;
+ }
+
+ udelay(1);
+ }
+
+ /* Enable write to NFC memory */
+ nfc_write(mtd, NFC_CONFIG, NFC_BLS_UNLOCKED);
+
+ /* Enable write to all NAND pages */
+ nfc_write(mtd, NFC_UNLOCKSTART_BLK0, 0x0000);
+ nfc_write(mtd, NFC_UNLOCKEND_BLK0, 0xFFFF);
+ nfc_write(mtd, NFC_WRPROT, NFC_WPC_UNLOCK);
+
+ /*
+ * Setup NFC:
+ * - Big Endian transfers,
+ * - Interrupt after full page read/write.
+ */
+ nfc_write(mtd, NFC_CONFIG1, NFC_BIG_ENDIAN | NFC_INT_MASK |
+ NFC_FULL_PAGE_INT);
+
+ /* Set spare area size */
+ nfc_write(mtd, NFC_SPAS, mtd->oobsize >> 1);
+
+ init_waitqueue_head(&prv->irq_waitq);
+ retval = devm_request_irq(dev, prv->irq, &mpc5121_nfc_irq, 0, DRV_NAME,
+ mtd);
+ if (retval) {
+ dev_err(dev, "Error requesting IRQ!\n");
+ goto error;
+ }
+
+ /*
+ * This driver assumes that the default ECC engine should be TYPE_SOFT.
+ * Set ->engine_type before registering the NAND devices in order to
+ * provide a driver specific default value.
+ */
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+
+ /* Detect NAND chips */
+ retval = nand_scan(chip, be32_to_cpup(chips_no));
+ if (retval) {
+ dev_err(dev, "NAND Flash not found !\n");
+ goto error;
+ }
+
+ /* Set erase block size */
+ switch (mtd->erasesize / mtd->writesize) {
+ case 32:
+ nfc_set(mtd, NFC_CONFIG1, NFC_PPB_32);
+ break;
+
+ case 64:
+ nfc_set(mtd, NFC_CONFIG1, NFC_PPB_64);
+ break;
+
+ case 128:
+ nfc_set(mtd, NFC_CONFIG1, NFC_PPB_128);
+ break;
+
+ case 256:
+ nfc_set(mtd, NFC_CONFIG1, NFC_PPB_256);
+ break;
+
+ default:
+ dev_err(dev, "Unsupported NAND flash!\n");
+ retval = -ENXIO;
+ goto error;
+ }
+
+ dev_set_drvdata(dev, mtd);
+
+ /* Register device in MTD */
+ retval = mtd_device_register(mtd, NULL, 0);
+ if (retval) {
+ dev_err(dev, "Error adding MTD device!\n");
+ goto error;
+ }
+
+ return 0;
+error:
+ mpc5121_nfc_free(dev, mtd);
+ return retval;
+}
+
+static int mpc5121_nfc_remove(struct platform_device *op)
+{
+ struct device *dev = &op->dev;
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+ int ret;
+
+ ret = mtd_device_unregister(mtd);
+ WARN_ON(ret);
+ nand_cleanup(mtd_to_nand(mtd));
+ mpc5121_nfc_free(dev, mtd);
+
+ return 0;
+}
+
+static const struct of_device_id mpc5121_nfc_match[] = {
+ { .compatible = "fsl,mpc5121-nfc", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mpc5121_nfc_match);
+
+static struct platform_driver mpc5121_nfc_driver = {
+ .probe = mpc5121_nfc_probe,
+ .remove = mpc5121_nfc_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = mpc5121_nfc_match,
+ },
+};
+
+module_platform_driver(mpc5121_nfc_driver);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("MPC5121 NAND MTD driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/raw/mtk_ecc.c b/drivers/mtd/nand/raw/mtk_ecc.c
new file mode 100644
index 000000000..c115e03ed
--- /dev/null
+++ b/drivers/mtd/nand/raw/mtk_ecc.c
@@ -0,0 +1,601 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * MTK ECC controller driver.
+ * Copyright (C) 2016 MediaTek Inc.
+ * Authors: Xiaolei Li <xiaolei.li@mediatek.com>
+ * Jorge Ramirez-Ortiz <jorge.ramirez-ortiz@linaro.org>
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/iopoll.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/mutex.h>
+
+#include "mtk_ecc.h"
+
+#define ECC_IDLE_MASK BIT(0)
+#define ECC_IRQ_EN BIT(0)
+#define ECC_PG_IRQ_SEL BIT(1)
+#define ECC_OP_ENABLE (1)
+#define ECC_OP_DISABLE (0)
+
+#define ECC_ENCCON (0x00)
+#define ECC_ENCCNFG (0x04)
+#define ECC_MS_SHIFT (16)
+#define ECC_ENCDIADDR (0x08)
+#define ECC_ENCIDLE (0x0C)
+#define ECC_DECCON (0x100)
+#define ECC_DECCNFG (0x104)
+#define DEC_EMPTY_EN BIT(31)
+#define DEC_CNFG_CORRECT (0x3 << 12)
+#define ECC_DECIDLE (0x10C)
+#define ECC_DECENUM0 (0x114)
+
+#define ECC_TIMEOUT (500000)
+
+#define ECC_IDLE_REG(op) ((op) == ECC_ENCODE ? ECC_ENCIDLE : ECC_DECIDLE)
+#define ECC_CTL_REG(op) ((op) == ECC_ENCODE ? ECC_ENCCON : ECC_DECCON)
+
+struct mtk_ecc_caps {
+ u32 err_mask;
+ u32 err_shift;
+ const u8 *ecc_strength;
+ const u32 *ecc_regs;
+ u8 num_ecc_strength;
+ u8 ecc_mode_shift;
+ u32 parity_bits;
+ int pg_irq_sel;
+};
+
+struct mtk_ecc {
+ struct device *dev;
+ const struct mtk_ecc_caps *caps;
+ void __iomem *regs;
+ struct clk *clk;
+
+ struct completion done;
+ struct mutex lock;
+ u32 sectors;
+
+ u8 *eccdata;
+};
+
+/* ecc strength that each IP supports */
+static const u8 ecc_strength_mt2701[] = {
+ 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 28, 32, 36,
+ 40, 44, 48, 52, 56, 60
+};
+
+static const u8 ecc_strength_mt2712[] = {
+ 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 28, 32, 36,
+ 40, 44, 48, 52, 56, 60, 68, 72, 80
+};
+
+static const u8 ecc_strength_mt7622[] = {
+ 4, 6, 8, 10, 12
+};
+
+enum mtk_ecc_regs {
+ ECC_ENCPAR00,
+ ECC_ENCIRQ_EN,
+ ECC_ENCIRQ_STA,
+ ECC_DECDONE,
+ ECC_DECIRQ_EN,
+ ECC_DECIRQ_STA,
+};
+
+static int mt2701_ecc_regs[] = {
+ [ECC_ENCPAR00] = 0x10,
+ [ECC_ENCIRQ_EN] = 0x80,
+ [ECC_ENCIRQ_STA] = 0x84,
+ [ECC_DECDONE] = 0x124,
+ [ECC_DECIRQ_EN] = 0x200,
+ [ECC_DECIRQ_STA] = 0x204,
+};
+
+static int mt2712_ecc_regs[] = {
+ [ECC_ENCPAR00] = 0x300,
+ [ECC_ENCIRQ_EN] = 0x80,
+ [ECC_ENCIRQ_STA] = 0x84,
+ [ECC_DECDONE] = 0x124,
+ [ECC_DECIRQ_EN] = 0x200,
+ [ECC_DECIRQ_STA] = 0x204,
+};
+
+static int mt7622_ecc_regs[] = {
+ [ECC_ENCPAR00] = 0x10,
+ [ECC_ENCIRQ_EN] = 0x30,
+ [ECC_ENCIRQ_STA] = 0x34,
+ [ECC_DECDONE] = 0x11c,
+ [ECC_DECIRQ_EN] = 0x140,
+ [ECC_DECIRQ_STA] = 0x144,
+};
+
+static inline void mtk_ecc_wait_idle(struct mtk_ecc *ecc,
+ enum mtk_ecc_operation op)
+{
+ struct device *dev = ecc->dev;
+ u32 val;
+ int ret;
+
+ ret = readl_poll_timeout_atomic(ecc->regs + ECC_IDLE_REG(op), val,
+ val & ECC_IDLE_MASK,
+ 10, ECC_TIMEOUT);
+ if (ret)
+ dev_warn(dev, "%s NOT idle\n",
+ op == ECC_ENCODE ? "encoder" : "decoder");
+}
+
+static irqreturn_t mtk_ecc_irq(int irq, void *id)
+{
+ struct mtk_ecc *ecc = id;
+ u32 dec, enc;
+
+ dec = readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECIRQ_STA])
+ & ECC_IRQ_EN;
+ if (dec) {
+ dec = readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECDONE]);
+ if (dec & ecc->sectors) {
+ /*
+ * Clear decode IRQ status once again to ensure that
+ * there will be no extra IRQ.
+ */
+ readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECIRQ_STA]);
+ ecc->sectors = 0;
+ complete(&ecc->done);
+ } else {
+ return IRQ_HANDLED;
+ }
+ } else {
+ enc = readl(ecc->regs + ecc->caps->ecc_regs[ECC_ENCIRQ_STA])
+ & ECC_IRQ_EN;
+ if (enc)
+ complete(&ecc->done);
+ else
+ return IRQ_NONE;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int mtk_ecc_config(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
+{
+ u32 ecc_bit, dec_sz, enc_sz;
+ u32 reg, i;
+
+ for (i = 0; i < ecc->caps->num_ecc_strength; i++) {
+ if (ecc->caps->ecc_strength[i] == config->strength)
+ break;
+ }
+
+ if (i == ecc->caps->num_ecc_strength) {
+ dev_err(ecc->dev, "invalid ecc strength %d\n",
+ config->strength);
+ return -EINVAL;
+ }
+
+ ecc_bit = i;
+
+ if (config->op == ECC_ENCODE) {
+ /* configure ECC encoder (in bits) */
+ enc_sz = config->len << 3;
+
+ reg = ecc_bit | (config->mode << ecc->caps->ecc_mode_shift);
+ reg |= (enc_sz << ECC_MS_SHIFT);
+ writel(reg, ecc->regs + ECC_ENCCNFG);
+
+ if (config->mode != ECC_NFI_MODE)
+ writel(lower_32_bits(config->addr),
+ ecc->regs + ECC_ENCDIADDR);
+
+ } else {
+ /* configure ECC decoder (in bits) */
+ dec_sz = (config->len << 3) +
+ config->strength * ecc->caps->parity_bits;
+
+ reg = ecc_bit | (config->mode << ecc->caps->ecc_mode_shift);
+ reg |= (dec_sz << ECC_MS_SHIFT) | DEC_CNFG_CORRECT;
+ reg |= DEC_EMPTY_EN;
+ writel(reg, ecc->regs + ECC_DECCNFG);
+
+ if (config->sectors)
+ ecc->sectors = 1 << (config->sectors - 1);
+ }
+
+ return 0;
+}
+
+void mtk_ecc_get_stats(struct mtk_ecc *ecc, struct mtk_ecc_stats *stats,
+ int sectors)
+{
+ u32 offset, i, err;
+ u32 bitflips = 0;
+
+ stats->corrected = 0;
+ stats->failed = 0;
+
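+	/* each 32-bit ECC_DECENUM register packs the error counts of 4 sectors */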
+ for (i = 0; i < sectors; i++) {
+ offset = (i >> 2) << 2;
+ err = readl(ecc->regs + ECC_DECENUM0 + offset);
+ err = err >> ((i % 4) * ecc->caps->err_shift);
+ err &= ecc->caps->err_mask;
+ if (err == ecc->caps->err_mask) {
+ /* uncorrectable errors */
+ stats->failed++;
+ continue;
+ }
+
+ stats->corrected += err;
+ bitflips = max_t(u32, bitflips, err);
+ }
+
+ stats->bitflips = bitflips;
+}
+EXPORT_SYMBOL(mtk_ecc_get_stats);
+
+void mtk_ecc_release(struct mtk_ecc *ecc)
+{
+ clk_disable_unprepare(ecc->clk);
+ put_device(ecc->dev);
+}
+EXPORT_SYMBOL(mtk_ecc_release);
+
+static void mtk_ecc_hw_init(struct mtk_ecc *ecc)
+{
+ mtk_ecc_wait_idle(ecc, ECC_ENCODE);
+ writew(ECC_OP_DISABLE, ecc->regs + ECC_ENCCON);
+
+ mtk_ecc_wait_idle(ecc, ECC_DECODE);
+ writel(ECC_OP_DISABLE, ecc->regs + ECC_DECCON);
+}
+
+static struct mtk_ecc *mtk_ecc_get(struct device_node *np)
+{
+ struct platform_device *pdev;
+ struct mtk_ecc *ecc;
+
+ pdev = of_find_device_by_node(np);
+ if (!pdev)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ ecc = platform_get_drvdata(pdev);
+ if (!ecc) {
+ put_device(&pdev->dev);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ clk_prepare_enable(ecc->clk);
+ mtk_ecc_hw_init(ecc);
+
+ return ecc;
+}
+
+struct mtk_ecc *of_mtk_ecc_get(struct device_node *of_node)
+{
+ struct mtk_ecc *ecc = NULL;
+ struct device_node *np;
+
+ np = of_parse_phandle(of_node, "ecc-engine", 0);
+ if (np) {
+ ecc = mtk_ecc_get(np);
+ of_node_put(np);
+ }
+
+ return ecc;
+}
+EXPORT_SYMBOL(of_mtk_ecc_get);
+
+int mtk_ecc_enable(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
+{
+ enum mtk_ecc_operation op = config->op;
+ u16 reg_val;
+ int ret;
+
+ ret = mutex_lock_interruptible(&ecc->lock);
+ if (ret) {
+ dev_err(ecc->dev, "interrupted when attempting to lock\n");
+ return ret;
+ }
+
+ mtk_ecc_wait_idle(ecc, op);
+
+ ret = mtk_ecc_config(ecc, config);
+ if (ret) {
+ mutex_unlock(&ecc->lock);
+ return ret;
+ }
+
+ if (config->mode != ECC_NFI_MODE || op != ECC_ENCODE) {
+ init_completion(&ecc->done);
+ reg_val = ECC_IRQ_EN;
+		/*
+		 * For ECC_NFI_MODE, if ecc->caps->pg_irq_sel is 1, the chip
+		 * generates only one ecc irq during a page read/write; if it
+		 * is 0, one ecc irq is generated for each ecc step.
+		 */
+ if (ecc->caps->pg_irq_sel && config->mode == ECC_NFI_MODE)
+ reg_val |= ECC_PG_IRQ_SEL;
+ if (op == ECC_ENCODE)
+ writew(reg_val, ecc->regs +
+ ecc->caps->ecc_regs[ECC_ENCIRQ_EN]);
+ else
+ writew(reg_val, ecc->regs +
+ ecc->caps->ecc_regs[ECC_DECIRQ_EN]);
+ }
+
+ writew(ECC_OP_ENABLE, ecc->regs + ECC_CTL_REG(op));
+
+ return 0;
+}
+EXPORT_SYMBOL(mtk_ecc_enable);
+
+void mtk_ecc_disable(struct mtk_ecc *ecc)
+{
+ enum mtk_ecc_operation op = ECC_ENCODE;
+
+ /* find out the running operation */
+ if (readw(ecc->regs + ECC_CTL_REG(op)) != ECC_OP_ENABLE)
+ op = ECC_DECODE;
+
+ /* disable it */
+ mtk_ecc_wait_idle(ecc, op);
+ if (op == ECC_DECODE) {
+ /*
+ * Clear decode IRQ status in case there is a timeout to wait
+ * decode IRQ.
+ */
+ readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECDONE]);
+ writew(0, ecc->regs + ecc->caps->ecc_regs[ECC_DECIRQ_EN]);
+ } else {
+ writew(0, ecc->regs + ecc->caps->ecc_regs[ECC_ENCIRQ_EN]);
+ }
+
+ writew(ECC_OP_DISABLE, ecc->regs + ECC_CTL_REG(op));
+
+ mutex_unlock(&ecc->lock);
+}
+EXPORT_SYMBOL(mtk_ecc_disable);
+
+int mtk_ecc_wait_done(struct mtk_ecc *ecc, enum mtk_ecc_operation op)
+{
+ int ret;
+
+ ret = wait_for_completion_timeout(&ecc->done, msecs_to_jiffies(500));
+ if (!ret) {
+		dev_err(ecc->dev, "%s timeout - interrupt did not arrive\n",
+ (op == ECC_ENCODE) ? "encoder" : "decoder");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mtk_ecc_wait_done);
+
+int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config,
+ u8 *data, u32 bytes)
+{
+ dma_addr_t addr;
+ u32 len;
+ int ret;
+
+ addr = dma_map_single(ecc->dev, data, bytes, DMA_TO_DEVICE);
+ ret = dma_mapping_error(ecc->dev, addr);
+ if (ret) {
+ dev_err(ecc->dev, "dma mapping error\n");
+ return -EINVAL;
+ }
+
+ config->op = ECC_ENCODE;
+ config->addr = addr;
+ ret = mtk_ecc_enable(ecc, config);
+ if (ret) {
+ dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE);
+ return ret;
+ }
+
+ ret = mtk_ecc_wait_done(ecc, ECC_ENCODE);
+ if (ret)
+ goto timeout;
+
+ mtk_ecc_wait_idle(ecc, ECC_ENCODE);
+
+ /* Program ECC bytes to OOB: per sector oob = FDM + ECC + SPARE */
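+	/* e.g. (assumed): strength 12 with 14 parity bits -> (12*14+7)/8 = 21 bytes */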
+ len = (config->strength * ecc->caps->parity_bits + 7) >> 3;
+
+ /* write the parity bytes generated by the ECC back to temp buffer */
+ __ioread32_copy(ecc->eccdata,
+ ecc->regs + ecc->caps->ecc_regs[ECC_ENCPAR00],
+ round_up(len, 4));
+
+ /* copy into possibly unaligned OOB region with actual length */
+ memcpy(data + bytes, ecc->eccdata, len);
+timeout:
+
+ dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE);
+ mtk_ecc_disable(ecc);
+
+ return ret;
+}
+EXPORT_SYMBOL(mtk_ecc_encode);
+
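+/*
+ * Round the requested strength to a supported value: down to the nearest
+ * table entry, up to the minimum, or capped at the table maximum. E.g.,
+ * assuming the mt7622 table {4, 6, 8, 10, 12}: 7 becomes 6 and 3 becomes 4.
+ */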
+void mtk_ecc_adjust_strength(struct mtk_ecc *ecc, u32 *p)
+{
+ const u8 *ecc_strength = ecc->caps->ecc_strength;
+ int i;
+
+ for (i = 0; i < ecc->caps->num_ecc_strength; i++) {
+ if (*p <= ecc_strength[i]) {
+ if (!i)
+ *p = ecc_strength[i];
+ else if (*p != ecc_strength[i])
+ *p = ecc_strength[i - 1];
+ return;
+ }
+ }
+
+ *p = ecc_strength[ecc->caps->num_ecc_strength - 1];
+}
+EXPORT_SYMBOL(mtk_ecc_adjust_strength);
+
+unsigned int mtk_ecc_get_parity_bits(struct mtk_ecc *ecc)
+{
+ return ecc->caps->parity_bits;
+}
+EXPORT_SYMBOL(mtk_ecc_get_parity_bits);
+
+static const struct mtk_ecc_caps mtk_ecc_caps_mt2701 = {
+ .err_mask = 0x3f,
+ .err_shift = 8,
+ .ecc_strength = ecc_strength_mt2701,
+ .ecc_regs = mt2701_ecc_regs,
+ .num_ecc_strength = 20,
+ .ecc_mode_shift = 5,
+ .parity_bits = 14,
+ .pg_irq_sel = 0,
+};
+
+static const struct mtk_ecc_caps mtk_ecc_caps_mt2712 = {
+ .err_mask = 0x7f,
+ .err_shift = 8,
+ .ecc_strength = ecc_strength_mt2712,
+ .ecc_regs = mt2712_ecc_regs,
+ .num_ecc_strength = 23,
+ .ecc_mode_shift = 5,
+ .parity_bits = 14,
+ .pg_irq_sel = 1,
+};
+
+static const struct mtk_ecc_caps mtk_ecc_caps_mt7622 = {
+ .err_mask = 0x1f,
+ .err_shift = 5,
+ .ecc_strength = ecc_strength_mt7622,
+ .ecc_regs = mt7622_ecc_regs,
+ .num_ecc_strength = 5,
+ .ecc_mode_shift = 4,
+ .parity_bits = 13,
+ .pg_irq_sel = 0,
+};
+
+static const struct of_device_id mtk_ecc_dt_match[] = {
+ {
+ .compatible = "mediatek,mt2701-ecc",
+ .data = &mtk_ecc_caps_mt2701,
+ }, {
+ .compatible = "mediatek,mt2712-ecc",
+ .data = &mtk_ecc_caps_mt2712,
+ }, {
+ .compatible = "mediatek,mt7622-ecc",
+ .data = &mtk_ecc_caps_mt7622,
+ },
+ {},
+};
+
+static int mtk_ecc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mtk_ecc *ecc;
+ struct resource *res;
+ u32 max_eccdata_size;
+ int irq, ret;
+
+ ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
+ if (!ecc)
+ return -ENOMEM;
+
+ ecc->caps = of_device_get_match_data(dev);
+
+ max_eccdata_size = ecc->caps->num_ecc_strength - 1;
+ max_eccdata_size = ecc->caps->ecc_strength[max_eccdata_size];
+ max_eccdata_size = (max_eccdata_size * ecc->caps->parity_bits + 7) >> 3;
+ max_eccdata_size = round_up(max_eccdata_size, 4);
+ ecc->eccdata = devm_kzalloc(dev, max_eccdata_size, GFP_KERNEL);
+ if (!ecc->eccdata)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ecc->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ecc->regs)) {
+ dev_err(dev, "failed to map regs: %ld\n", PTR_ERR(ecc->regs));
+ return PTR_ERR(ecc->regs);
+ }
+
+ ecc->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(ecc->clk)) {
+ dev_err(dev, "failed to get clock: %ld\n", PTR_ERR(ecc->clk));
+ return PTR_ERR(ecc->clk);
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = dma_set_mask(dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(dev, "failed to set DMA mask\n");
+ return ret;
+ }
+
+ ret = devm_request_irq(dev, irq, mtk_ecc_irq, 0x0, "mtk-ecc", ecc);
+ if (ret) {
+ dev_err(dev, "failed to request irq\n");
+ return -EINVAL;
+ }
+
+ ecc->dev = dev;
+ mutex_init(&ecc->lock);
+ platform_set_drvdata(pdev, ecc);
+ dev_info(dev, "probed\n");
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mtk_ecc_suspend(struct device *dev)
+{
+ struct mtk_ecc *ecc = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(ecc->clk);
+
+ return 0;
+}
+
+static int mtk_ecc_resume(struct device *dev)
+{
+ struct mtk_ecc *ecc = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(ecc->clk);
+ if (ret) {
+ dev_err(dev, "failed to enable clk\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(mtk_ecc_pm_ops, mtk_ecc_suspend, mtk_ecc_resume);
+#endif
+
+MODULE_DEVICE_TABLE(of, mtk_ecc_dt_match);
+
+static struct platform_driver mtk_ecc_driver = {
+ .probe = mtk_ecc_probe,
+ .driver = {
+ .name = "mtk-ecc",
+ .of_match_table = of_match_ptr(mtk_ecc_dt_match),
+#ifdef CONFIG_PM_SLEEP
+ .pm = &mtk_ecc_pm_ops,
+#endif
+ },
+};
+
+module_platform_driver(mtk_ecc_driver);
+
+MODULE_AUTHOR("Xiaolei Li <xiaolei.li@mediatek.com>");
+MODULE_DESCRIPTION("MTK Nand ECC Driver");
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/mtd/nand/raw/mtk_ecc.h b/drivers/mtd/nand/raw/mtk_ecc.h
new file mode 100644
index 000000000..0e48c36e6
--- /dev/null
+++ b/drivers/mtd/nand/raw/mtk_ecc.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * MTK SDG1 ECC controller
+ *
+ * Copyright (c) 2016 Mediatek
+ * Authors: Xiaolei Li <xiaolei.li@mediatek.com>
+ * Jorge Ramirez-Ortiz <jorge.ramirez-ortiz@linaro.org>
+ */
+
+#ifndef __DRIVERS_MTD_NAND_MTK_ECC_H__
+#define __DRIVERS_MTD_NAND_MTK_ECC_H__
+
+#include <linux/types.h>
+
+enum mtk_ecc_mode {ECC_DMA_MODE = 0, ECC_NFI_MODE = 1};
+enum mtk_ecc_operation {ECC_ENCODE, ECC_DECODE};
+
+struct device_node;
+struct mtk_ecc;
+
+struct mtk_ecc_stats {
+ u32 corrected;
+ u32 bitflips;
+ u32 failed;
+};
+
+struct mtk_ecc_config {
+ enum mtk_ecc_operation op;
+ enum mtk_ecc_mode mode;
+ dma_addr_t addr;
+ u32 strength;
+ u32 sectors;
+ u32 len;
+};
+
+int mtk_ecc_encode(struct mtk_ecc *, struct mtk_ecc_config *, u8 *, u32);
+void mtk_ecc_get_stats(struct mtk_ecc *, struct mtk_ecc_stats *, int);
+int mtk_ecc_wait_done(struct mtk_ecc *, enum mtk_ecc_operation);
+int mtk_ecc_enable(struct mtk_ecc *, struct mtk_ecc_config *);
+void mtk_ecc_disable(struct mtk_ecc *);
+void mtk_ecc_adjust_strength(struct mtk_ecc *ecc, u32 *p);
+unsigned int mtk_ecc_get_parity_bits(struct mtk_ecc *ecc);
+
+struct mtk_ecc *of_mtk_ecc_get(struct device_node *);
+void mtk_ecc_release(struct mtk_ecc *);
+
+#endif
diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c
new file mode 100644
index 000000000..5c5c92132
--- /dev/null
+++ b/drivers/mtd/nand/raw/mtk_nand.c
@@ -0,0 +1,1682 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * MTK NAND Flash controller driver.
+ * Copyright (C) 2016 MediaTek Inc.
+ * Authors: Xiaolei Li <xiaolei.li@mediatek.com>
+ * Jorge Ramirez-Ortiz <jorge.ramirez-ortiz@linaro.org>
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/mtd.h>
+#include <linux/module.h>
+#include <linux/iopoll.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include "mtk_ecc.h"
+
+/* NAND controller register definition */
+#define NFI_CNFG (0x00)
+#define CNFG_AHB BIT(0)
+#define CNFG_READ_EN BIT(1)
+#define CNFG_DMA_BURST_EN BIT(2)
+#define CNFG_BYTE_RW BIT(6)
+#define CNFG_HW_ECC_EN BIT(8)
+#define CNFG_AUTO_FMT_EN BIT(9)
+#define CNFG_OP_CUST (6 << 12)
+#define NFI_PAGEFMT (0x04)
+#define PAGEFMT_FDM_ECC_SHIFT (12)
+#define PAGEFMT_FDM_SHIFT (8)
+#define PAGEFMT_SEC_SEL_512 BIT(2)
+#define PAGEFMT_512_2K (0)
+#define PAGEFMT_2K_4K (1)
+#define PAGEFMT_4K_8K (2)
+#define PAGEFMT_8K_16K (3)
+/* NFI control */
+#define NFI_CON (0x08)
+#define CON_FIFO_FLUSH BIT(0)
+#define CON_NFI_RST BIT(1)
+#define CON_BRD BIT(8) /* burst read */
+#define CON_BWR BIT(9) /* burst write */
+#define CON_SEC_SHIFT (12)
+/* Timing control register */
+#define NFI_ACCCON (0x0C)
+#define NFI_INTR_EN (0x10)
+#define INTR_AHB_DONE_EN BIT(6)
+#define NFI_INTR_STA (0x14)
+#define NFI_CMD (0x20)
+#define NFI_ADDRNOB (0x30)
+#define NFI_COLADDR (0x34)
+#define NFI_ROWADDR (0x38)
+#define NFI_STRDATA (0x40)
+#define STAR_EN (1)
+#define STAR_DE (0)
+#define NFI_CNRNB (0x44)
+#define NFI_DATAW (0x50)
+#define NFI_DATAR (0x54)
+#define NFI_PIO_DIRDY (0x58)
+#define PIO_DI_RDY (0x01)
+#define NFI_STA (0x60)
+#define STA_CMD BIT(0)
+#define STA_ADDR BIT(1)
+#define STA_BUSY BIT(8)
+#define STA_EMP_PAGE BIT(12)
+#define NFI_FSM_CUSTDATA (0xe << 16)
+#define NFI_FSM_MASK (0xf << 16)
+#define NFI_ADDRCNTR (0x70)
+#define CNTR_MASK GENMASK(16, 12)
+#define ADDRCNTR_SEC_SHIFT (12)
+#define ADDRCNTR_SEC(val) \
+ (((val) & CNTR_MASK) >> ADDRCNTR_SEC_SHIFT)
+#define NFI_STRADDR (0x80)
+#define NFI_BYTELEN (0x84)
+#define NFI_CSEL (0x90)
+#define NFI_FDML(x) (0xA0 + (x) * sizeof(u32) * 2)
+#define NFI_FDMM(x) (0xA4 + (x) * sizeof(u32) * 2)
+#define NFI_FDM_MAX_SIZE (8)
+#define NFI_FDM_MIN_SIZE (1)
+#define NFI_DEBUG_CON1 (0x220)
+#define STROBE_MASK GENMASK(4, 3)
+#define STROBE_SHIFT (3)
+#define MAX_STROBE_DLY (3)
+#define NFI_MASTER_STA (0x224)
+#define MASTER_STA_MASK (0x0FFF)
+#define NFI_EMPTY_THRESH (0x23C)
+
+#define MTK_NAME "mtk-nand"
+#define KB(x) ((x) * 1024UL)
+#define MB(x) (KB(x) * 1024UL)
+
+#define MTK_TIMEOUT (500000)
+#define MTK_RESET_TIMEOUT (1000000)
+#define MTK_NAND_MAX_NSELS (2)
+#define MTK_NFC_MIN_SPARE (16)
+#define ACCTIMING(tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt) \
+ ((tpoecs) << 28 | (tprecs) << 22 | (tc2r) << 16 | \
+ (tw2r) << 12 | (twh) << 8 | (twst) << 4 | (trlt))
+
+struct mtk_nfc_caps {
+ const u8 *spare_size;
+ u8 num_spare_size;
+ u8 pageformat_spare_shift;
+ u8 nfi_clk_div;
+ u8 max_sector;
+ u32 max_sector_size;
+};
+
+struct mtk_nfc_bad_mark_ctl {
+ void (*bm_swap)(struct mtd_info *, u8 *buf, int raw);
+ u32 sec;
+ u32 pos;
+};
+
+/*
+ * FDM: region used to store free OOB data
+ */
+struct mtk_nfc_fdm {
+ u32 reg_size;
+ u32 ecc_size;
+};
+
+struct mtk_nfc_nand_chip {
+ struct list_head node;
+ struct nand_chip nand;
+
+ struct mtk_nfc_bad_mark_ctl bad_mark;
+ struct mtk_nfc_fdm fdm;
+ u32 spare_per_sector;
+
+ int nsels;
+ u8 sels[];
+ /* nothing after this field */
+};
+
+struct mtk_nfc_clk {
+ struct clk *nfi_clk;
+ struct clk *pad_clk;
+};
+
+struct mtk_nfc {
+ struct nand_controller controller;
+ struct mtk_ecc_config ecc_cfg;
+ struct mtk_nfc_clk clk;
+ struct mtk_ecc *ecc;
+
+ struct device *dev;
+ const struct mtk_nfc_caps *caps;
+ void __iomem *regs;
+
+ struct completion done;
+ struct list_head chips;
+
+ u8 *buffer;
+
+ unsigned long assigned_cs;
+};
+
+/*
+ * Supported spare sizes of each IP.
+ * The order must match the spare size bitfield definition of the
+ * NFI_PAGEFMT register.
+ */
+static const u8 spare_size_mt2701[] = {
+ 16, 26, 27, 28, 32, 36, 40, 44, 48, 49, 50, 51, 52, 62, 63, 64
+};
+
+static const u8 spare_size_mt2712[] = {
+ 16, 26, 27, 28, 32, 36, 40, 44, 48, 49, 50, 51, 52, 62, 61, 63, 64, 67,
+ 74
+};
+
+static const u8 spare_size_mt7622[] = {
+ 16, 26, 27, 28
+};
+
+static inline struct mtk_nfc_nand_chip *to_mtk_nand(struct nand_chip *nand)
+{
+ return container_of(nand, struct mtk_nfc_nand_chip, nand);
+}
+
+static inline u8 *data_ptr(struct nand_chip *chip, const u8 *p, int i)
+{
+ return (u8 *)p + i * chip->ecc.size;
+}
+
+static inline u8 *oob_ptr(struct nand_chip *chip, int i)
+{
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ u8 *poi;
+
+	/*
+	 * Map the sector's FDM data to the free OOB: the beginning of the
+	 * OOB area stores the FDM data of the bad mark sector.
+	 */
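+	/*
+	 * Example (assumed layout): with bad_mark.sec = 2 and
+	 * fdm.reg_size = 8, sector 2 maps to oob_poi[0..7], sector 0 to
+	 * oob_poi[8..15], sector 1 to oob_poi[16..23] and sector 3 to
+	 * oob_poi[24..31].
+	 */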
+
+ if (i < mtk_nand->bad_mark.sec)
+ poi = chip->oob_poi + (i + 1) * mtk_nand->fdm.reg_size;
+ else if (i == mtk_nand->bad_mark.sec)
+ poi = chip->oob_poi;
+ else
+ poi = chip->oob_poi + i * mtk_nand->fdm.reg_size;
+
+ return poi;
+}
+
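+/*
+ * nfc->buffer layout (assumed): one record per ECC step, each holding
+ * ecc.size data bytes immediately followed by spare_per_sector OOB bytes.
+ */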
+static inline int mtk_data_len(struct nand_chip *chip)
+{
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+
+ return chip->ecc.size + mtk_nand->spare_per_sector;
+}
+
+static inline u8 *mtk_data_ptr(struct nand_chip *chip, int i)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+
+ return nfc->buffer + i * mtk_data_len(chip);
+}
+
+static inline u8 *mtk_oob_ptr(struct nand_chip *chip, int i)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+
+ return nfc->buffer + i * mtk_data_len(chip) + chip->ecc.size;
+}
+
+static inline void nfi_writel(struct mtk_nfc *nfc, u32 val, u32 reg)
+{
+ writel(val, nfc->regs + reg);
+}
+
+static inline void nfi_writew(struct mtk_nfc *nfc, u16 val, u32 reg)
+{
+ writew(val, nfc->regs + reg);
+}
+
+static inline void nfi_writeb(struct mtk_nfc *nfc, u8 val, u32 reg)
+{
+ writeb(val, nfc->regs + reg);
+}
+
+static inline u32 nfi_readl(struct mtk_nfc *nfc, u32 reg)
+{
+ return readl_relaxed(nfc->regs + reg);
+}
+
+static inline u16 nfi_readw(struct mtk_nfc *nfc, u32 reg)
+{
+ return readw_relaxed(nfc->regs + reg);
+}
+
+static inline u8 nfi_readb(struct mtk_nfc *nfc, u32 reg)
+{
+ return readb_relaxed(nfc->regs + reg);
+}
+
+static void mtk_nfc_hw_reset(struct mtk_nfc *nfc)
+{
+ struct device *dev = nfc->dev;
+ u32 val;
+ int ret;
+
+ /* reset all registers and force the NFI master to terminate */
+ nfi_writel(nfc, CON_FIFO_FLUSH | CON_NFI_RST, NFI_CON);
+
+ /* wait for the master to finish the last transaction */
+ ret = readl_poll_timeout(nfc->regs + NFI_MASTER_STA, val,
+ !(val & MASTER_STA_MASK), 50,
+ MTK_RESET_TIMEOUT);
+ if (ret)
+ dev_warn(dev, "master active in reset [0x%x] = 0x%x\n",
+ NFI_MASTER_STA, val);
+
+ /* ensure any status register affected by the NFI master is reset */
+ nfi_writel(nfc, CON_FIFO_FLUSH | CON_NFI_RST, NFI_CON);
+ nfi_writew(nfc, STAR_DE, NFI_STRDATA);
+}
+
+static int mtk_nfc_send_command(struct mtk_nfc *nfc, u8 command)
+{
+ struct device *dev = nfc->dev;
+ u32 val;
+ int ret;
+
+ nfi_writel(nfc, command, NFI_CMD);
+
+ ret = readl_poll_timeout_atomic(nfc->regs + NFI_STA, val,
+ !(val & STA_CMD), 10, MTK_TIMEOUT);
+ if (ret) {
+ dev_warn(dev, "nfi core timed out entering command mode\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int mtk_nfc_send_address(struct mtk_nfc *nfc, int addr)
+{
+ struct device *dev = nfc->dev;
+ u32 val;
+ int ret;
+
+ nfi_writel(nfc, addr, NFI_COLADDR);
+ nfi_writel(nfc, 0, NFI_ROWADDR);
+ nfi_writew(nfc, 1, NFI_ADDRNOB);
+
+ ret = readl_poll_timeout_atomic(nfc->regs + NFI_STA, val,
+ !(val & STA_ADDR), 10, MTK_TIMEOUT);
+ if (ret) {
+ dev_warn(dev, "nfi core timed out entering address mode\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int mtk_nfc_hw_runtime_config(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ u32 fmt, spare, i;
+
+ if (!mtd->writesize)
+ return 0;
+
+ spare = mtk_nand->spare_per_sector;
+
+ switch (mtd->writesize) {
+ case 512:
+ fmt = PAGEFMT_512_2K | PAGEFMT_SEC_SEL_512;
+ break;
+ case KB(2):
+ if (chip->ecc.size == 512)
+ fmt = PAGEFMT_2K_4K | PAGEFMT_SEC_SEL_512;
+ else
+ fmt = PAGEFMT_512_2K;
+ break;
+ case KB(4):
+ if (chip->ecc.size == 512)
+ fmt = PAGEFMT_4K_8K | PAGEFMT_SEC_SEL_512;
+ else
+ fmt = PAGEFMT_2K_4K;
+ break;
+ case KB(8):
+ if (chip->ecc.size == 512)
+ fmt = PAGEFMT_8K_16K | PAGEFMT_SEC_SEL_512;
+ else
+ fmt = PAGEFMT_4K_8K;
+ break;
+ case KB(16):
+ fmt = PAGEFMT_8K_16K;
+ break;
+ default:
+ dev_err(nfc->dev, "invalid page len: %d\n", mtd->writesize);
+ return -EINVAL;
+ }
+
+	/*
+	 * The hardware doubles the spare size value when the ECC sector
+	 * size is 1024 bytes, so halve it before the table lookup.
+	 */
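+	/* e.g. (assumed): 1024-byte sectors with spare_per_sector = 32 look up 16 */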
+ if (chip->ecc.size == 1024)
+ spare >>= 1;
+
+ for (i = 0; i < nfc->caps->num_spare_size; i++) {
+ if (nfc->caps->spare_size[i] == spare)
+ break;
+ }
+
+ if (i == nfc->caps->num_spare_size) {
+ dev_err(nfc->dev, "invalid spare size %d\n", spare);
+ return -EINVAL;
+ }
+
+ fmt |= i << nfc->caps->pageformat_spare_shift;
+
+ fmt |= mtk_nand->fdm.reg_size << PAGEFMT_FDM_SHIFT;
+ fmt |= mtk_nand->fdm.ecc_size << PAGEFMT_FDM_ECC_SHIFT;
+ nfi_writel(nfc, fmt, NFI_PAGEFMT);
+
+ nfc->ecc_cfg.strength = chip->ecc.strength;
+ nfc->ecc_cfg.len = chip->ecc.size + mtk_nand->fdm.ecc_size;
+
+ return 0;
+}
+
+static inline void mtk_nfc_wait_ioready(struct mtk_nfc *nfc)
+{
+ int rc;
+ u8 val;
+
+ rc = readb_poll_timeout_atomic(nfc->regs + NFI_PIO_DIRDY, val,
+ val & PIO_DI_RDY, 10, MTK_TIMEOUT);
+ if (rc < 0)
+ dev_err(nfc->dev, "data not ready\n");
+}
+
+static inline u8 mtk_nfc_read_byte(struct nand_chip *chip)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ u32 reg;
+
+ /* after each byte read, the NFI_STA reg is reset by the hardware */
+ reg = nfi_readl(nfc, NFI_STA) & NFI_FSM_MASK;
+ if (reg != NFI_FSM_CUSTDATA) {
+ reg = nfi_readw(nfc, NFI_CNFG);
+ reg |= CNFG_BYTE_RW | CNFG_READ_EN;
+ nfi_writew(nfc, reg, NFI_CNFG);
+
+ /*
+ * set to max sector to allow the HW to continue reading over
+ * unaligned accesses
+ */
+ reg = (nfc->caps->max_sector << CON_SEC_SHIFT) | CON_BRD;
+ nfi_writel(nfc, reg, NFI_CON);
+
+ /* trigger to fetch data */
+ nfi_writew(nfc, STAR_EN, NFI_STRDATA);
+ }
+
+ mtk_nfc_wait_ioready(nfc);
+
+ return nfi_readb(nfc, NFI_DATAR);
+}
+
+static void mtk_nfc_read_buf(struct nand_chip *chip, u8 *buf, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ buf[i] = mtk_nfc_read_byte(chip);
+}
+
+static void mtk_nfc_write_byte(struct nand_chip *chip, u8 byte)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ u32 reg;
+
+ reg = nfi_readl(nfc, NFI_STA) & NFI_FSM_MASK;
+
+ if (reg != NFI_FSM_CUSTDATA) {
+ reg = nfi_readw(nfc, NFI_CNFG) | CNFG_BYTE_RW;
+ nfi_writew(nfc, reg, NFI_CNFG);
+
+ reg = nfc->caps->max_sector << CON_SEC_SHIFT | CON_BWR;
+ nfi_writel(nfc, reg, NFI_CON);
+
+ nfi_writew(nfc, STAR_EN, NFI_STRDATA);
+ }
+
+ mtk_nfc_wait_ioready(nfc);
+ nfi_writeb(nfc, byte, NFI_DATAW);
+}
+
+static void mtk_nfc_write_buf(struct nand_chip *chip, const u8 *buf, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ mtk_nfc_write_byte(chip, buf[i]);
+}
+
+static int mtk_nfc_exec_instr(struct nand_chip *chip,
+ const struct nand_op_instr *instr)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ unsigned int i;
+ u32 status;
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ mtk_nfc_send_command(nfc, instr->ctx.cmd.opcode);
+ return 0;
+ case NAND_OP_ADDR_INSTR:
+ for (i = 0; i < instr->ctx.addr.naddrs; i++)
+ mtk_nfc_send_address(nfc, instr->ctx.addr.addrs[i]);
+ return 0;
+ case NAND_OP_DATA_IN_INSTR:
+ mtk_nfc_read_buf(chip, instr->ctx.data.buf.in,
+ instr->ctx.data.len);
+ return 0;
+ case NAND_OP_DATA_OUT_INSTR:
+ mtk_nfc_write_buf(chip, instr->ctx.data.buf.out,
+ instr->ctx.data.len);
+ return 0;
+ case NAND_OP_WAITRDY_INSTR:
+ return readl_poll_timeout(nfc->regs + NFI_STA, status,
+ !(status & STA_BUSY), 20,
+ instr->ctx.waitrdy.timeout_ms * 1000);
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static void mtk_nfc_select_target(struct nand_chip *nand, unsigned int cs)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(nand);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(nand);
+
+ mtk_nfc_hw_runtime_config(nand_to_mtd(nand));
+
+ nfi_writel(nfc, mtk_nand->sels[cs], NFI_CSEL);
+}
+
+static int mtk_nfc_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ unsigned int i;
+ int ret = 0;
+
+ if (check_only)
+ return 0;
+
+ mtk_nfc_hw_reset(nfc);
+ nfi_writew(nfc, CNFG_OP_CUST, NFI_CNFG);
+ mtk_nfc_select_target(chip, op->cs);
+
+ for (i = 0; i < op->ninstrs; i++) {
+ ret = mtk_nfc_exec_instr(chip, &op->instrs[i]);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static int mtk_nfc_setup_interface(struct nand_chip *chip, int csline,
+ const struct nand_interface_config *conf)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ const struct nand_sdr_timings *timings;
+ u32 rate, tpoecs, tprecs, tc2r, tw2r, twh, twst = 0, trlt = 0;
+ u32 temp, tsel = 0;
+
+ timings = nand_get_sdr_timings(conf);
+ if (IS_ERR(timings))
+ return -ENOTSUPP;
+
+ if (csline == NAND_DATA_IFACE_CHECK_ONLY)
+ return 0;
+
+ rate = clk_get_rate(nfc->clk.nfi_clk);
+ /* There is a frequency divider in some IPs */
+ rate /= nfc->caps->nfi_clk_div;
+
+	/* turn clock rate into kHz */
+ rate /= 1000;
+
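+	/*
+	 * The SDR timings below are given in picoseconds; with the rate in
+	 * kHz, t / 1000 converts to nanoseconds and DIV_ROUND_UP(t * rate,
+	 * 1000000) converts nanoseconds to clock cycles.
+	 */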
+ tpoecs = max(timings->tALH_min, timings->tCLH_min) / 1000;
+ tpoecs = DIV_ROUND_UP(tpoecs * rate, 1000000);
+ tpoecs &= 0xf;
+
+ tprecs = max(timings->tCLS_min, timings->tALS_min) / 1000;
+ tprecs = DIV_ROUND_UP(tprecs * rate, 1000000);
+ tprecs &= 0x3f;
+
+ /* sdr interface has no tCR which means CE# low to RE# low */
+ tc2r = 0;
+
+ tw2r = timings->tWHR_min / 1000;
+ tw2r = DIV_ROUND_UP(tw2r * rate, 1000000);
+ tw2r = DIV_ROUND_UP(tw2r - 1, 2);
+ tw2r &= 0xf;
+
+ twh = max(timings->tREH_min, timings->tWH_min) / 1000;
+ twh = DIV_ROUND_UP(twh * rate, 1000000) - 1;
+ twh &= 0xf;
+
+ /* Calculate real WE#/RE# hold time in nanosecond */
+ temp = (twh + 1) * 1000000 / rate;
+ /* nanosecond to picosecond */
+ temp *= 1000;
+
+	/*
+	 * WE# low level time should be expanded to meet WE# pulse time
+	 * and WE# cycle time at the same time.
+	 */
+ if (temp < timings->tWC_min)
+ twst = timings->tWC_min - temp;
+ twst = max(timings->tWP_min, twst) / 1000;
+ twst = DIV_ROUND_UP(twst * rate, 1000000) - 1;
+ twst &= 0xf;
+
+	/*
+	 * RE# low level time should be expanded to meet RE# pulse time
+	 * and RE# cycle time at the same time.
+	 */
+ if (temp < timings->tRC_min)
+ trlt = timings->tRC_min - temp;
+ trlt = max(trlt, timings->tRP_min) / 1000;
+ trlt = DIV_ROUND_UP(trlt * rate, 1000000) - 1;
+ trlt &= 0xf;
+
+ /* Calculate RE# pulse time in nanosecond. */
+ temp = (trlt + 1) * 1000000 / rate;
+ /* nanosecond to picosecond */
+ temp *= 1000;
+	/*
+	 * If the RE# access time is longer than the RE# pulse time,
+	 * delay the data sampling timing.
+	 */
+ if (temp < timings->tREA_max) {
+ tsel = timings->tREA_max / 1000;
+ tsel = DIV_ROUND_UP(tsel * rate, 1000000);
+ tsel -= (trlt + 1);
+ if (tsel > MAX_STROBE_DLY) {
+ trlt += tsel - MAX_STROBE_DLY;
+ tsel = MAX_STROBE_DLY;
+ }
+ }
+ temp = nfi_readl(nfc, NFI_DEBUG_CON1);
+ temp &= ~STROBE_MASK;
+ temp |= tsel << STROBE_SHIFT;
+ nfi_writel(nfc, temp, NFI_DEBUG_CON1);
+
+ /*
+ * ACCON: access timing control register
+ * -------------------------------------
+ * 31:28: tpoecs, minimum required time for CS post pulling down after
+ * accessing the device
+ * 27:22: tprecs, minimum required time for CS pre pulling down before
+ * accessing the device
+ * 21:16: tc2r, minimum required time from NCEB low to NREB low
+ * 15:12: tw2r, minimum required time from NWEB high to NREB low.
+ * 11:08: twh, write enable hold time
+ * 07:04: twst, write wait states
+ * 03:00: trlt, read wait states
+ */
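+	/*
+	 * For instance (values assumed), ACCTIMING(1, 2, 0, 1, 1, 3, 5)
+	 * packs to 0x10801135.
+	 */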
+ trlt = ACCTIMING(tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt);
+ nfi_writel(nfc, trlt, NFI_ACCCON);
+
+ return 0;
+}
+
+static int mtk_nfc_sector_encode(struct nand_chip *chip, u8 *data)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ int size = chip->ecc.size + mtk_nand->fdm.reg_size;
+
+ nfc->ecc_cfg.mode = ECC_DMA_MODE;
+ nfc->ecc_cfg.op = ECC_ENCODE;
+
+ return mtk_ecc_encode(nfc->ecc, &nfc->ecc_cfg, data, size);
+}
+
+static void mtk_nfc_no_bad_mark_swap(struct mtd_info *a, u8 *b, int c)
+{
+ /* nop */
+}
+
+static void mtk_nfc_bad_mark_swap(struct mtd_info *mtd, u8 *buf, int raw)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtk_nfc_nand_chip *nand = to_mtk_nand(chip);
+ u32 bad_pos = nand->bad_mark.pos;
+
+ if (raw)
+ bad_pos += nand->bad_mark.sec * mtk_data_len(chip);
+ else
+ bad_pos += nand->bad_mark.sec * chip->ecc.size;
+
+ swap(chip->oob_poi[0], buf[bad_pos]);
+}
+
+static int mtk_nfc_format_subpage(struct mtd_info *mtd, u32 offset,
+ u32 len, const u8 *buf)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
+ u32 start, end;
+ int i, ret;
+
+ start = offset / chip->ecc.size;
+ end = DIV_ROUND_UP(offset + len, chip->ecc.size);
+
+ memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize);
+ for (i = 0; i < chip->ecc.steps; i++) {
+ memcpy(mtk_data_ptr(chip, i), data_ptr(chip, buf, i),
+ chip->ecc.size);
+
+ if (start > i || i >= end)
+ continue;
+
+ if (i == mtk_nand->bad_mark.sec)
+ mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1);
+
+ memcpy(mtk_oob_ptr(chip, i), oob_ptr(chip, i), fdm->reg_size);
+
+		/* write the ECC parity data back into the OOB */
+ ret = mtk_nfc_sector_encode(chip, mtk_data_ptr(chip, i));
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void mtk_nfc_format_page(struct mtd_info *mtd, const u8 *buf)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
+ u32 i;
+
+ memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize);
+ for (i = 0; i < chip->ecc.steps; i++) {
+ if (buf)
+ memcpy(mtk_data_ptr(chip, i), data_ptr(chip, buf, i),
+ chip->ecc.size);
+
+ if (i == mtk_nand->bad_mark.sec)
+ mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1);
+
+ memcpy(mtk_oob_ptr(chip, i), oob_ptr(chip, i), fdm->reg_size);
+ }
+}
+
+static inline void mtk_nfc_read_fdm(struct nand_chip *chip, u32 start,
+ u32 sectors)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
+ u32 vall, valm;
+ u8 *oobptr;
+ int i, j;
+
+ for (i = 0; i < sectors; i++) {
+ oobptr = oob_ptr(chip, start + i);
+ vall = nfi_readl(nfc, NFI_FDML(i));
+ valm = nfi_readl(nfc, NFI_FDMM(i));
+
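+		/* NFI_FDML holds FDM bytes 0..3, NFI_FDMM holds bytes 4..7 */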
+ for (j = 0; j < fdm->reg_size; j++)
+ oobptr[j] = (j >= 4 ? valm : vall) >> ((j % 4) * 8);
+ }
+}
+
+static inline void mtk_nfc_write_fdm(struct nand_chip *chip)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
+ u32 vall, valm;
+ u8 *oobptr;
+ int i, j;
+
+ for (i = 0; i < chip->ecc.steps; i++) {
+ oobptr = oob_ptr(chip, i);
+ vall = 0;
+ valm = 0;
+ for (j = 0; j < 8; j++) {
+ if (j < 4)
+ vall |= (j < fdm->reg_size ? oobptr[j] : 0xff)
+ << (j * 8);
+ else
+ valm |= (j < fdm->reg_size ? oobptr[j] : 0xff)
+ << ((j - 4) * 8);
+ }
+ nfi_writel(nfc, vall, NFI_FDML(i));
+ nfi_writel(nfc, valm, NFI_FDMM(i));
+ }
+}
+
+static int mtk_nfc_do_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+ const u8 *buf, int page, int len)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct device *dev = nfc->dev;
+ dma_addr_t addr;
+ u32 reg;
+ int ret;
+
+ addr = dma_map_single(dev, (void *)buf, len, DMA_TO_DEVICE);
+ ret = dma_mapping_error(nfc->dev, addr);
+ if (ret) {
+ dev_err(nfc->dev, "dma mapping error\n");
+ return -EINVAL;
+ }
+
+ reg = nfi_readw(nfc, NFI_CNFG) | CNFG_AHB | CNFG_DMA_BURST_EN;
+ nfi_writew(nfc, reg, NFI_CNFG);
+
+ nfi_writel(nfc, chip->ecc.steps << CON_SEC_SHIFT, NFI_CON);
+ nfi_writel(nfc, lower_32_bits(addr), NFI_STRADDR);
+ nfi_writew(nfc, INTR_AHB_DONE_EN, NFI_INTR_EN);
+
+ init_completion(&nfc->done);
+
+ reg = nfi_readl(nfc, NFI_CON) | CON_BWR;
+ nfi_writel(nfc, reg, NFI_CON);
+ nfi_writew(nfc, STAR_EN, NFI_STRDATA);
+
+ ret = wait_for_completion_timeout(&nfc->done, msecs_to_jiffies(500));
+ if (!ret) {
+ dev_err(dev, "program ahb done timeout\n");
+ nfi_writew(nfc, 0, NFI_INTR_EN);
+ ret = -ETIMEDOUT;
+ goto timeout;
+ }
+
+ ret = readl_poll_timeout_atomic(nfc->regs + NFI_ADDRCNTR, reg,
+ ADDRCNTR_SEC(reg) >= chip->ecc.steps,
+ 10, MTK_TIMEOUT);
+ if (ret)
+ dev_err(dev, "hwecc write timeout\n");
+
+timeout:
+
+ dma_unmap_single(nfc->dev, addr, len, DMA_TO_DEVICE);
+ nfi_writel(nfc, 0, NFI_CON);
+
+ return ret;
+}
+
+static int mtk_nfc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+ const u8 *buf, int page, int raw)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ size_t len;
+ const u8 *bufpoi;
+ u32 reg;
+ int ret;
+
+ mtk_nfc_select_target(chip, chip->cur_cs);
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
+ if (!raw) {
+ /* OOB => FDM: from register, ECC: from HW */
+ reg = nfi_readw(nfc, NFI_CNFG) | CNFG_AUTO_FMT_EN;
+ nfi_writew(nfc, reg | CNFG_HW_ECC_EN, NFI_CNFG);
+
+ nfc->ecc_cfg.op = ECC_ENCODE;
+ nfc->ecc_cfg.mode = ECC_NFI_MODE;
+ ret = mtk_ecc_enable(nfc->ecc, &nfc->ecc_cfg);
+ if (ret) {
+ /* clear NFI config */
+ reg = nfi_readw(nfc, NFI_CNFG);
+ reg &= ~(CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN);
+ nfi_writew(nfc, reg, NFI_CNFG);
+
+ return ret;
+ }
+
+ memcpy(nfc->buffer, buf, mtd->writesize);
+ mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, raw);
+ bufpoi = nfc->buffer;
+
+ /* write OOB into the FDM registers (OOB area in MTK NAND) */
+ mtk_nfc_write_fdm(chip);
+ } else {
+ bufpoi = buf;
+ }
+
+ len = mtd->writesize + (raw ? mtd->oobsize : 0);
+ ret = mtk_nfc_do_write_page(mtd, chip, bufpoi, page, len);
+
+ if (!raw)
+ mtk_ecc_disable(nfc->ecc);
+
+ if (ret)
+ return ret;
+
+ return nand_prog_page_end_op(chip);
+}
+
+static int mtk_nfc_write_page_hwecc(struct nand_chip *chip, const u8 *buf,
+ int oob_on, int page)
+{
+ return mtk_nfc_write_page(nand_to_mtd(chip), chip, buf, page, 0);
+}
+
+static int mtk_nfc_write_page_raw(struct nand_chip *chip, const u8 *buf,
+ int oob_on, int pg)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+
+ mtk_nfc_format_page(mtd, buf);
+ return mtk_nfc_write_page(mtd, chip, nfc->buffer, pg, 1);
+}
+
+static int mtk_nfc_write_subpage_hwecc(struct nand_chip *chip, u32 offset,
+ u32 data_len, const u8 *buf,
+ int oob_on, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ int ret;
+
+ ret = mtk_nfc_format_subpage(mtd, offset, data_len, buf);
+ if (ret < 0)
+ return ret;
+
+	/* use the data in the private buffer (now with FDM and ECC parity) */
+ return mtk_nfc_write_page(mtd, chip, nfc->buffer, page, 1);
+}
+
+static int mtk_nfc_write_oob_std(struct nand_chip *chip, int page)
+{
+ return mtk_nfc_write_page_raw(chip, NULL, 1, page);
+}
+
+static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 start,
+ u32 sectors)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ struct mtk_ecc_stats stats;
+ u32 reg_size = mtk_nand->fdm.reg_size;
+ int rc, i;
+
+ rc = nfi_readl(nfc, NFI_STA) & STA_EMP_PAGE;
+ if (rc) {
+ memset(buf, 0xff, sectors * chip->ecc.size);
+ for (i = 0; i < sectors; i++)
+ memset(oob_ptr(chip, start + i), 0xff, reg_size);
+ return 0;
+ }
+
+ mtk_ecc_get_stats(nfc->ecc, &stats, sectors);
+ mtd->ecc_stats.corrected += stats.corrected;
+ mtd->ecc_stats.failed += stats.failed;
+
+ return stats.bitflips;
+}
+
+static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
+ u32 data_offs, u32 readlen,
+ u8 *bufpoi, int page, int raw)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ u32 spare = mtk_nand->spare_per_sector;
+ u32 column, sectors, start, end, reg;
+ dma_addr_t addr;
+ int bitflips = 0;
+ size_t len;
+ u8 *buf;
+ int rc;
+
+ mtk_nfc_select_target(chip, chip->cur_cs);
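+	/*
+	 * compute the sector range covered by the request; on flash each
+	 * sector is stored as data immediately followed by its spare bytes
+	 */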
+ start = data_offs / chip->ecc.size;
+ end = DIV_ROUND_UP(data_offs + readlen, chip->ecc.size);
+
+ sectors = end - start;
+ column = start * (chip->ecc.size + spare);
+
+ len = sectors * chip->ecc.size + (raw ? sectors * spare : 0);
+ buf = bufpoi + start * chip->ecc.size;
+
+ nand_read_page_op(chip, page, column, NULL, 0);
+
+ addr = dma_map_single(nfc->dev, buf, len, DMA_FROM_DEVICE);
+ rc = dma_mapping_error(nfc->dev, addr);
+ if (rc) {
+ dev_err(nfc->dev, "dma mapping error\n");
+
+ return -EINVAL;
+ }
+
+ reg = nfi_readw(nfc, NFI_CNFG);
+ reg |= CNFG_READ_EN | CNFG_DMA_BURST_EN | CNFG_AHB;
+ if (!raw) {
+ reg |= CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN;
+ nfi_writew(nfc, reg, NFI_CNFG);
+
+ nfc->ecc_cfg.mode = ECC_NFI_MODE;
+ nfc->ecc_cfg.sectors = sectors;
+ nfc->ecc_cfg.op = ECC_DECODE;
+ rc = mtk_ecc_enable(nfc->ecc, &nfc->ecc_cfg);
+ if (rc) {
+			dev_err(nfc->dev, "failed to enable ecc\n");
+ /* clear NFI_CNFG */
+ reg &= ~(CNFG_DMA_BURST_EN | CNFG_AHB | CNFG_READ_EN |
+ CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN);
+ nfi_writew(nfc, reg, NFI_CNFG);
+ dma_unmap_single(nfc->dev, addr, len, DMA_FROM_DEVICE);
+
+ return rc;
+ }
+ } else {
+ nfi_writew(nfc, reg, NFI_CNFG);
+ }
+
+ nfi_writel(nfc, sectors << CON_SEC_SHIFT, NFI_CON);
+ nfi_writew(nfc, INTR_AHB_DONE_EN, NFI_INTR_EN);
+ nfi_writel(nfc, lower_32_bits(addr), NFI_STRADDR);
+
+ init_completion(&nfc->done);
+ reg = nfi_readl(nfc, NFI_CON) | CON_BRD;
+ nfi_writel(nfc, reg, NFI_CON);
+ nfi_writew(nfc, STAR_EN, NFI_STRDATA);
+
+ rc = wait_for_completion_timeout(&nfc->done, msecs_to_jiffies(500));
+ if (!rc)
+ dev_warn(nfc->dev, "read ahb/dma done timeout\n");
+
+ rc = readl_poll_timeout_atomic(nfc->regs + NFI_BYTELEN, reg,
+ ADDRCNTR_SEC(reg) >= sectors, 10,
+ MTK_TIMEOUT);
+ if (rc < 0) {
+ dev_err(nfc->dev, "subpage done timeout\n");
+ bitflips = -EIO;
+ } else if (!raw) {
+ rc = mtk_ecc_wait_done(nfc->ecc, ECC_DECODE);
+ bitflips = rc < 0 ? -ETIMEDOUT :
+ mtk_nfc_update_ecc_stats(mtd, buf, start, sectors);
+ mtk_nfc_read_fdm(chip, start, sectors);
+ }
+
+ dma_unmap_single(nfc->dev, addr, len, DMA_FROM_DEVICE);
+
+ if (raw)
+ goto done;
+
+ mtk_ecc_disable(nfc->ecc);
+
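+	/* undo the bad block marker swap only if its sector was read */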
+ if (clamp(mtk_nand->bad_mark.sec, start, end) == mtk_nand->bad_mark.sec)
+ mtk_nand->bad_mark.bm_swap(mtd, bufpoi, raw);
+done:
+ nfi_writel(nfc, 0, NFI_CON);
+
+ return bitflips;
+}
+
+static int mtk_nfc_read_subpage_hwecc(struct nand_chip *chip, u32 off,
+ u32 len, u8 *p, int pg)
+{
+ return mtk_nfc_read_subpage(nand_to_mtd(chip), chip, off, len, p, pg,
+ 0);
+}
+
+static int mtk_nfc_read_page_hwecc(struct nand_chip *chip, u8 *p, int oob_on,
+ int pg)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ return mtk_nfc_read_subpage(mtd, chip, 0, mtd->writesize, p, pg, 0);
+}
+
+static int mtk_nfc_read_page_raw(struct nand_chip *chip, u8 *buf, int oob_on,
+ int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
+ int i, ret;
+
+ memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize);
+ ret = mtk_nfc_read_subpage(mtd, chip, 0, mtd->writesize, nfc->buffer,
+ page, 1);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < chip->ecc.steps; i++) {
+ memcpy(oob_ptr(chip, i), mtk_oob_ptr(chip, i), fdm->reg_size);
+
+ if (i == mtk_nand->bad_mark.sec)
+ mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1);
+
+ if (buf)
+ memcpy(data_ptr(chip, buf, i), mtk_data_ptr(chip, i),
+ chip->ecc.size);
+ }
+
+ return ret;
+}
+
+static int mtk_nfc_read_oob_std(struct nand_chip *chip, int page)
+{
+ return mtk_nfc_read_page_raw(chip, NULL, 1, page);
+}
+
+static inline void mtk_nfc_hw_init(struct mtk_nfc *nfc)
+{
+ /*
+ * CNRNB: nand ready/busy register
+ * -------------------------------
+ * 7:4: timeout register for polling the NAND busy/ready signal
+	 * 0: poll the status of the busy/ready signal after [7:4]*16 cycles.
+ */
+ nfi_writew(nfc, 0xf1, NFI_CNRNB);
+ nfi_writel(nfc, PAGEFMT_8K_16K, NFI_PAGEFMT);
+
+ mtk_nfc_hw_reset(nfc);
+
+ nfi_readl(nfc, NFI_INTR_STA);
+ nfi_writel(nfc, 0, NFI_INTR_EN);
+}
+
+static irqreturn_t mtk_nfc_irq(int irq, void *id)
+{
+ struct mtk_nfc *nfc = id;
+ u16 sta, ien;
+
+ sta = nfi_readw(nfc, NFI_INTR_STA);
+ ien = nfi_readw(nfc, NFI_INTR_EN);
+
+ if (!(sta & ien))
+ return IRQ_NONE;
+
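+	/* mask only the sources that fired; the next operation re-arms them */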
+ nfi_writew(nfc, ~sta & ien, NFI_INTR_EN);
+ complete(&nfc->done);
+
+ return IRQ_HANDLED;
+}
+
+static int mtk_nfc_enable_clk(struct device *dev, struct mtk_nfc_clk *clk)
+{
+ int ret;
+
+ ret = clk_prepare_enable(clk->nfi_clk);
+ if (ret) {
+ dev_err(dev, "failed to enable nfi clk\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(clk->pad_clk);
+ if (ret) {
+ dev_err(dev, "failed to enable pad clk\n");
+ clk_disable_unprepare(clk->nfi_clk);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void mtk_nfc_disable_clk(struct mtk_nfc_clk *clk)
+{
+ clk_disable_unprepare(clk->nfi_clk);
+ clk_disable_unprepare(clk->pad_clk);
+}
+
+static int mtk_nfc_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oob_region)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
+ u32 eccsteps;
+
+ eccsteps = mtd->writesize / chip->ecc.size;
+
+ if (section >= eccsteps)
+ return -ERANGE;
+
+ oob_region->length = fdm->reg_size - fdm->ecc_size;
+ oob_region->offset = section * fdm->reg_size + fdm->ecc_size;
+
+ return 0;
+}
+
+static int mtk_nfc_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oob_region)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ u32 eccsteps;
+
+ if (section)
+ return -ERANGE;
+
+ eccsteps = mtd->writesize / chip->ecc.size;
+ oob_region->offset = mtk_nand->fdm.reg_size * eccsteps;
+ oob_region->length = mtd->oobsize - oob_region->offset;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops mtk_nfc_ooblayout_ops = {
+ .free = mtk_nfc_ooblayout_free,
+ .ecc = mtk_nfc_ooblayout_ecc,
+};
+
+static void mtk_nfc_set_fdm(struct mtk_nfc_fdm *fdm, struct mtd_info *mtd)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct mtk_nfc_nand_chip *chip = to_mtk_nand(nand);
+ struct mtk_nfc *nfc = nand_get_controller_data(nand);
+ u32 ecc_bytes;
+
+ ecc_bytes = DIV_ROUND_UP(nand->ecc.strength *
+ mtk_ecc_get_parity_bits(nfc->ecc), 8);
+
+ fdm->reg_size = chip->spare_per_sector - ecc_bytes;
+ if (fdm->reg_size > NFI_FDM_MAX_SIZE)
+ fdm->reg_size = NFI_FDM_MAX_SIZE;
+
+ /* bad block mark storage */
+ fdm->ecc_size = 1;
+}
+
+static void mtk_nfc_set_bad_mark_ctl(struct mtk_nfc_bad_mark_ctl *bm_ctl,
+ struct mtd_info *mtd)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+
+ if (mtd->writesize == 512) {
+ bm_ctl->bm_swap = mtk_nfc_no_bad_mark_swap;
+ } else {
+ bm_ctl->bm_swap = mtk_nfc_bad_mark_swap;
+ bm_ctl->sec = mtd->writesize / mtk_data_len(nand);
+ bm_ctl->pos = mtd->writesize % mtk_data_len(nand);
+ }
+}
+
+static int mtk_nfc_set_spare_per_sector(u32 *sps, struct mtd_info *mtd)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct mtk_nfc *nfc = nand_get_controller_data(nand);
+ const u8 *spare = nfc->caps->spare_size;
+ u32 eccsteps, i, closest_spare = 0;
+
+ eccsteps = mtd->writesize / nand->ecc.size;
+ *sps = mtd->oobsize / eccsteps;
+
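+	/*
+	 * the spare size table is given per 512-byte sector: halve before
+	 * the lookup for 1KB sectors and double the result afterwards
+	 */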
+ if (nand->ecc.size == 1024)
+ *sps >>= 1;
+
+ if (*sps < MTK_NFC_MIN_SPARE)
+ return -EINVAL;
+
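+	/* pick the largest supported spare size not exceeding what is available */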
+ for (i = 0; i < nfc->caps->num_spare_size; i++) {
+ if (*sps >= spare[i] && spare[i] >= spare[closest_spare]) {
+ closest_spare = i;
+ if (*sps == spare[i])
+ break;
+ }
+ }
+
+ *sps = spare[closest_spare];
+
+ if (nand->ecc.size == 1024)
+ *sps <<= 1;
+
+ return 0;
+}
+
+static int mtk_nfc_ecc_init(struct device *dev, struct mtd_info *mtd)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ const struct nand_ecc_props *requirements =
+ nanddev_get_ecc_requirements(&nand->base);
+ struct mtk_nfc *nfc = nand_get_controller_data(nand);
+ u32 spare;
+ int free, ret;
+
+ /* support only ecc hw mode */
+ if (nand->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST) {
+ dev_err(dev, "ecc.engine_type not supported\n");
+ return -EINVAL;
+ }
+
+ /* if optional dt settings not present */
+ if (!nand->ecc.size || !nand->ecc.strength) {
+ /* use datasheet requirements */
+ nand->ecc.strength = requirements->strength;
+ nand->ecc.size = requirements->step_size;
+
+ /*
+ * align eccstrength and eccsize
+ * this controller only supports 512 and 1024 sizes
+ */
+ if (nand->ecc.size < 1024) {
+ if (mtd->writesize > 512 &&
+ nfc->caps->max_sector_size > 512) {
+ nand->ecc.size = 1024;
+ nand->ecc.strength <<= 1;
+ } else {
+ nand->ecc.size = 512;
+ }
+ } else {
+ nand->ecc.size = 1024;
+ }
+
+ ret = mtk_nfc_set_spare_per_sector(&spare, mtd);
+ if (ret)
+ return ret;
+
+ /* calculate oob bytes except ecc parity data */
+ free = (nand->ecc.strength * mtk_ecc_get_parity_bits(nfc->ecc)
+ + 7) >> 3;
+ free = spare - free;
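+		/*
+		 * e.g. assuming 13 parity bits per bit of correction capability
+		 * and strength 12: (12 * 13 + 7) >> 3 = 20 parity bytes
+		 */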
+
+ /*
+		 * increase the ecc strength if the oob left over is bigger than
+		 * the max FDM size, or reduce it if the oob is too small to
+		 * hold the ecc parity data.
+ */
+ if (free > NFI_FDM_MAX_SIZE) {
+ spare -= NFI_FDM_MAX_SIZE;
+ nand->ecc.strength = (spare << 3) /
+ mtk_ecc_get_parity_bits(nfc->ecc);
+ } else if (free < 0) {
+ spare -= NFI_FDM_MIN_SIZE;
+ nand->ecc.strength = (spare << 3) /
+ mtk_ecc_get_parity_bits(nfc->ecc);
+ }
+ }
+
+ mtk_ecc_adjust_strength(nfc->ecc, &nand->ecc.strength);
+
+ dev_info(dev, "eccsize %d eccstrength %d\n",
+ nand->ecc.size, nand->ecc.strength);
+
+ return 0;
+}
+
+static int mtk_nfc_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct device *dev = mtd->dev.parent;
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ int len;
+ int ret;
+
+ if (chip->options & NAND_BUSWIDTH_16) {
+ dev_err(dev, "16bits buswidth not supported");
+ return -EINVAL;
+ }
+
+	/* store the bbt magic in the page, because the OOB area is not ECC protected */
+ if (chip->bbt_options & NAND_BBT_USE_FLASH)
+ chip->bbt_options |= NAND_BBT_NO_OOB;
+
+ ret = mtk_nfc_ecc_init(dev, mtd);
+ if (ret)
+ return ret;
+
+ ret = mtk_nfc_set_spare_per_sector(&mtk_nand->spare_per_sector, mtd);
+ if (ret)
+ return ret;
+
+ mtk_nfc_set_fdm(&mtk_nand->fdm, mtd);
+ mtk_nfc_set_bad_mark_ctl(&mtk_nand->bad_mark, mtd);
+
+ len = mtd->writesize + mtd->oobsize;
+ nfc->buffer = devm_kzalloc(dev, len, GFP_KERNEL);
+ if (!nfc->buffer)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static const struct nand_controller_ops mtk_nfc_controller_ops = {
+ .attach_chip = mtk_nfc_attach_chip,
+ .setup_interface = mtk_nfc_setup_interface,
+ .exec_op = mtk_nfc_exec_op,
+};
+
+static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc,
+ struct device_node *np)
+{
+ struct mtk_nfc_nand_chip *chip;
+ struct nand_chip *nand;
+ struct mtd_info *mtd;
+ int nsels;
+ u32 tmp;
+ int ret;
+ int i;
+
+ if (!of_get_property(np, "reg", &nsels))
+ return -ENODEV;
+
+ nsels /= sizeof(u32);
+ if (!nsels || nsels > MTK_NAND_MAX_NSELS) {
+ dev_err(dev, "invalid reg property size %d\n", nsels);
+ return -EINVAL;
+ }
+
+ chip = devm_kzalloc(dev, sizeof(*chip) + nsels * sizeof(u8),
+ GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->nsels = nsels;
+ for (i = 0; i < nsels; i++) {
+ ret = of_property_read_u32_index(np, "reg", i, &tmp);
+ if (ret) {
+			dev_err(dev, "reg property failure: %d\n", ret);
+ return ret;
+ }
+
+ if (tmp >= MTK_NAND_MAX_NSELS) {
+ dev_err(dev, "invalid CS: %u\n", tmp);
+ return -EINVAL;
+ }
+
+ if (test_and_set_bit(tmp, &nfc->assigned_cs)) {
+ dev_err(dev, "CS %u already assigned\n", tmp);
+ return -EINVAL;
+ }
+
+ chip->sels[i] = tmp;
+ }
+
+ nand = &chip->nand;
+ nand->controller = &nfc->controller;
+
+ nand_set_flash_node(nand, np);
+ nand_set_controller_data(nand, nfc);
+
+ nand->options |= NAND_USES_DMA | NAND_SUBPAGE_READ;
+
+ /* set default mode in case dt entry is missing */
+ nand->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+
+ nand->ecc.write_subpage = mtk_nfc_write_subpage_hwecc;
+ nand->ecc.write_page_raw = mtk_nfc_write_page_raw;
+ nand->ecc.write_page = mtk_nfc_write_page_hwecc;
+ nand->ecc.write_oob_raw = mtk_nfc_write_oob_std;
+ nand->ecc.write_oob = mtk_nfc_write_oob_std;
+
+ nand->ecc.read_subpage = mtk_nfc_read_subpage_hwecc;
+ nand->ecc.read_page_raw = mtk_nfc_read_page_raw;
+ nand->ecc.read_page = mtk_nfc_read_page_hwecc;
+ nand->ecc.read_oob_raw = mtk_nfc_read_oob_std;
+ nand->ecc.read_oob = mtk_nfc_read_oob_std;
+
+ mtd = nand_to_mtd(nand);
+ mtd->owner = THIS_MODULE;
+ mtd->dev.parent = dev;
+ mtd->name = MTK_NAME;
+ mtd_set_ooblayout(mtd, &mtk_nfc_ooblayout_ops);
+
+ mtk_nfc_hw_init(nfc);
+
+ ret = nand_scan(nand, nsels);
+ if (ret)
+ return ret;
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret) {
+		dev_err(dev, "failed to register mtd device\n");
+ nand_cleanup(nand);
+ return ret;
+ }
+
+ list_add_tail(&chip->node, &nfc->chips);
+
+ return 0;
+}
+
+static int mtk_nfc_nand_chips_init(struct device *dev, struct mtk_nfc *nfc)
+{
+ struct device_node *np = dev->of_node;
+ struct device_node *nand_np;
+ int ret;
+
+ for_each_child_of_node(np, nand_np) {
+ ret = mtk_nfc_nand_chip_init(dev, nfc, nand_np);
+ if (ret) {
+ of_node_put(nand_np);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static const struct mtk_nfc_caps mtk_nfc_caps_mt2701 = {
+ .spare_size = spare_size_mt2701,
+ .num_spare_size = 16,
+ .pageformat_spare_shift = 4,
+ .nfi_clk_div = 1,
+ .max_sector = 16,
+ .max_sector_size = 1024,
+};
+
+static const struct mtk_nfc_caps mtk_nfc_caps_mt2712 = {
+ .spare_size = spare_size_mt2712,
+ .num_spare_size = 19,
+ .pageformat_spare_shift = 16,
+ .nfi_clk_div = 2,
+ .max_sector = 16,
+ .max_sector_size = 1024,
+};
+
+static const struct mtk_nfc_caps mtk_nfc_caps_mt7622 = {
+ .spare_size = spare_size_mt7622,
+ .num_spare_size = 4,
+ .pageformat_spare_shift = 4,
+ .nfi_clk_div = 1,
+ .max_sector = 8,
+ .max_sector_size = 512,
+};
+
+static const struct of_device_id mtk_nfc_id_table[] = {
+ {
+ .compatible = "mediatek,mt2701-nfc",
+ .data = &mtk_nfc_caps_mt2701,
+ }, {
+ .compatible = "mediatek,mt2712-nfc",
+ .data = &mtk_nfc_caps_mt2712,
+ }, {
+ .compatible = "mediatek,mt7622-nfc",
+ .data = &mtk_nfc_caps_mt7622,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mtk_nfc_id_table);
+
+static int mtk_nfc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct mtk_nfc *nfc;
+ struct resource *res;
+ int ret, irq;
+
+ nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL);
+ if (!nfc)
+ return -ENOMEM;
+
+ nand_controller_init(&nfc->controller);
+ INIT_LIST_HEAD(&nfc->chips);
+ nfc->controller.ops = &mtk_nfc_controller_ops;
+
+	/* defer probing if the ECC engine is not ready yet */
+ nfc->ecc = of_mtk_ecc_get(np);
+ if (IS_ERR(nfc->ecc))
+ return PTR_ERR(nfc->ecc);
+ else if (!nfc->ecc)
+ return -ENODEV;
+
+ nfc->caps = of_device_get_match_data(dev);
+ nfc->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ nfc->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(nfc->regs)) {
+ ret = PTR_ERR(nfc->regs);
+ goto release_ecc;
+ }
+
+ nfc->clk.nfi_clk = devm_clk_get(dev, "nfi_clk");
+ if (IS_ERR(nfc->clk.nfi_clk)) {
+ dev_err(dev, "no clk\n");
+ ret = PTR_ERR(nfc->clk.nfi_clk);
+ goto release_ecc;
+ }
+
+ nfc->clk.pad_clk = devm_clk_get(dev, "pad_clk");
+ if (IS_ERR(nfc->clk.pad_clk)) {
+ dev_err(dev, "no pad clk\n");
+ ret = PTR_ERR(nfc->clk.pad_clk);
+ goto release_ecc;
+ }
+
+ ret = mtk_nfc_enable_clk(dev, &nfc->clk);
+ if (ret)
+ goto release_ecc;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+		/* propagate the error, including -EPROBE_DEFER */
+		ret = irq;
+ goto clk_disable;
+ }
+
+ ret = devm_request_irq(dev, irq, mtk_nfc_irq, 0x0, "mtk-nand", nfc);
+ if (ret) {
+ dev_err(dev, "failed to request nfi irq\n");
+ goto clk_disable;
+ }
+
+ ret = dma_set_mask(dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(dev, "failed to set dma mask\n");
+ goto clk_disable;
+ }
+
+ platform_set_drvdata(pdev, nfc);
+
+ ret = mtk_nfc_nand_chips_init(dev, nfc);
+ if (ret) {
+ dev_err(dev, "failed to init nand chips\n");
+ goto clk_disable;
+ }
+
+ return 0;
+
+clk_disable:
+ mtk_nfc_disable_clk(&nfc->clk);
+
+release_ecc:
+ mtk_ecc_release(nfc->ecc);
+
+ return ret;
+}
+
+static int mtk_nfc_remove(struct platform_device *pdev)
+{
+ struct mtk_nfc *nfc = platform_get_drvdata(pdev);
+ struct mtk_nfc_nand_chip *mtk_chip;
+ struct nand_chip *chip;
+ int ret;
+
+ while (!list_empty(&nfc->chips)) {
+ mtk_chip = list_first_entry(&nfc->chips,
+ struct mtk_nfc_nand_chip, node);
+ chip = &mtk_chip->nand;
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ list_del(&mtk_chip->node);
+ }
+
+ mtk_ecc_release(nfc->ecc);
+ mtk_nfc_disable_clk(&nfc->clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mtk_nfc_suspend(struct device *dev)
+{
+ struct mtk_nfc *nfc = dev_get_drvdata(dev);
+
+ mtk_nfc_disable_clk(&nfc->clk);
+
+ return 0;
+}
+
+static int mtk_nfc_resume(struct device *dev)
+{
+ struct mtk_nfc *nfc = dev_get_drvdata(dev);
+ struct mtk_nfc_nand_chip *chip;
+ struct nand_chip *nand;
+ int ret;
+ u32 i;
+
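+	/* short delay to let the supply rails settle (assumed settling time) */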
+ udelay(200);
+
+ ret = mtk_nfc_enable_clk(dev, &nfc->clk);
+ if (ret)
+ return ret;
+
+ /* reset NAND chip if VCC was powered off */
+ list_for_each_entry(chip, &nfc->chips, node) {
+ nand = &chip->nand;
+ for (i = 0; i < chip->nsels; i++)
+ nand_reset(nand, i);
+ }
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(mtk_nfc_pm_ops, mtk_nfc_suspend, mtk_nfc_resume);
+#endif
+
+static struct platform_driver mtk_nfc_driver = {
+ .probe = mtk_nfc_probe,
+ .remove = mtk_nfc_remove,
+ .driver = {
+ .name = MTK_NAME,
+ .of_match_table = mtk_nfc_id_table,
+#ifdef CONFIG_PM_SLEEP
+ .pm = &mtk_nfc_pm_ops,
+#endif
+ },
+};
+
+module_platform_driver(mtk_nfc_driver);
+
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_AUTHOR("Xiaolei Li <xiaolei.li@mediatek.com>");
+MODULE_DESCRIPTION("MTK NAND Flash Controller Driver");
diff --git a/drivers/mtd/nand/raw/mxc_nand.c b/drivers/mtd/nand/raw/mxc_nand.c
new file mode 100644
index 000000000..684c51e5e
--- /dev/null
+++ b/drivers/mtd/nand/raw/mxc_nand.c
@@ -0,0 +1,1941 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright 2008 Sascha Hauer, kernel@pengutronix.de
+ */
+
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/completion.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_data/mtd-mxc_nand.h>
+
+#define DRIVER_NAME "mxc_nand"
+
+/* Addresses for NFC registers */
+#define NFC_V1_V2_BUF_SIZE (host->regs + 0x00)
+#define NFC_V1_V2_BUF_ADDR (host->regs + 0x04)
+#define NFC_V1_V2_FLASH_ADDR (host->regs + 0x06)
+#define NFC_V1_V2_FLASH_CMD (host->regs + 0x08)
+#define NFC_V1_V2_CONFIG (host->regs + 0x0a)
+#define NFC_V1_V2_ECC_STATUS_RESULT (host->regs + 0x0c)
+#define NFC_V1_V2_RSLTMAIN_AREA (host->regs + 0x0e)
+#define NFC_V21_RSLTSPARE_AREA (host->regs + 0x10)
+#define NFC_V1_V2_WRPROT (host->regs + 0x12)
+#define NFC_V1_UNLOCKSTART_BLKADDR (host->regs + 0x14)
+#define NFC_V1_UNLOCKEND_BLKADDR (host->regs + 0x16)
+#define NFC_V21_UNLOCKSTART_BLKADDR0 (host->regs + 0x20)
+#define NFC_V21_UNLOCKSTART_BLKADDR1 (host->regs + 0x24)
+#define NFC_V21_UNLOCKSTART_BLKADDR2 (host->regs + 0x28)
+#define NFC_V21_UNLOCKSTART_BLKADDR3 (host->regs + 0x2c)
+#define NFC_V21_UNLOCKEND_BLKADDR0 (host->regs + 0x22)
+#define NFC_V21_UNLOCKEND_BLKADDR1 (host->regs + 0x26)
+#define NFC_V21_UNLOCKEND_BLKADDR2 (host->regs + 0x2a)
+#define NFC_V21_UNLOCKEND_BLKADDR3 (host->regs + 0x2e)
+#define NFC_V1_V2_NF_WRPRST (host->regs + 0x18)
+#define NFC_V1_V2_CONFIG1 (host->regs + 0x1a)
+#define NFC_V1_V2_CONFIG2 (host->regs + 0x1c)
+
+#define NFC_V2_CONFIG1_ECC_MODE_4 (1 << 0)
+#define NFC_V1_V2_CONFIG1_SP_EN (1 << 2)
+#define NFC_V1_V2_CONFIG1_ECC_EN (1 << 3)
+#define NFC_V1_V2_CONFIG1_INT_MSK (1 << 4)
+#define NFC_V1_V2_CONFIG1_BIG (1 << 5)
+#define NFC_V1_V2_CONFIG1_RST (1 << 6)
+#define NFC_V1_V2_CONFIG1_CE (1 << 7)
+#define NFC_V2_CONFIG1_ONE_CYCLE (1 << 8)
+#define NFC_V2_CONFIG1_PPB(x) (((x) & 0x3) << 9)
+#define NFC_V2_CONFIG1_FP_INT (1 << 11)
+
+#define NFC_V1_V2_CONFIG2_INT (1 << 15)
+
+/*
+ * Operation modes for the NFC. Valid for v1, v2 and v3
+ * type controllers.
+ */
+#define NFC_CMD (1 << 0)
+#define NFC_ADDR (1 << 1)
+#define NFC_INPUT (1 << 2)
+#define NFC_OUTPUT (1 << 3)
+#define NFC_ID (1 << 4)
+#define NFC_STATUS (1 << 5)
+
+#define NFC_V3_FLASH_CMD (host->regs_axi + 0x00)
+#define NFC_V3_FLASH_ADDR0 (host->regs_axi + 0x04)
+
+#define NFC_V3_CONFIG1 (host->regs_axi + 0x34)
+#define NFC_V3_CONFIG1_SP_EN (1 << 0)
+#define NFC_V3_CONFIG1_RBA(x)		(((x) & 0x7) << 4)
+
+#define NFC_V3_ECC_STATUS_RESULT (host->regs_axi + 0x38)
+
+#define NFC_V3_LAUNCH (host->regs_axi + 0x40)
+
+#define NFC_V3_WRPROT (host->regs_ip + 0x0)
+#define NFC_V3_WRPROT_LOCK_TIGHT (1 << 0)
+#define NFC_V3_WRPROT_LOCK (1 << 1)
+#define NFC_V3_WRPROT_UNLOCK (1 << 2)
+#define NFC_V3_WRPROT_BLS_UNLOCK (2 << 6)
+
+#define NFC_V3_WRPROT_UNLOCK_BLK_ADD0 (host->regs_ip + 0x04)
+
+#define NFC_V3_CONFIG2 (host->regs_ip + 0x24)
+#define NFC_V3_CONFIG2_PS_512 (0 << 0)
+#define NFC_V3_CONFIG2_PS_2048 (1 << 0)
+#define NFC_V3_CONFIG2_PS_4096 (2 << 0)
+#define NFC_V3_CONFIG2_ONE_CYCLE (1 << 2)
+#define NFC_V3_CONFIG2_ECC_EN (1 << 3)
+#define NFC_V3_CONFIG2_2CMD_PHASES (1 << 4)
+#define NFC_V3_CONFIG2_NUM_ADDR_PHASE0 (1 << 5)
+#define NFC_V3_CONFIG2_ECC_MODE_8 (1 << 6)
+#define NFC_V3_CONFIG2_PPB(x, shift) (((x) & 0x3) << shift)
+#define NFC_V3_CONFIG2_NUM_ADDR_PHASE1(x) (((x) & 0x3) << 12)
+#define NFC_V3_CONFIG2_INT_MSK (1 << 15)
+#define NFC_V3_CONFIG2_ST_CMD(x) (((x) & 0xff) << 24)
+#define NFC_V3_CONFIG2_SPAS(x) (((x) & 0xff) << 16)
+
+#define NFC_V3_CONFIG3 (host->regs_ip + 0x28)
+#define NFC_V3_CONFIG3_ADD_OP(x) (((x) & 0x3) << 0)
+#define NFC_V3_CONFIG3_FW8 (1 << 3)
+#define NFC_V3_CONFIG3_SBB(x) (((x) & 0x7) << 8)
+#define NFC_V3_CONFIG3_NUM_OF_DEVICES(x) (((x) & 0x7) << 12)
+#define NFC_V3_CONFIG3_RBB_MODE (1 << 15)
+#define NFC_V3_CONFIG3_NO_SDMA (1 << 20)
+
+#define NFC_V3_IPC (host->regs_ip + 0x2C)
+#define NFC_V3_IPC_CREQ (1 << 0)
+#define NFC_V3_IPC_INT (1 << 31)
+
+#define NFC_V3_DELAY_LINE (host->regs_ip + 0x34)
+
+struct mxc_nand_host;
+
+struct mxc_nand_devtype_data {
+ void (*preset)(struct mtd_info *);
+ int (*read_page)(struct nand_chip *chip, void *buf, void *oob, bool ecc,
+ int page);
+ void (*send_cmd)(struct mxc_nand_host *, uint16_t, int);
+ void (*send_addr)(struct mxc_nand_host *, uint16_t, int);
+ void (*send_page)(struct mtd_info *, unsigned int);
+ void (*send_read_id)(struct mxc_nand_host *);
+ uint16_t (*get_dev_status)(struct mxc_nand_host *);
+ int (*check_int)(struct mxc_nand_host *);
+ void (*irq_control)(struct mxc_nand_host *, int);
+ u32 (*get_ecc_status)(struct mxc_nand_host *);
+ const struct mtd_ooblayout_ops *ooblayout;
+ void (*select_chip)(struct nand_chip *chip, int cs);
+ int (*setup_interface)(struct nand_chip *chip, int csline,
+ const struct nand_interface_config *conf);
+ void (*enable_hwecc)(struct nand_chip *chip, bool enable);
+
+ /*
+ * On i.MX21 the CONFIG2:INT bit cannot be read if interrupts are masked
+	 * (CONFIG1:INT_MSK is set). To handle this, the driver uses
+	 * enable_irq/disable_irq_nosync instead of CONFIG1:INT_MSK.
+ */
+ int irqpending_quirk;
+ int needs_ip;
+
+ size_t regs_offset;
+ size_t spare0_offset;
+ size_t axi_offset;
+
+ int spare_len;
+ int eccbytes;
+ int eccsize;
+ int ppb_shift;
+};
+
+struct mxc_nand_host {
+ struct nand_chip nand;
+ struct device *dev;
+
+ void __iomem *spare0;
+ void __iomem *main_area0;
+
+ void __iomem *base;
+ void __iomem *regs;
+ void __iomem *regs_axi;
+ void __iomem *regs_ip;
+ int status_request;
+ struct clk *clk;
+ int clk_act;
+ int irq;
+ int eccsize;
+ int used_oobsize;
+ int active_cs;
+
+ struct completion op_completion;
+
+ uint8_t *data_buf;
+ unsigned int buf_start;
+
+ const struct mxc_nand_devtype_data *devtype_data;
+ struct mxc_nand_platform_data pdata;
+};
+
+static const char * const part_probes[] = {
+ "cmdlinepart", "RedBoot", "ofpart", NULL };
+
+static void memcpy32_fromio(void *trg, const void __iomem *src, size_t size)
+{
+ int i;
+ u32 *t = trg;
+ const __iomem u32 *s = src;
+
+ for (i = 0; i < (size >> 2); i++)
+ *t++ = __raw_readl(s++);
+}
+
+static void memcpy16_fromio(void *trg, const void __iomem *src, size_t size)
+{
+ int i;
+ u16 *t = trg;
+ const __iomem u16 *s = src;
+
+ /* We assume that src (IO) is always 32bit aligned */
+ if (PTR_ALIGN(trg, 4) == trg && IS_ALIGNED(size, 4)) {
+ memcpy32_fromio(trg, src, size);
+ return;
+ }
+
+ for (i = 0; i < (size >> 1); i++)
+ *t++ = __raw_readw(s++);
+}
+
+static inline void memcpy32_toio(void __iomem *trg, const void *src, int size)
+{
+	/* __iowrite32_copy() takes a count of 32-bit words, so divide the byte size by 4 */
+ __iowrite32_copy(trg, src, size / 4);
+}
+
+static void memcpy16_toio(void __iomem *trg, const void *src, int size)
+{
+ int i;
+ __iomem u16 *t = trg;
+ const u16 *s = src;
+
+ /* We assume that trg (IO) is always 32bit aligned */
+ if (PTR_ALIGN(src, 4) == src && IS_ALIGNED(size, 4)) {
+ memcpy32_toio(trg, src, size);
+ return;
+ }
+
+ for (i = 0; i < (size >> 1); i++)
+ __raw_writew(*s++, t++);
+}
+
+/*
+ * The controller splits a page into data chunks of 512 bytes + partial oob.
+ * There are writesize / 512 such chunks, the size of the partial oob parts is
+ * oobsize / #chunks rounded down to a multiple of 2. The last oob chunk then
+ * contains additionally the byte lost by rounding (if any).
+ * This function handles the needed shuffling between host->data_buf (which
+ * holds a page in natural order, i.e. writesize bytes data + oobsize bytes
+ * spare) and the NFC buffer.
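+ * For example, a 2048-byte page with 64 bytes of oob is handled as 4 chunks,
+ * each pairing 512 bytes of data with a 16-byte partial oob.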
+ */
+static void copy_spare(struct mtd_info *mtd, bool bfrom, void *buf)
+{
+ struct nand_chip *this = mtd_to_nand(mtd);
+ struct mxc_nand_host *host = nand_get_controller_data(this);
+ u16 i, oob_chunk_size;
+ u16 num_chunks = mtd->writesize / 512;
+
+ u8 *d = buf;
+ u8 __iomem *s = host->spare0;
+ u16 sparebuf_size = host->devtype_data->spare_len;
+
+ /* size of oob chunk for all but possibly the last one */
+ oob_chunk_size = (host->used_oobsize / num_chunks) & ~1;
+
+ if (bfrom) {
+ for (i = 0; i < num_chunks - 1; i++)
+ memcpy16_fromio(d + i * oob_chunk_size,
+ s + i * sparebuf_size,
+ oob_chunk_size);
+
+ /* the last chunk */
+ memcpy16_fromio(d + i * oob_chunk_size,
+ s + i * sparebuf_size,
+ host->used_oobsize - i * oob_chunk_size);
+ } else {
+ for (i = 0; i < num_chunks - 1; i++)
+ memcpy16_toio(&s[i * sparebuf_size],
+ &d[i * oob_chunk_size],
+ oob_chunk_size);
+
+ /* the last chunk */
+ memcpy16_toio(&s[i * sparebuf_size],
+ &d[i * oob_chunk_size],
+ host->used_oobsize - i * oob_chunk_size);
+ }
+}
+
+/*
+ * MXC NANDFC can only perform full page+spare or spare-only read/write. When
+ * the upper layers perform a read/write buf operation, the saved column address
+ * is used to index into the full page. So this function is usually called
+ * with column == 0 (unless no column cycle is needed, which is indicated by
+ * column == -1).
+ */
+static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+ struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
+
+ /* Write out column address, if necessary */
+ if (column != -1) {
+ host->devtype_data->send_addr(host, column & 0xff,
+ page_addr == -1);
+ if (mtd->writesize > 512)
+ /* another col addr cycle for 2k page */
+ host->devtype_data->send_addr(host,
+ (column >> 8) & 0xff,
+ false);
+ }
+
+ /* Write out page address, if necessary */
+ if (page_addr != -1) {
+ /* paddr_0 - p_addr_7 */
+ host->devtype_data->send_addr(host, (page_addr & 0xff), false);
+
+ if (mtd->writesize > 512) {
+ if (mtd->size >= 0x10000000) {
+ /* paddr_8 - paddr_15 */
+ host->devtype_data->send_addr(host,
+ (page_addr >> 8) & 0xff,
+ false);
+ host->devtype_data->send_addr(host,
+ (page_addr >> 16) & 0xff,
+ true);
+ } else
+ /* paddr_8 - paddr_15 */
+ host->devtype_data->send_addr(host,
+ (page_addr >> 8) & 0xff, true);
+ } else {
+ if (nand_chip->options & NAND_ROW_ADDR_3) {
+ /* paddr_8 - paddr_15 */
+ host->devtype_data->send_addr(host,
+ (page_addr >> 8) & 0xff,
+ false);
+ host->devtype_data->send_addr(host,
+ (page_addr >> 16) & 0xff,
+ true);
+ } else
+ /* paddr_8 - paddr_15 */
+ host->devtype_data->send_addr(host,
+ (page_addr >> 8) & 0xff, true);
+ }
+ }
+}
+
+static int check_int_v3(struct mxc_nand_host *host)
+{
+ uint32_t tmp;
+
+ tmp = readl(NFC_V3_IPC);
+ if (!(tmp & NFC_V3_IPC_INT))
+ return 0;
+
+ tmp &= ~NFC_V3_IPC_INT;
+ writel(tmp, NFC_V3_IPC);
+
+ return 1;
+}
+
+static int check_int_v1_v2(struct mxc_nand_host *host)
+{
+ uint32_t tmp;
+
+ tmp = readw(NFC_V1_V2_CONFIG2);
+ if (!(tmp & NFC_V1_V2_CONFIG2_INT))
+ return 0;
+
+ if (!host->devtype_data->irqpending_quirk)
+ writew(tmp & ~NFC_V1_V2_CONFIG2_INT, NFC_V1_V2_CONFIG2);
+
+ return 1;
+}
+
+static void irq_control_v1_v2(struct mxc_nand_host *host, int activate)
+{
+ uint16_t tmp;
+
+ tmp = readw(NFC_V1_V2_CONFIG1);
+
+ if (activate)
+ tmp &= ~NFC_V1_V2_CONFIG1_INT_MSK;
+ else
+ tmp |= NFC_V1_V2_CONFIG1_INT_MSK;
+
+ writew(tmp, NFC_V1_V2_CONFIG1);
+}
+
+static void irq_control_v3(struct mxc_nand_host *host, int activate)
+{
+ uint32_t tmp;
+
+ tmp = readl(NFC_V3_CONFIG2);
+
+ if (activate)
+ tmp &= ~NFC_V3_CONFIG2_INT_MSK;
+ else
+ tmp |= NFC_V3_CONFIG2_INT_MSK;
+
+ writel(tmp, NFC_V3_CONFIG2);
+}
+
+static void irq_control(struct mxc_nand_host *host, int activate)
+{
+ if (host->devtype_data->irqpending_quirk) {
+ if (activate)
+ enable_irq(host->irq);
+ else
+ disable_irq_nosync(host->irq);
+ } else {
+ host->devtype_data->irq_control(host, activate);
+ }
+}
+
+static u32 get_ecc_status_v1(struct mxc_nand_host *host)
+{
+ return readw(NFC_V1_V2_ECC_STATUS_RESULT);
+}
+
+static u32 get_ecc_status_v2(struct mxc_nand_host *host)
+{
+ return readl(NFC_V1_V2_ECC_STATUS_RESULT);
+}
+
+static u32 get_ecc_status_v3(struct mxc_nand_host *host)
+{
+ return readl(NFC_V3_ECC_STATUS_RESULT);
+}
+
+static irqreturn_t mxc_nfc_irq(int irq, void *dev_id)
+{
+ struct mxc_nand_host *host = dev_id;
+
+ if (!host->devtype_data->check_int(host))
+ return IRQ_NONE;
+
+ irq_control(host, 0);
+
+ complete(&host->op_completion);
+
+ return IRQ_HANDLED;
+}
+
+/* This function polls the NANDFC to wait for the basic operation to
+ * complete by checking the INT bit of the CONFIG2 register.
+ */
+static int wait_op_done(struct mxc_nand_host *host, int useirq)
+{
+ int ret = 0;
+
+ /*
+ * If operation is already complete, don't bother to setup an irq or a
+ * loop.
+ */
+ if (host->devtype_data->check_int(host))
+ return 0;
+
+ if (useirq) {
+ unsigned long timeout;
+
+ reinit_completion(&host->op_completion);
+
+ irq_control(host, 1);
+
+ timeout = wait_for_completion_timeout(&host->op_completion, HZ);
+ if (!timeout && !host->devtype_data->check_int(host)) {
+ dev_dbg(host->dev, "timeout waiting for irq\n");
+ ret = -ETIMEDOUT;
+ }
+ } else {
+ int max_retries = 8000;
+ int done;
+
+ do {
+ udelay(1);
+
+ done = host->devtype_data->check_int(host);
+ if (done)
+ break;
+
+ } while (--max_retries);
+
+ if (!done) {
+ dev_dbg(host->dev, "timeout polling for completion\n");
+ ret = -ETIMEDOUT;
+ }
+ }
+
+ WARN_ONCE(ret < 0, "timeout! useirq=%d\n", useirq);
+
+ return ret;
+}
+
+static void send_cmd_v3(struct mxc_nand_host *host, uint16_t cmd, int useirq)
+{
+ /* fill command */
+ writel(cmd, NFC_V3_FLASH_CMD);
+
+ /* send out command */
+ writel(NFC_CMD, NFC_V3_LAUNCH);
+
+ /* Wait for operation to complete */
+ wait_op_done(host, useirq);
+}
+
+/* This function issues the specified command to the NAND device and
+ * waits for completion. */
+static void send_cmd_v1_v2(struct mxc_nand_host *host, uint16_t cmd, int useirq)
+{
+ dev_dbg(host->dev, "send_cmd(host, 0x%x, %d)\n", cmd, useirq);
+
+ writew(cmd, NFC_V1_V2_FLASH_CMD);
+ writew(NFC_CMD, NFC_V1_V2_CONFIG2);
+
+ if (host->devtype_data->irqpending_quirk && (cmd == NAND_CMD_RESET)) {
+ int max_retries = 100;
+		/* Reset completion is indicated by NFC_CONFIG2 being set to 0 */
+ while (max_retries-- > 0) {
+ if (readw(NFC_V1_V2_CONFIG2) == 0) {
+ break;
+ }
+ udelay(1);
+ }
+ if (max_retries < 0)
+ dev_dbg(host->dev, "%s: RESET failed\n", __func__);
+ } else {
+ /* Wait for operation to complete */
+ wait_op_done(host, useirq);
+ }
+}
+
+static void send_addr_v3(struct mxc_nand_host *host, uint16_t addr, int islast)
+{
+ /* fill address */
+ writel(addr, NFC_V3_FLASH_ADDR0);
+
+ /* send out address */
+ writel(NFC_ADDR, NFC_V3_LAUNCH);
+
+ wait_op_done(host, 0);
+}
+
+/* This function sends an address (or partial address) to the
+ * NAND device. The address is used to select the source/destination for
+ * a NAND command. */
+static void send_addr_v1_v2(struct mxc_nand_host *host, uint16_t addr, int islast)
+{
+ dev_dbg(host->dev, "send_addr(host, 0x%x %d)\n", addr, islast);
+
+ writew(addr, NFC_V1_V2_FLASH_ADDR);
+ writew(NFC_ADDR, NFC_V1_V2_CONFIG2);
+
+ /* Wait for operation to complete */
+ wait_op_done(host, islast);
+}
+
+static void send_page_v3(struct mtd_info *mtd, unsigned int ops)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+ struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
+ uint32_t tmp;
+
+ tmp = readl(NFC_V3_CONFIG1);
+ tmp &= ~(7 << 4);
+ writel(tmp, NFC_V3_CONFIG1);
+
+ /* transfer data from NFC ram to nand */
+ writel(ops, NFC_V3_LAUNCH);
+
+ wait_op_done(host, false);
+}
+
+static void send_page_v2(struct mtd_info *mtd, unsigned int ops)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+ struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
+
+ /* NANDFC buffer 0 is used for page read/write */
+ writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
+
+ writew(ops, NFC_V1_V2_CONFIG2);
+
+ /* Wait for operation to complete */
+ wait_op_done(host, true);
+}
+
+static void send_page_v1(struct mtd_info *mtd, unsigned int ops)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+ struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
+ int bufs, i;
+
+ if (mtd->writesize > 512)
+ bufs = 4;
+ else
+ bufs = 1;
+
+ for (i = 0; i < bufs; i++) {
+
+ /* NANDFC buffer 0 is used for page read/write */
+ writew((host->active_cs << 4) | i, NFC_V1_V2_BUF_ADDR);
+
+ writew(ops, NFC_V1_V2_CONFIG2);
+
+ /* Wait for operation to complete */
+ wait_op_done(host, true);
+ }
+}
+
+static void send_read_id_v3(struct mxc_nand_host *host)
+{
+ /* Read ID into main buffer */
+ writel(NFC_ID, NFC_V3_LAUNCH);
+
+ wait_op_done(host, true);
+
+ memcpy32_fromio(host->data_buf, host->main_area0, 16);
+}
+
+/* Request the NANDFC to perform a read of the NAND device ID. */
+static void send_read_id_v1_v2(struct mxc_nand_host *host)
+{
+ /* NANDFC buffer 0 is used for device ID output */
+ writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
+
+ writew(NFC_ID, NFC_V1_V2_CONFIG2);
+
+ /* Wait for operation to complete */
+ wait_op_done(host, true);
+
+ memcpy32_fromio(host->data_buf, host->main_area0, 16);
+}
+
+static uint16_t get_dev_status_v3(struct mxc_nand_host *host)
+{
+ writew(NFC_STATUS, NFC_V3_LAUNCH);
+ wait_op_done(host, true);
+
+ return readl(NFC_V3_CONFIG1) >> 16;
+}
+
+/* This function requests the NANDFC to perform a read of the
+ * NAND device status and returns the current status. */
+static uint16_t get_dev_status_v1_v2(struct mxc_nand_host *host)
+{
+ void __iomem *main_buf = host->main_area0;
+ uint32_t store;
+ uint16_t ret;
+
+ writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
+
+ /*
+ * The device status is stored in main_area0. To
+	 * prevent corruption of the buffer, save the value
+ * and restore it afterwards.
+ */
+ store = readl(main_buf);
+
+ writew(NFC_STATUS, NFC_V1_V2_CONFIG2);
+ wait_op_done(host, true);
+
+ ret = readw(main_buf);
+
+ writel(store, main_buf);
+
+ return ret;
+}
+
+static void mxc_nand_enable_hwecc_v1_v2(struct nand_chip *chip, bool enable)
+{
+ struct mxc_nand_host *host = nand_get_controller_data(chip);
+ uint16_t config1;
+
+ if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
+ return;
+
+ config1 = readw(NFC_V1_V2_CONFIG1);
+
+ if (enable)
+ config1 |= NFC_V1_V2_CONFIG1_ECC_EN;
+ else
+ config1 &= ~NFC_V1_V2_CONFIG1_ECC_EN;
+
+ writew(config1, NFC_V1_V2_CONFIG1);
+}
+
+static void mxc_nand_enable_hwecc_v3(struct nand_chip *chip, bool enable)
+{
+ struct mxc_nand_host *host = nand_get_controller_data(chip);
+ uint32_t config2;
+
+ if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
+ return;
+
+ config2 = readl(NFC_V3_CONFIG2);
+
+ if (enable)
+ config2 |= NFC_V3_CONFIG2_ECC_EN;
+ else
+ config2 &= ~NFC_V3_CONFIG2_ECC_EN;
+
+ writel(config2, NFC_V3_CONFIG2);
+}
+
+/* This function is used by the upper layer to check if the device is ready */
+static int mxc_nand_dev_ready(struct nand_chip *chip)
+{
+ /*
+ * NFC handles R/B internally. Therefore, this function
+ * always returns status as ready.
+ */
+ return 1;
+}
+
+static int mxc_nand_read_page_v1(struct nand_chip *chip, void *buf, void *oob,
+ bool ecc, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct mxc_nand_host *host = nand_get_controller_data(chip);
+ unsigned int bitflips_corrected = 0;
+ int no_subpages;
+ int i;
+
+ host->devtype_data->enable_hwecc(chip, ecc);
+
+ host->devtype_data->send_cmd(host, NAND_CMD_READ0, false);
+ mxc_do_addr_cycle(mtd, 0, page);
+
+ if (mtd->writesize > 512)
+ host->devtype_data->send_cmd(host, NAND_CMD_READSTART, true);
+
+ no_subpages = mtd->writesize >> 9;
+
+ for (i = 0; i < no_subpages; i++) {
+ uint16_t ecc_stats;
+
+ /* NANDFC buffer 0 is used for page read/write */
+ writew((host->active_cs << 4) | i, NFC_V1_V2_BUF_ADDR);
+
+ writew(NFC_OUTPUT, NFC_V1_V2_CONFIG2);
+
+ /* Wait for operation to complete */
+ wait_op_done(host, true);
+
+ ecc_stats = get_ecc_status_v1(host);
+
+ ecc_stats >>= 2;
+
+ if (buf && ecc) {
+ switch (ecc_stats & 0x3) {
+ case 0:
+ default:
+ break;
+ case 1:
+ mtd->ecc_stats.corrected++;
+ bitflips_corrected = 1;
+ break;
+ case 2:
+ mtd->ecc_stats.failed++;
+ break;
+ }
+ }
+ }
+
+ if (buf)
+ memcpy32_fromio(buf, host->main_area0, mtd->writesize);
+ if (oob)
+ copy_spare(mtd, true, oob);
+
+ return bitflips_corrected;
+}
+
+static int mxc_nand_read_page_v2_v3(struct nand_chip *chip, void *buf,
+ void *oob, bool ecc, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct mxc_nand_host *host = nand_get_controller_data(chip);
+ unsigned int max_bitflips = 0;
+ u32 ecc_stat, err;
+ int no_subpages;
+ u8 ecc_bit_mask, err_limit;
+
+ host->devtype_data->enable_hwecc(chip, ecc);
+
+ host->devtype_data->send_cmd(host, NAND_CMD_READ0, false);
+ mxc_do_addr_cycle(mtd, 0, page);
+
+ if (mtd->writesize > 512)
+ host->devtype_data->send_cmd(host,
+ NAND_CMD_READSTART, true);
+
+ host->devtype_data->send_page(mtd, NFC_OUTPUT);
+
+ if (buf)
+ memcpy32_fromio(buf, host->main_area0, mtd->writesize);
+ if (oob)
+ copy_spare(mtd, true, oob);
+
+ ecc_bit_mask = (host->eccsize == 4) ? 0x7 : 0xf;
+ err_limit = (host->eccsize == 4) ? 0x4 : 0x8;
+
+ no_subpages = mtd->writesize >> 9;
+
+ ecc_stat = host->devtype_data->get_ecc_status(host);
+
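+	/* the status word packs one error-count nibble per 512-byte subpage */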
+ do {
+ err = ecc_stat & ecc_bit_mask;
+ if (err > err_limit) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += err;
+ max_bitflips = max_t(unsigned int, max_bitflips, err);
+ }
+
+ ecc_stat >>= 4;
+ } while (--no_subpages);
+
+ return max_bitflips;
+}
+
+static int mxc_nand_read_page(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mxc_nand_host *host = nand_get_controller_data(chip);
+ void *oob_buf;
+
+ if (oob_required)
+ oob_buf = chip->oob_poi;
+ else
+ oob_buf = NULL;
+
+ return host->devtype_data->read_page(chip, buf, oob_buf, 1, page);
+}
+
+static int mxc_nand_read_page_raw(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mxc_nand_host *host = nand_get_controller_data(chip);
+ void *oob_buf;
+
+ if (oob_required)
+ oob_buf = chip->oob_poi;
+ else
+ oob_buf = NULL;
+
+ return host->devtype_data->read_page(chip, buf, oob_buf, 0, page);
+}
+
+static int mxc_nand_read_oob(struct nand_chip *chip, int page)
+{
+ struct mxc_nand_host *host = nand_get_controller_data(chip);
+
+ return host->devtype_data->read_page(chip, NULL, chip->oob_poi, 0,
+ page);
+}
+
+static int mxc_nand_write_page(struct nand_chip *chip, const uint8_t *buf,
+ bool ecc, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct mxc_nand_host *host = nand_get_controller_data(chip);
+
+ host->devtype_data->enable_hwecc(chip, ecc);
+
+ host->devtype_data->send_cmd(host, NAND_CMD_SEQIN, false);
+ mxc_do_addr_cycle(mtd, 0, page);
+
+ memcpy32_toio(host->main_area0, buf, mtd->writesize);
+ copy_spare(mtd, false, chip->oob_poi);
+
+ host->devtype_data->send_page(mtd, NFC_INPUT);
+ host->devtype_data->send_cmd(host, NAND_CMD_PAGEPROG, true);
+ mxc_do_addr_cycle(mtd, 0, page);
+
+ return 0;
+}
+
+static int mxc_nand_write_page_ecc(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
+{
+ return mxc_nand_write_page(chip, buf, true, page);
+}
+
+static int mxc_nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
+{
+ return mxc_nand_write_page(chip, buf, false, page);
+}
+
+static int mxc_nand_write_oob(struct nand_chip *chip, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct mxc_nand_host *host = nand_get_controller_data(chip);
+
+ memset(host->data_buf, 0xff, mtd->writesize);
+
+ return mxc_nand_write_page(chip, host->data_buf, false, page);
+}
+
+static u_char mxc_nand_read_byte(struct nand_chip *nand_chip)
+{
+ struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
+ uint8_t ret;
+
+ /* Check for status request */
+ if (host->status_request)
+ return host->devtype_data->get_dev_status(host) & 0xFF;
+
+ if (nand_chip->options & NAND_BUSWIDTH_16) {
+ /* only take the lower byte of each word */
+ ret = *(uint16_t *)(host->data_buf + host->buf_start);
+
+ host->buf_start += 2;
+ } else {
+ ret = *(uint8_t *)(host->data_buf + host->buf_start);
+ host->buf_start++;
+ }
+
+ dev_dbg(host->dev, "%s: ret=0x%hhx (start=%u)\n", __func__, ret, host->buf_start);
+ return ret;
+}
+
+/* Write data of length len to the buffer buf. The data to be written to the
+ * NAND Flash is first copied to the RAM buffer. After the Data Input
+ * Operation by the NFC, the data is written to the NAND Flash. */
+static void mxc_nand_write_buf(struct nand_chip *nand_chip, const u_char *buf,
+ int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand_chip);
+ struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
+ u16 col = host->buf_start;
+ int n = mtd->oobsize + mtd->writesize - col;
+
+ n = min(n, len);
+
+ memcpy(host->data_buf + col, buf, n);
+
+ host->buf_start += n;
+}
+
+/* Read the data buffer from the NAND Flash. To read the data, the NFC first
+ * initiates a data output cycle, which copies the data to the RAM buffer.
+ * This data of length len is then copied to the buffer buf.
+ */
+static void mxc_nand_read_buf(struct nand_chip *nand_chip, u_char *buf,
+ int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand_chip);
+ struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
+ u16 col = host->buf_start;
+ int n = mtd->oobsize + mtd->writesize - col;
+
+ n = min(n, len);
+
+ memcpy(buf, host->data_buf + col, n);
+
+ host->buf_start += n;
+}
+
+/* This function is used by the upper layer to select and
+ * deselect the NAND chip */
+static void mxc_nand_select_chip_v1_v3(struct nand_chip *nand_chip, int chip)
+{
+ struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
+
+ if (chip == -1) {
+ /* Disable the NFC clock */
+ if (host->clk_act) {
+ clk_disable_unprepare(host->clk);
+ host->clk_act = 0;
+ }
+ return;
+ }
+
+ if (!host->clk_act) {
+ /* Enable the NFC clock */
+ clk_prepare_enable(host->clk);
+ host->clk_act = 1;
+ }
+}
+
+static void mxc_nand_select_chip_v2(struct nand_chip *nand_chip, int chip)
+{
+ struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
+
+ if (chip == -1) {
+ /* Disable the NFC clock */
+ if (host->clk_act) {
+ clk_disable_unprepare(host->clk);
+ host->clk_act = 0;
+ }
+ return;
+ }
+
+ if (!host->clk_act) {
+ /* Enable the NFC clock */
+ clk_prepare_enable(host->clk);
+ host->clk_act = 1;
+ }
+
+ host->active_cs = chip;
+ writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
+}
+
+#define MXC_V1_ECCBYTES 5
+
+static int mxc_v1_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+
+ if (section >= nand_chip->ecc.steps)
+ return -ERANGE;
+
+ oobregion->offset = (section * 16) + 6;
+ oobregion->length = MXC_V1_ECCBYTES;
+
+ return 0;
+}
+
+static int mxc_v1_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+
+ if (section > nand_chip->ecc.steps)
+ return -ERANGE;
+
+ if (!section) {
+ if (mtd->writesize <= 512) {
+ oobregion->offset = 0;
+ oobregion->length = 5;
+ } else {
+ oobregion->offset = 2;
+ oobregion->length = 4;
+ }
+ } else {
+ oobregion->offset = ((section - 1) * 16) + MXC_V1_ECCBYTES + 6;
+ if (section < nand_chip->ecc.steps)
+ oobregion->length = (section * 16) + 6 -
+ oobregion->offset;
+ else
+ oobregion->length = mtd->oobsize - oobregion->offset;
+ }
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops mxc_v1_ooblayout_ops = {
+ .ecc = mxc_v1_ooblayout_ecc,
+ .free = mxc_v1_ooblayout_free,
+};
+
+static int mxc_v2_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+ int stepsize = nand_chip->ecc.bytes == 9 ? 16 : 26;
+
+ if (section >= nand_chip->ecc.steps)
+ return -ERANGE;
+
+ oobregion->offset = (section * stepsize) + 7;
+ oobregion->length = nand_chip->ecc.bytes;
+
+ return 0;
+}
+
+static int mxc_v2_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+ int stepsize = nand_chip->ecc.bytes == 9 ? 16 : 26;
+
+ if (section >= nand_chip->ecc.steps)
+ return -ERANGE;
+
+ if (!section) {
+ if (mtd->writesize <= 512) {
+ oobregion->offset = 0;
+ oobregion->length = 5;
+ } else {
+ oobregion->offset = 2;
+ oobregion->length = 4;
+ }
+ } else {
+ oobregion->offset = section * stepsize;
+ oobregion->length = 7;
+ }
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops mxc_v2_ooblayout_ops = {
+ .ecc = mxc_v2_ooblayout_ecc,
+ .free = mxc_v2_ooblayout_free,
+};
+
+/*
+ * v2 and v3 type controllers can do 4bit or 8bit ecc depending
+ * on how much oob the nand chip has. For 8bit ecc we need at least
+ * 26 bytes of oob data per 512 byte block.
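+ * For example, 64 bytes of oob on a 2048 byte page gives 16 bytes per 512
+ * byte block (4bit ecc), while 224 bytes on a 4096 byte page gives 28
+ * bytes (8bit ecc).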
+ */
+static int get_eccsize(struct mtd_info *mtd)
+{
+ int oobbytes_per_512 = 0;
+
+ oobbytes_per_512 = mtd->oobsize * 512 / mtd->writesize;
+
+ if (oobbytes_per_512 < 26)
+ return 4;
+ else
+ return 8;
+}
+
+static void preset_v1(struct mtd_info *mtd)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+ struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
+ uint16_t config1 = 0;
+
+ if (nand_chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST &&
+ mtd->writesize)
+ config1 |= NFC_V1_V2_CONFIG1_ECC_EN;
+
+ if (!host->devtype_data->irqpending_quirk)
+ config1 |= NFC_V1_V2_CONFIG1_INT_MSK;
+
+ host->eccsize = 1;
+
+ writew(config1, NFC_V1_V2_CONFIG1);
+ /* preset operation */
+
+ /* Unlock the internal RAM Buffer */
+ writew(0x2, NFC_V1_V2_CONFIG);
+
+ /* Blocks to be unlocked */
+ writew(0x0, NFC_V1_UNLOCKSTART_BLKADDR);
+ writew(0xffff, NFC_V1_UNLOCKEND_BLKADDR);
+
+ /* Unlock Block Command for given address range */
+ writew(0x4, NFC_V1_V2_WRPROT);
+}
+
+static int mxc_nand_v2_setup_interface(struct nand_chip *chip, int csline,
+ const struct nand_interface_config *conf)
+{
+ struct mxc_nand_host *host = nand_get_controller_data(chip);
+ int tRC_min_ns, tRC_ps, ret;
+ unsigned long rate, rate_round;
+ const struct nand_sdr_timings *timings;
+ u16 config1;
+
+ timings = nand_get_sdr_timings(conf);
+ if (IS_ERR(timings))
+ return -ENOTSUPP;
+
+ config1 = readw(NFC_V1_V2_CONFIG1);
+
+ tRC_min_ns = timings->tRC_min / 1000;
+ rate = 1000000000 / tRC_min_ns;
+
+ /*
+ * For tRC < 30ns we have to use EDO mode. In this case the controller
+ * does one access per clock cycle. Otherwise the controller does one
+	 * access in two clock cycles, so the clock supplied to the controller
+	 * has to run at twice the access rate.
+	 */
+ if (tRC_min_ns < 30) {
+ rate_round = clk_round_rate(host->clk, rate);
+ config1 |= NFC_V2_CONFIG1_ONE_CYCLE;
+ tRC_ps = 1000000000 / (rate_round / 1000);
+ } else {
+ rate *= 2;
+ rate_round = clk_round_rate(host->clk, rate);
+ config1 &= ~NFC_V2_CONFIG1_ONE_CYCLE;
+ tRC_ps = 1000000000 / (rate_round / 1000 / 2);
+ }
+
+ /*
+ * The timing values compared against are from the i.MX25 Automotive
+ * datasheet, Table 50. NFC Timing Parameters
+ */
+ if (timings->tCLS_min > tRC_ps - 1000 ||
+ timings->tCLH_min > tRC_ps - 2000 ||
+ timings->tCS_min > tRC_ps - 1000 ||
+ timings->tCH_min > tRC_ps - 2000 ||
+ timings->tWP_min > tRC_ps - 1500 ||
+ timings->tALS_min > tRC_ps ||
+ timings->tALH_min > tRC_ps - 3000 ||
+ timings->tDS_min > tRC_ps ||
+ timings->tDH_min > tRC_ps - 5000 ||
+ timings->tWC_min > 2 * tRC_ps ||
+ timings->tWH_min > tRC_ps - 2500 ||
+ timings->tRR_min > 6 * tRC_ps ||
+ timings->tRP_min > 3 * tRC_ps / 2 ||
+ timings->tRC_min > 2 * tRC_ps ||
+ timings->tREH_min > (tRC_ps / 2) - 2500) {
+ dev_dbg(host->dev, "Timing out of bounds\n");
+ return -EINVAL;
+ }
+
+ if (csline == NAND_DATA_IFACE_CHECK_ONLY)
+ return 0;
+
+ ret = clk_set_rate(host->clk, rate);
+ if (ret)
+ return ret;
+
+ writew(config1, NFC_V1_V2_CONFIG1);
+
+ dev_dbg(host->dev, "Setting rate to %ldHz, %s mode\n", rate_round,
+ config1 & NFC_V2_CONFIG1_ONE_CYCLE ? "One cycle (EDO)" :
+ "normal");
+
+ return 0;
+}
+
+static void preset_v2(struct mtd_info *mtd)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+ struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
+ uint16_t config1 = 0;
+
+ config1 |= NFC_V2_CONFIG1_FP_INT;
+
+ if (!host->devtype_data->irqpending_quirk)
+ config1 |= NFC_V1_V2_CONFIG1_INT_MSK;
+
+ if (mtd->writesize) {
+ uint16_t pages_per_block = mtd->erasesize / mtd->writesize;
+
+ if (nand_chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST)
+ config1 |= NFC_V1_V2_CONFIG1_ECC_EN;
+
+ host->eccsize = get_eccsize(mtd);
+ if (host->eccsize == 4)
+ config1 |= NFC_V2_CONFIG1_ECC_MODE_4;
+
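+		/*
+		 * the PPB field encodes pages per block as 32 << x,
+		 * assuming power-of-two block and page sizes
+		 */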
+ config1 |= NFC_V2_CONFIG1_PPB(ffs(pages_per_block) - 6);
+ } else {
+ host->eccsize = 1;
+ }
+
+ writew(config1, NFC_V1_V2_CONFIG1);
+ /* preset operation */
+
+ /* spare area size in 16-bit half-words */
+ writew(mtd->oobsize / 2, NFC_V21_RSLTSPARE_AREA);
+
+ /* Unlock the internal RAM Buffer */
+ writew(0x2, NFC_V1_V2_CONFIG);
+
+ /* Blocks to be unlocked */
+ writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR0);
+ writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR1);
+ writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR2);
+ writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR3);
+ writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR0);
+ writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR1);
+ writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR2);
+ writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR3);
+
+ /* Unlock Block Command for given address range */
+ writew(0x4, NFC_V1_V2_WRPROT);
+}
+
+static void preset_v3(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mxc_nand_host *host = nand_get_controller_data(chip);
+ uint32_t config2, config3;
+ int i, addr_phases;
+
+ writel(NFC_V3_CONFIG1_RBA(0), NFC_V3_CONFIG1);
+ writel(NFC_V3_IPC_CREQ, NFC_V3_IPC);
+
+ /* Unlock the internal RAM Buffer */
+ writel(NFC_V3_WRPROT_BLS_UNLOCK | NFC_V3_WRPROT_UNLOCK,
+ NFC_V3_WRPROT);
+
+ /* Blocks to be unlocked */
+ for (i = 0; i < NAND_MAX_CHIPS; i++)
+ writel(0xffff << 16, NFC_V3_WRPROT_UNLOCK_BLK_ADD0 + (i << 2));
+
+ writel(0, NFC_V3_IPC);
+
+ config2 = NFC_V3_CONFIG2_ONE_CYCLE |
+ NFC_V3_CONFIG2_2CMD_PHASES |
+ NFC_V3_CONFIG2_SPAS(mtd->oobsize >> 1) |
+ NFC_V3_CONFIG2_ST_CMD(0x70) |
+ NFC_V3_CONFIG2_INT_MSK |
+ NFC_V3_CONFIG2_NUM_ADDR_PHASE0;
+
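+	/* number of row address bytes, derived from the highest page address */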
+ addr_phases = fls(chip->pagemask) >> 3;
+
+ if (mtd->writesize == 2048) {
+ config2 |= NFC_V3_CONFIG2_PS_2048;
+ config2 |= NFC_V3_CONFIG2_NUM_ADDR_PHASE1(addr_phases);
+ } else if (mtd->writesize == 4096) {
+ config2 |= NFC_V3_CONFIG2_PS_4096;
+ config2 |= NFC_V3_CONFIG2_NUM_ADDR_PHASE1(addr_phases);
+ } else {
+ config2 |= NFC_V3_CONFIG2_PS_512;
+ config2 |= NFC_V3_CONFIG2_NUM_ADDR_PHASE1(addr_phases - 1);
+ }
+
+ if (mtd->writesize) {
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST)
+ config2 |= NFC_V3_CONFIG2_ECC_EN;
+
+ config2 |= NFC_V3_CONFIG2_PPB(
+ ffs(mtd->erasesize / mtd->writesize) - 6,
+ host->devtype_data->ppb_shift);
+ host->eccsize = get_eccsize(mtd);
+ if (host->eccsize == 8)
+ config2 |= NFC_V3_CONFIG2_ECC_MODE_8;
+ }
+
+ writel(config2, NFC_V3_CONFIG2);
+
+ config3 = NFC_V3_CONFIG3_NUM_OF_DEVICES(0) |
+ NFC_V3_CONFIG3_NO_SDMA |
+ NFC_V3_CONFIG3_RBB_MODE |
+ NFC_V3_CONFIG3_SBB(6) | /* Reset default */
+ NFC_V3_CONFIG3_ADD_OP(0);
+
+ if (!(chip->options & NAND_BUSWIDTH_16))
+ config3 |= NFC_V3_CONFIG3_FW8;
+
+ writel(config3, NFC_V3_CONFIG3);
+
+ writel(0, NFC_V3_DELAY_LINE);
+}
+
+/* Used by the upper layer to write commands to the NAND Flash for
+ * the different operations to be carried out on the NAND Flash */
+static void mxc_nand_command(struct nand_chip *nand_chip, unsigned command,
+ int column, int page_addr)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand_chip);
+ struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
+
+ dev_dbg(host->dev, "mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n",
+ command, column, page_addr);
+
+ /* Reset command state information */
+ host->status_request = false;
+
+ /* Command pre-processing step */
+ switch (command) {
+ case NAND_CMD_RESET:
+ host->devtype_data->preset(mtd);
+ host->devtype_data->send_cmd(host, command, false);
+ break;
+
+ case NAND_CMD_STATUS:
+ host->buf_start = 0;
+ host->status_request = true;
+
+ host->devtype_data->send_cmd(host, command, true);
+ WARN_ONCE(column != -1 || page_addr != -1,
+ "Unexpected column/row value (cmd=%u, col=%d, row=%d)\n",
+ command, column, page_addr);
+ mxc_do_addr_cycle(mtd, column, page_addr);
+ break;
+
+ case NAND_CMD_READID:
+ host->devtype_data->send_cmd(host, command, true);
+ mxc_do_addr_cycle(mtd, column, page_addr);
+ host->devtype_data->send_read_id(host);
+ host->buf_start = 0;
+ break;
+
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ host->devtype_data->send_cmd(host, command, false);
+ WARN_ONCE(column != -1,
+ "Unexpected column value (cmd=%u, col=%d)\n",
+ command, column);
+ mxc_do_addr_cycle(mtd, column, page_addr);
+
+ break;
+ case NAND_CMD_PARAM:
+ host->devtype_data->send_cmd(host, command, false);
+ mxc_do_addr_cycle(mtd, column, page_addr);
+ host->devtype_data->send_page(mtd, NFC_OUTPUT);
+ memcpy32_fromio(host->data_buf, host->main_area0, 512);
+ host->buf_start = 0;
+ break;
+ default:
+ WARN_ONCE(1, "Unimplemented command (cmd=%u)\n",
+ command);
+ break;
+ }
+}
+
+static int mxc_nand_set_features(struct nand_chip *chip, int addr,
+ u8 *subfeature_param)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct mxc_nand_host *host = nand_get_controller_data(chip);
+ int i;
+
+ host->buf_start = 0;
+
+ for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
+ chip->legacy.write_byte(chip, subfeature_param[i]);
+
+ memcpy32_toio(host->main_area0, host->data_buf, mtd->writesize);
+ host->devtype_data->send_cmd(host, NAND_CMD_SET_FEATURES, false);
+ mxc_do_addr_cycle(mtd, addr, -1);
+ host->devtype_data->send_page(mtd, NFC_INPUT);
+
+ return 0;
+}
+
+static int mxc_nand_get_features(struct nand_chip *chip, int addr,
+ u8 *subfeature_param)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct mxc_nand_host *host = nand_get_controller_data(chip);
+ int i;
+
+ host->devtype_data->send_cmd(host, NAND_CMD_GET_FEATURES, false);
+ mxc_do_addr_cycle(mtd, addr, -1);
+ host->devtype_data->send_page(mtd, NFC_OUTPUT);
+ memcpy32_fromio(host->data_buf, host->main_area0, 512);
+ host->buf_start = 0;
+
+ for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
+ *subfeature_param++ = chip->legacy.read_byte(chip);
+
+ return 0;
+}
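+
+/*
+ * Illustrative caller (core code, not this driver): the ONFI timing mode
+ * negotiation reaches these hooks through
+ * nand_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE, tmode_param) and
+ * the matching nand_get_features() read-back.
+ */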
+
+/*
+ * The generic flash bbt descriptors overlap with our ecc
+ * hardware, so define some i.MX specific ones.
+ */
+static uint8_t bbt_pattern[] = { 'B', 'b', 't', '0' };
+static uint8_t mirror_pattern[] = { '1', 't', 'b', 'B' };
+
+static struct nand_bbt_descr bbt_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 0,
+ .len = 4,
+ .veroffs = 4,
+ .maxblocks = 4,
+ .pattern = bbt_pattern,
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 0,
+ .len = 4,
+ .veroffs = 4,
+ .maxblocks = 4,
+ .pattern = mirror_pattern,
+};
+
+/* v1 + irqpending_quirk: i.MX21 */
+static const struct mxc_nand_devtype_data imx21_nand_devtype_data = {
+ .preset = preset_v1,
+ .read_page = mxc_nand_read_page_v1,
+ .send_cmd = send_cmd_v1_v2,
+ .send_addr = send_addr_v1_v2,
+ .send_page = send_page_v1,
+ .send_read_id = send_read_id_v1_v2,
+ .get_dev_status = get_dev_status_v1_v2,
+ .check_int = check_int_v1_v2,
+ .irq_control = irq_control_v1_v2,
+ .get_ecc_status = get_ecc_status_v1,
+ .ooblayout = &mxc_v1_ooblayout_ops,
+ .select_chip = mxc_nand_select_chip_v1_v3,
+ .enable_hwecc = mxc_nand_enable_hwecc_v1_v2,
+ .irqpending_quirk = 1,
+ .needs_ip = 0,
+ .regs_offset = 0xe00,
+ .spare0_offset = 0x800,
+ .spare_len = 16,
+ .eccbytes = 3,
+ .eccsize = 1,
+};
+
+/* v1 + !irqpending_quirk: i.MX27, i.MX31 */
+static const struct mxc_nand_devtype_data imx27_nand_devtype_data = {
+ .preset = preset_v1,
+ .read_page = mxc_nand_read_page_v1,
+ .send_cmd = send_cmd_v1_v2,
+ .send_addr = send_addr_v1_v2,
+ .send_page = send_page_v1,
+ .send_read_id = send_read_id_v1_v2,
+ .get_dev_status = get_dev_status_v1_v2,
+ .check_int = check_int_v1_v2,
+ .irq_control = irq_control_v1_v2,
+ .get_ecc_status = get_ecc_status_v1,
+ .ooblayout = &mxc_v1_ooblayout_ops,
+ .select_chip = mxc_nand_select_chip_v1_v3,
+ .enable_hwecc = mxc_nand_enable_hwecc_v1_v2,
+ .irqpending_quirk = 0,
+ .needs_ip = 0,
+ .regs_offset = 0xe00,
+ .spare0_offset = 0x800,
+ .axi_offset = 0,
+ .spare_len = 16,
+ .eccbytes = 3,
+ .eccsize = 1,
+};
+
+/* v21: i.MX25, i.MX35 */
+static const struct mxc_nand_devtype_data imx25_nand_devtype_data = {
+ .preset = preset_v2,
+ .read_page = mxc_nand_read_page_v2_v3,
+ .send_cmd = send_cmd_v1_v2,
+ .send_addr = send_addr_v1_v2,
+ .send_page = send_page_v2,
+ .send_read_id = send_read_id_v1_v2,
+ .get_dev_status = get_dev_status_v1_v2,
+ .check_int = check_int_v1_v2,
+ .irq_control = irq_control_v1_v2,
+ .get_ecc_status = get_ecc_status_v2,
+ .ooblayout = &mxc_v2_ooblayout_ops,
+ .select_chip = mxc_nand_select_chip_v2,
+ .setup_interface = mxc_nand_v2_setup_interface,
+ .enable_hwecc = mxc_nand_enable_hwecc_v1_v2,
+ .irqpending_quirk = 0,
+ .needs_ip = 0,
+ .regs_offset = 0x1e00,
+ .spare0_offset = 0x1000,
+ .axi_offset = 0,
+ .spare_len = 64,
+ .eccbytes = 9,
+ .eccsize = 0,
+};
+
+/* v3.2a: i.MX51 */
+static const struct mxc_nand_devtype_data imx51_nand_devtype_data = {
+ .preset = preset_v3,
+ .read_page = mxc_nand_read_page_v2_v3,
+ .send_cmd = send_cmd_v3,
+ .send_addr = send_addr_v3,
+ .send_page = send_page_v3,
+ .send_read_id = send_read_id_v3,
+ .get_dev_status = get_dev_status_v3,
+ .check_int = check_int_v3,
+ .irq_control = irq_control_v3,
+ .get_ecc_status = get_ecc_status_v3,
+ .ooblayout = &mxc_v2_ooblayout_ops,
+ .select_chip = mxc_nand_select_chip_v1_v3,
+ .enable_hwecc = mxc_nand_enable_hwecc_v3,
+ .irqpending_quirk = 0,
+ .needs_ip = 1,
+ .regs_offset = 0,
+ .spare0_offset = 0x1000,
+ .axi_offset = 0x1e00,
+ .spare_len = 64,
+ .eccbytes = 0,
+ .eccsize = 0,
+ .ppb_shift = 7,
+};
+
+/* v3.2b: i.MX53 */
+static const struct mxc_nand_devtype_data imx53_nand_devtype_data = {
+ .preset = preset_v3,
+ .read_page = mxc_nand_read_page_v2_v3,
+ .send_cmd = send_cmd_v3,
+ .send_addr = send_addr_v3,
+ .send_page = send_page_v3,
+ .send_read_id = send_read_id_v3,
+ .get_dev_status = get_dev_status_v3,
+ .check_int = check_int_v3,
+ .irq_control = irq_control_v3,
+ .get_ecc_status = get_ecc_status_v3,
+ .ooblayout = &mxc_v2_ooblayout_ops,
+ .select_chip = mxc_nand_select_chip_v1_v3,
+ .enable_hwecc = mxc_nand_enable_hwecc_v3,
+ .irqpending_quirk = 0,
+ .needs_ip = 1,
+ .regs_offset = 0,
+ .spare0_offset = 0x1000,
+ .axi_offset = 0x1e00,
+ .spare_len = 64,
+ .eccbytes = 0,
+ .eccsize = 0,
+ .ppb_shift = 8,
+};
+
+static inline int is_imx21_nfc(struct mxc_nand_host *host)
+{
+ return host->devtype_data == &imx21_nand_devtype_data;
+}
+
+static inline int is_imx27_nfc(struct mxc_nand_host *host)
+{
+ return host->devtype_data == &imx27_nand_devtype_data;
+}
+
+static inline int is_imx25_nfc(struct mxc_nand_host *host)
+{
+ return host->devtype_data == &imx25_nand_devtype_data;
+}
+
+static inline int is_imx51_nfc(struct mxc_nand_host *host)
+{
+ return host->devtype_data == &imx51_nand_devtype_data;
+}
+
+static inline int is_imx53_nfc(struct mxc_nand_host *host)
+{
+ return host->devtype_data == &imx53_nand_devtype_data;
+}
+
+static const struct platform_device_id mxcnd_devtype[] = {
+ {
+ .name = "imx21-nand",
+ .driver_data = (kernel_ulong_t) &imx21_nand_devtype_data,
+ }, {
+ .name = "imx27-nand",
+ .driver_data = (kernel_ulong_t) &imx27_nand_devtype_data,
+ }, {
+ .name = "imx25-nand",
+ .driver_data = (kernel_ulong_t) &imx25_nand_devtype_data,
+ }, {
+ .name = "imx51-nand",
+ .driver_data = (kernel_ulong_t) &imx51_nand_devtype_data,
+ }, {
+ .name = "imx53-nand",
+ .driver_data = (kernel_ulong_t) &imx53_nand_devtype_data,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(platform, mxcnd_devtype);
+
+#ifdef CONFIG_OF
+static const struct of_device_id mxcnd_dt_ids[] = {
+ {
+ .compatible = "fsl,imx21-nand",
+ .data = &imx21_nand_devtype_data,
+ }, {
+ .compatible = "fsl,imx27-nand",
+ .data = &imx27_nand_devtype_data,
+ }, {
+ .compatible = "fsl,imx25-nand",
+ .data = &imx25_nand_devtype_data,
+ }, {
+ .compatible = "fsl,imx51-nand",
+ .data = &imx51_nand_devtype_data,
+ }, {
+ .compatible = "fsl,imx53-nand",
+ .data = &imx53_nand_devtype_data,
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mxcnd_dt_ids);
+
+static int mxcnd_probe_dt(struct mxc_nand_host *host)
+{
+ struct device_node *np = host->dev->of_node;
+ const struct of_device_id *of_id =
+ of_match_device(mxcnd_dt_ids, host->dev);
+
+ if (!np)
+ return 1;
+
+ host->devtype_data = of_id->data;
+
+ return 0;
+}
+#else
+static int mxcnd_probe_dt(struct mxc_nand_host *host)
+{
+ return 1;
+}
+#endif
+
+static int mxcnd_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct mxc_nand_host *host = nand_get_controller_data(chip);
+ struct device *dev = mtd->dev.parent;
+
+ chip->ecc.bytes = host->devtype_data->eccbytes;
+ host->eccsize = host->devtype_data->eccsize;
+ chip->ecc.size = 512;
+ mtd_set_ooblayout(mtd, host->devtype_data->ooblayout);
+
+ switch (chip->ecc.engine_type) {
+ case NAND_ECC_ENGINE_TYPE_ON_HOST:
+ chip->ecc.read_page = mxc_nand_read_page;
+ chip->ecc.read_page_raw = mxc_nand_read_page_raw;
+ chip->ecc.read_oob = mxc_nand_read_oob;
+ chip->ecc.write_page = mxc_nand_write_page_ecc;
+ chip->ecc.write_page_raw = mxc_nand_write_page_raw;
+ chip->ecc.write_oob = mxc_nand_write_oob;
+ break;
+
+ case NAND_ECC_ENGINE_TYPE_SOFT:
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (chip->bbt_options & NAND_BBT_USE_FLASH) {
+ chip->bbt_td = &bbt_main_descr;
+ chip->bbt_md = &bbt_mirror_descr;
+ }
+
+ /* Allocate the right size buffer now */
+ devm_kfree(dev, (void *)host->data_buf);
+ host->data_buf = devm_kzalloc(dev, mtd->writesize + mtd->oobsize,
+ GFP_KERNEL);
+ if (!host->data_buf)
+ return -ENOMEM;
+
+	/* Call preset again, with the correct writesize this time */
+ host->devtype_data->preset(mtd);
+
+ if (!chip->ecc.bytes) {
+ if (host->eccsize == 8)
+ chip->ecc.bytes = 18;
+ else if (host->eccsize == 4)
+ chip->ecc.bytes = 9;
+ }
+
+ /*
+ * Experimentation shows that i.MX NFC can only handle up to 218 oob
+ * bytes. Limit used_oobsize to 218 so as to not confuse copy_spare()
+ * into copying invalid data to/from the spare IO buffer, as this
+ * might cause ECC data corruption when doing sub-page write to a
+ * partially written page.
+ */
+ host->used_oobsize = min(mtd->oobsize, 218U);
+
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST) {
+ if (is_imx21_nfc(host) || is_imx27_nfc(host))
+ chip->ecc.strength = 1;
+ else
+ chip->ecc.strength = (host->eccsize == 4) ? 4 : 8;
+ }
+
+ return 0;
+}
+
+static int mxcnd_setup_interface(struct nand_chip *chip, int chipnr,
+ const struct nand_interface_config *conf)
+{
+ struct mxc_nand_host *host = nand_get_controller_data(chip);
+
+ return host->devtype_data->setup_interface(chip, chipnr, conf);
+}
+
+static const struct nand_controller_ops mxcnd_controller_ops = {
+ .attach_chip = mxcnd_attach_chip,
+ .setup_interface = mxcnd_setup_interface,
+};
+
+static int mxcnd_probe(struct platform_device *pdev)
+{
+ struct nand_chip *this;
+ struct mtd_info *mtd;
+ struct mxc_nand_host *host;
+ struct resource *res;
+ int err = 0;
+
+ /* Allocate memory for MTD device structure and private data */
+ host = devm_kzalloc(&pdev->dev, sizeof(struct mxc_nand_host),
+ GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
+
+ /* allocate a temporary buffer for the nand_scan_ident() */
+ host->data_buf = devm_kzalloc(&pdev->dev, PAGE_SIZE, GFP_KERNEL);
+ if (!host->data_buf)
+ return -ENOMEM;
+
+ host->dev = &pdev->dev;
+ /* structures must be linked */
+ this = &host->nand;
+ mtd = nand_to_mtd(this);
+ mtd->dev.parent = &pdev->dev;
+ mtd->name = DRIVER_NAME;
+
+	/* 5 us command delay time */
+ this->legacy.chip_delay = 5;
+
+ nand_set_controller_data(this, host);
+	nand_set_flash_node(this, pdev->dev.of_node);
+ this->legacy.dev_ready = mxc_nand_dev_ready;
+ this->legacy.cmdfunc = mxc_nand_command;
+ this->legacy.read_byte = mxc_nand_read_byte;
+ this->legacy.write_buf = mxc_nand_write_buf;
+ this->legacy.read_buf = mxc_nand_read_buf;
+ this->legacy.set_features = mxc_nand_set_features;
+ this->legacy.get_features = mxc_nand_get_features;
+
+ host->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(host->clk))
+ return PTR_ERR(host->clk);
+
+ err = mxcnd_probe_dt(host);
+ if (err > 0) {
+ struct mxc_nand_platform_data *pdata =
+ dev_get_platdata(&pdev->dev);
+ if (pdata) {
+ host->pdata = *pdata;
+ host->devtype_data = (struct mxc_nand_devtype_data *)
+ pdev->id_entry->driver_data;
+ } else {
+ err = -ENODEV;
+ }
+ }
+ if (err < 0)
+ return err;
+
+ if (!host->devtype_data->setup_interface)
+ this->options |= NAND_KEEP_TIMINGS;
+
+ if (host->devtype_data->needs_ip) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ host->regs_ip = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(host->regs_ip))
+ return PTR_ERR(host->regs_ip);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ } else {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ }
+
+ host->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(host->base))
+ return PTR_ERR(host->base);
+
+ host->main_area0 = host->base;
+
+ if (host->devtype_data->regs_offset)
+ host->regs = host->base + host->devtype_data->regs_offset;
+ host->spare0 = host->base + host->devtype_data->spare0_offset;
+ if (host->devtype_data->axi_offset)
+ host->regs_axi = host->base + host->devtype_data->axi_offset;
+
+ this->legacy.select_chip = host->devtype_data->select_chip;
+
+ /* NAND bus width determines access functions used by upper layer */
+ if (host->pdata.width == 2)
+ this->options |= NAND_BUSWIDTH_16;
+
+ /* update flash based bbt */
+ if (host->pdata.flash_bbt)
+ this->bbt_options |= NAND_BBT_USE_FLASH;
+
+ init_completion(&host->op_completion);
+
+ host->irq = platform_get_irq(pdev, 0);
+ if (host->irq < 0)
+ return host->irq;
+
+ /*
+ * Use host->devtype_data->irq_control() here instead of irq_control()
+ * because we must not disable_irq_nosync without having requested the
+ * irq.
+ */
+ host->devtype_data->irq_control(host, 0);
+
+ err = devm_request_irq(&pdev->dev, host->irq, mxc_nfc_irq,
+ 0, DRIVER_NAME, host);
+ if (err)
+ return err;
+
+ err = clk_prepare_enable(host->clk);
+ if (err)
+ return err;
+ host->clk_act = 1;
+
+ /*
+	 * Now that we "own" the interrupt, make sure the interrupt mask bit is
+ * cleared on i.MX21. Otherwise we can't read the interrupt status bit
+ * on this machine.
+ */
+ if (host->devtype_data->irqpending_quirk) {
+ disable_irq_nosync(host->irq);
+ host->devtype_data->irq_control(host, 1);
+ }
+
+ /* Scan the NAND device */
+ this->legacy.dummy_controller.ops = &mxcnd_controller_ops;
+ err = nand_scan(this, is_imx25_nfc(host) ? 4 : 1);
+ if (err)
+ goto escan;
+
+ /* Register the partitions */
+ err = mtd_device_parse_register(mtd, part_probes, NULL,
+ host->pdata.parts,
+ host->pdata.nr_parts);
+ if (err)
+ goto cleanup_nand;
+
+ platform_set_drvdata(pdev, host);
+
+ return 0;
+
+cleanup_nand:
+ nand_cleanup(this);
+escan:
+ if (host->clk_act)
+ clk_disable_unprepare(host->clk);
+
+ return err;
+}
+
+static int mxcnd_remove(struct platform_device *pdev)
+{
+ struct mxc_nand_host *host = platform_get_drvdata(pdev);
+ struct nand_chip *chip = &host->nand;
+ int ret;
+
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ if (host->clk_act)
+ clk_disable_unprepare(host->clk);
+
+ return 0;
+}
+
+static struct platform_driver mxcnd_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = of_match_ptr(mxcnd_dt_ids),
+ },
+ .id_table = mxcnd_devtype,
+ .probe = mxcnd_probe,
+ .remove = mxcnd_remove,
+};
+module_platform_driver(mxcnd_driver);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("MXC NAND MTD driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/raw/mxic_nand.c b/drivers/mtd/nand/raw/mxic_nand.c
new file mode 100644
index 000000000..d66b5b097
--- /dev/null
+++ b/drivers/mtd/nand/raw/mxic_nand.c
@@ -0,0 +1,588 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Macronix International Co., Ltd.
+ *
+ * Author:
+ * Mason Yang <masonccyang@mxic.com.tw>
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/platform_device.h>
+
+#include "internals.h"
+
+#define HC_CFG 0x0
+#define HC_CFG_IF_CFG(x) ((x) << 27)
+#define HC_CFG_DUAL_SLAVE BIT(31)
+#define HC_CFG_INDIVIDUAL BIT(30)
+#define HC_CFG_NIO(x) (((x) / 4) << 27)
+#define HC_CFG_TYPE(s, t) ((t) << (23 + ((s) * 2)))
+#define HC_CFG_TYPE_SPI_NOR 0
+#define HC_CFG_TYPE_SPI_NAND 1
+#define HC_CFG_TYPE_SPI_RAM 2
+#define HC_CFG_TYPE_RAW_NAND 3
+#define HC_CFG_SLV_ACT(x) ((x) << 21)
+#define HC_CFG_CLK_PH_EN BIT(20)
+#define HC_CFG_CLK_POL_INV BIT(19)
+#define HC_CFG_BIG_ENDIAN BIT(18)
+#define HC_CFG_DATA_PASS BIT(17)
+#define HC_CFG_IDLE_SIO_LVL(x) ((x) << 16)
+#define HC_CFG_MAN_START_EN BIT(3)
+#define HC_CFG_MAN_START BIT(2)
+#define HC_CFG_MAN_CS_EN BIT(1)
+#define HC_CFG_MAN_CS_ASSERT BIT(0)
+
+#define INT_STS 0x4
+#define INT_STS_EN 0x8
+#define INT_SIG_EN 0xc
+#define INT_STS_ALL GENMASK(31, 0)
+#define INT_RDY_PIN BIT(26)
+#define INT_RDY_SR BIT(25)
+#define INT_LNR_SUSP BIT(24)
+#define INT_ECC_ERR BIT(17)
+#define INT_CRC_ERR BIT(16)
+#define INT_LWR_DIS BIT(12)
+#define INT_LRD_DIS BIT(11)
+#define INT_SDMA_INT BIT(10)
+#define INT_DMA_FINISH BIT(9)
+#define INT_RX_NOT_FULL BIT(3)
+#define INT_RX_NOT_EMPTY BIT(2)
+#define INT_TX_NOT_FULL BIT(1)
+#define INT_TX_EMPTY BIT(0)
+
+#define HC_EN 0x10
+#define HC_EN_BIT BIT(0)
+
+#define TXD(x) (0x14 + ((x) * 4))
+#define RXD 0x24
+
+#define SS_CTRL(s) (0x30 + ((s) * 4))
+#define LRD_CFG 0x44
+#define LWR_CFG 0x80
+#define RWW_CFG 0x70
+#define OP_READ BIT(23)
+#define OP_DUMMY_CYC(x) ((x) << 17)
+#define OP_ADDR_BYTES(x) ((x) << 14)
+#define OP_CMD_BYTES(x) (((x) - 1) << 13)
+#define OP_OCTA_CRC_EN BIT(12)
+#define OP_DQS_EN BIT(11)
+#define OP_ENHC_EN BIT(10)
+#define OP_PREAMBLE_EN BIT(9)
+#define OP_DATA_DDR BIT(8)
+#define OP_DATA_BUSW(x) ((x) << 6)
+#define OP_ADDR_DDR BIT(5)
+#define OP_ADDR_BUSW(x) ((x) << 3)
+#define OP_CMD_DDR BIT(2)
+#define OP_CMD_BUSW(x) (x)
+#define OP_BUSW_1 0
+#define OP_BUSW_2 1
+#define OP_BUSW_4 2
+#define OP_BUSW_8 3
+
+#define OCTA_CRC 0x38
+#define OCTA_CRC_IN_EN(s) BIT(3 + ((s) * 16))
+#define OCTA_CRC_CHUNK(s, x) ((fls((x) / 32)) << (1 + ((s) * 16)))
+#define OCTA_CRC_OUT_EN(s) BIT(0 + ((s) * 16))
+
+#define ONFI_DIN_CNT(s) (0x3c + (s))
+
+#define LRD_CTRL 0x48
+#define RWW_CTRL 0x74
+#define LWR_CTRL 0x84
+#define LMODE_EN BIT(31)
+#define LMODE_SLV_ACT(x) ((x) << 21)
+#define LMODE_CMD1(x) ((x) << 8)
+#define LMODE_CMD0(x) (x)
+
+#define LRD_ADDR 0x4c
+#define LWR_ADDR 0x88
+#define LRD_RANGE 0x50
+#define LWR_RANGE 0x8c
+
+#define AXI_SLV_ADDR 0x54
+
+#define DMAC_RD_CFG 0x58
+#define DMAC_WR_CFG 0x94
+#define DMAC_CFG_PERIPH_EN BIT(31)
+#define DMAC_CFG_ALLFLUSH_EN BIT(30)
+#define DMAC_CFG_LASTFLUSH_EN BIT(29)
+#define DMAC_CFG_QE(x) (((x) + 1) << 16)
+#define DMAC_CFG_BURST_LEN(x) (((x) + 1) << 12)
+#define DMAC_CFG_BURST_SZ(x) ((x) << 8)
+#define DMAC_CFG_DIR_READ BIT(1)
+#define DMAC_CFG_START BIT(0)
+
+#define DMAC_RD_CNT 0x5c
+#define DMAC_WR_CNT 0x98
+
+#define SDMA_ADDR 0x60
+
+#define DMAM_CFG 0x64
+#define DMAM_CFG_START BIT(31)
+#define DMAM_CFG_CONT BIT(30)
+#define DMAM_CFG_SDMA_GAP(x) (fls((x) / 8192) << 2)
+#define DMAM_CFG_DIR_READ BIT(1)
+#define DMAM_CFG_EN BIT(0)
+
+#define DMAM_CNT 0x68
+
+#define LNR_TIMER_TH 0x6c
+
+#define RDM_CFG0 0x78
+#define RDM_CFG0_POLY(x) (x)
+
+#define RDM_CFG1 0x7c
+#define RDM_CFG1_RDM_EN BIT(31)
+#define RDM_CFG1_SEED(x) (x)
+
+#define LWR_SUSP_CTRL 0x90
+#define LWR_SUSP_CTRL_EN BIT(31)
+
+#define DMAS_CTRL 0x9c
+#define DMAS_CTRL_EN BIT(31)
+#define DMAS_CTRL_DIR_READ BIT(30)
+
+#define DATA_STROB 0xa0
+#define DATA_STROB_EDO_EN BIT(2)
+#define DATA_STROB_INV_POL BIT(1)
+#define DATA_STROB_DELAY_2CYC BIT(0)
+
+#define IDLY_CODE(x) (0xa4 + ((x) * 4))
+#define IDLY_CODE_VAL(x, v) ((v) << (((x) % 4) * 8))
+
+#define GPIO 0xc4
+#define GPIO_PT(x) BIT(3 + ((x) * 16))
+#define GPIO_RESET(x) BIT(2 + ((x) * 16))
+#define GPIO_HOLDB(x) BIT(1 + ((x) * 16))
+#define GPIO_WPB(x) BIT((x) * 16)
+
+#define HC_VER 0xd0
+
+#define HW_TEST(x) (0xe0 + ((x) * 4))
+
+#define MXIC_NFC_MAX_CLK_HZ 50000000
+#define IRQ_TIMEOUT 1000
+
+struct mxic_nand_ctlr {
+ struct clk *ps_clk;
+ struct clk *send_clk;
+ struct clk *send_dly_clk;
+ struct completion complete;
+ void __iomem *regs;
+ struct nand_controller controller;
+ struct device *dev;
+ struct nand_chip chip;
+};
+
+static int mxic_nfc_clk_enable(struct mxic_nand_ctlr *nfc)
+{
+ int ret;
+
+ ret = clk_prepare_enable(nfc->ps_clk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(nfc->send_clk);
+ if (ret)
+ goto err_ps_clk;
+
+ ret = clk_prepare_enable(nfc->send_dly_clk);
+ if (ret)
+ goto err_send_dly_clk;
+
+ return ret;
+
+err_send_dly_clk:
+ clk_disable_unprepare(nfc->send_clk);
+err_ps_clk:
+ clk_disable_unprepare(nfc->ps_clk);
+
+ return ret;
+}
+
+static void mxic_nfc_clk_disable(struct mxic_nand_ctlr *nfc)
+{
+ clk_disable_unprepare(nfc->send_clk);
+ clk_disable_unprepare(nfc->send_dly_clk);
+ clk_disable_unprepare(nfc->ps_clk);
+}
+
+static void mxic_nfc_set_input_delay(struct mxic_nand_ctlr *nfc, u8 idly_code)
+{
+ writel(IDLY_CODE_VAL(0, idly_code) |
+ IDLY_CODE_VAL(1, idly_code) |
+ IDLY_CODE_VAL(2, idly_code) |
+ IDLY_CODE_VAL(3, idly_code),
+ nfc->regs + IDLY_CODE(0));
+ writel(IDLY_CODE_VAL(4, idly_code) |
+ IDLY_CODE_VAL(5, idly_code) |
+ IDLY_CODE_VAL(6, idly_code) |
+ IDLY_CODE_VAL(7, idly_code),
+ nfc->regs + IDLY_CODE(1));
+}
+
+static int mxic_nfc_clk_setup(struct mxic_nand_ctlr *nfc, unsigned long freq)
+{
+ int ret;
+
+ ret = clk_set_rate(nfc->send_clk, freq);
+ if (ret)
+ return ret;
+
+ ret = clk_set_rate(nfc->send_dly_clk, freq);
+ if (ret)
+ return ret;
+
+ /*
+	 * The input delay code ranges from 0x0 to 0x1F in units of 78 ps,
+	 * so the maximum input delay is 31 * 78 ps = 2.418 ns.
+ */
+ mxic_nfc_set_input_delay(nfc, 0xf);
+
+ /*
+ * Phase degree = 360 * freq * output-delay
+ * where output-delay is a constant value 1 ns in FPGA.
+ *
+ * Get Phase degree = 360 * freq * 1 ns
+ * = 360 * freq * 1 sec / 1000000000
+ * = 9 * freq / 25000000
+ */
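+	/*
+	 * For example, at the 50 MHz controller maximum this evaluates to
+	 * 9 * 50000000 / 25000000 = 18 degrees of phase shift.
+	 */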
+ ret = clk_set_phase(nfc->send_dly_clk, 9 * freq / 25000000);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int mxic_nfc_set_freq(struct mxic_nand_ctlr *nfc, unsigned long freq)
+{
+ int ret;
+
+ if (freq > MXIC_NFC_MAX_CLK_HZ)
+ freq = MXIC_NFC_MAX_CLK_HZ;
+
+ mxic_nfc_clk_disable(nfc);
+ ret = mxic_nfc_clk_setup(nfc, freq);
+ if (ret)
+ return ret;
+
+ ret = mxic_nfc_clk_enable(nfc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static irqreturn_t mxic_nfc_isr(int irq, void *dev_id)
+{
+ struct mxic_nand_ctlr *nfc = dev_id;
+ u32 sts;
+
+ sts = readl(nfc->regs + INT_STS);
+ if (sts & INT_RDY_PIN)
+ complete(&nfc->complete);
+ else
+ return IRQ_NONE;
+
+ return IRQ_HANDLED;
+}
+
+static void mxic_nfc_hw_init(struct mxic_nand_ctlr *nfc)
+{
+ writel(HC_CFG_NIO(8) | HC_CFG_TYPE(1, HC_CFG_TYPE_RAW_NAND) |
+ HC_CFG_SLV_ACT(0) | HC_CFG_MAN_CS_EN |
+ HC_CFG_IDLE_SIO_LVL(1), nfc->regs + HC_CFG);
+ writel(INT_STS_ALL, nfc->regs + INT_STS_EN);
+ writel(INT_RDY_PIN, nfc->regs + INT_SIG_EN);
+ writel(0x0, nfc->regs + ONFI_DIN_CNT(0));
+ writel(0, nfc->regs + LRD_CFG);
+ writel(0, nfc->regs + LRD_CTRL);
+ writel(0x0, nfc->regs + HC_EN);
+}
+
+static void mxic_nfc_cs_enable(struct mxic_nand_ctlr *nfc)
+{
+ writel(readl(nfc->regs + HC_CFG) | HC_CFG_MAN_CS_EN,
+ nfc->regs + HC_CFG);
+ writel(HC_CFG_MAN_CS_ASSERT | readl(nfc->regs + HC_CFG),
+ nfc->regs + HC_CFG);
+}
+
+static void mxic_nfc_cs_disable(struct mxic_nand_ctlr *nfc)
+{
+ writel(~HC_CFG_MAN_CS_ASSERT & readl(nfc->regs + HC_CFG),
+ nfc->regs + HC_CFG);
+}
+
+static int mxic_nfc_wait_ready(struct nand_chip *chip)
+{
+ struct mxic_nand_ctlr *nfc = nand_get_controller_data(chip);
+ int ret;
+
+ ret = wait_for_completion_timeout(&nfc->complete,
+ msecs_to_jiffies(IRQ_TIMEOUT));
+ if (!ret) {
+ dev_err(nfc->dev, "nand device timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int mxic_nfc_data_xfer(struct mxic_nand_ctlr *nfc, const void *txbuf,
+ void *rxbuf, unsigned int len)
+{
+ unsigned int pos = 0;
+
+ while (pos < len) {
+ unsigned int nbytes = len - pos;
+ u32 data = 0xffffffff;
+ u32 sts;
+ int ret;
+
+ if (nbytes > 4)
+ nbytes = 4;
+
+ if (txbuf)
+ memcpy(&data, txbuf + pos, nbytes);
+
+ ret = readl_poll_timeout(nfc->regs + INT_STS, sts,
+ sts & INT_TX_EMPTY, 0, USEC_PER_SEC);
+ if (ret)
+ return ret;
+
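+		/*
+		 * Assumption from the register map above (not verified against
+		 * a datasheet): TXD(0) takes full 4-byte words (nbytes % 4 == 0)
+		 * while tail transfers of 1-3 bytes go through the matching
+		 * TXD(1..3) register.
+		 */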
+ writel(data, nfc->regs + TXD(nbytes % 4));
+
+ ret = readl_poll_timeout(nfc->regs + INT_STS, sts,
+ sts & INT_TX_EMPTY, 0, USEC_PER_SEC);
+ if (ret)
+ return ret;
+
+ ret = readl_poll_timeout(nfc->regs + INT_STS, sts,
+ sts & INT_RX_NOT_EMPTY, 0,
+ USEC_PER_SEC);
+ if (ret)
+ return ret;
+
+ data = readl(nfc->regs + RXD);
+ if (rxbuf) {
+ data >>= (8 * (4 - nbytes));
+ memcpy(rxbuf + pos, &data, nbytes);
+ }
+ if (readl(nfc->regs + INT_STS) & INT_RX_NOT_EMPTY)
+ dev_warn(nfc->dev, "RX FIFO not empty\n");
+
+ pos += nbytes;
+ }
+
+ return 0;
+}
+
+static int mxic_nfc_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op, bool check_only)
+{
+ struct mxic_nand_ctlr *nfc = nand_get_controller_data(chip);
+ const struct nand_op_instr *instr = NULL;
+ int ret = 0;
+ unsigned int op_id;
+
+ if (check_only)
+ return 0;
+
+ mxic_nfc_cs_enable(nfc);
+ init_completion(&nfc->complete);
+ for (op_id = 0; op_id < op->ninstrs; op_id++) {
+ instr = &op->instrs[op_id];
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ writel(0, nfc->regs + HC_EN);
+ writel(HC_EN_BIT, nfc->regs + HC_EN);
+ writel(OP_CMD_BUSW(OP_BUSW_8) | OP_DUMMY_CYC(0x3F) |
+ OP_CMD_BYTES(0), nfc->regs + SS_CTRL(0));
+
+ ret = mxic_nfc_data_xfer(nfc,
+ &instr->ctx.cmd.opcode,
+ NULL, 1);
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ writel(OP_ADDR_BUSW(OP_BUSW_8) | OP_DUMMY_CYC(0x3F) |
+ OP_ADDR_BYTES(instr->ctx.addr.naddrs),
+ nfc->regs + SS_CTRL(0));
+ ret = mxic_nfc_data_xfer(nfc,
+ instr->ctx.addr.addrs, NULL,
+ instr->ctx.addr.naddrs);
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ writel(0x0, nfc->regs + ONFI_DIN_CNT(0));
+ writel(OP_DATA_BUSW(OP_BUSW_8) | OP_DUMMY_CYC(0x3F) |
+ OP_READ, nfc->regs + SS_CTRL(0));
+ ret = mxic_nfc_data_xfer(nfc, NULL,
+ instr->ctx.data.buf.in,
+ instr->ctx.data.len);
+ break;
+
+ case NAND_OP_DATA_OUT_INSTR:
+ writel(instr->ctx.data.len,
+ nfc->regs + ONFI_DIN_CNT(0));
+ writel(OP_DATA_BUSW(OP_BUSW_8) | OP_DUMMY_CYC(0x3F),
+ nfc->regs + SS_CTRL(0));
+ ret = mxic_nfc_data_xfer(nfc,
+ instr->ctx.data.buf.out, NULL,
+ instr->ctx.data.len);
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ ret = mxic_nfc_wait_ready(chip);
+ break;
+ }
+ }
+ mxic_nfc_cs_disable(nfc);
+
+ return ret;
+}
+
+static int mxic_nfc_setup_interface(struct nand_chip *chip, int chipnr,
+ const struct nand_interface_config *conf)
+{
+ struct mxic_nand_ctlr *nfc = nand_get_controller_data(chip);
+ const struct nand_sdr_timings *sdr;
+ unsigned long freq;
+ int ret;
+
+ sdr = nand_get_sdr_timings(conf);
+ if (IS_ERR(sdr))
+ return PTR_ERR(sdr);
+
+ if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
+ return 0;
+
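+	/*
+	 * tRC_min is expressed in picoseconds; derive the bus clock from it.
+	 * E.g. (illustrative) tRC_min = 30000 ps is a 30 ns read cycle, i.e.
+	 * NSEC_PER_SEC / 30 = ~33.3 MHz.
+	 */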
+ freq = NSEC_PER_SEC / (sdr->tRC_min / 1000);
+
+ ret = mxic_nfc_set_freq(nfc, freq);
+ if (ret)
+ dev_err(nfc->dev, "set freq:%ld failed\n", freq);
+
+ if (sdr->tRC_min < 30000)
+ writel(DATA_STROB_EDO_EN, nfc->regs + DATA_STROB);
+
+ return 0;
+}
+
+static const struct nand_controller_ops mxic_nand_controller_ops = {
+ .exec_op = mxic_nfc_exec_op,
+ .setup_interface = mxic_nfc_setup_interface,
+};
+
+static int mxic_nfc_probe(struct platform_device *pdev)
+{
+ struct device_node *nand_np, *np = pdev->dev.of_node;
+ struct mtd_info *mtd;
+ struct mxic_nand_ctlr *nfc;
+ struct nand_chip *nand_chip;
+ int err;
+ int irq;
+
+ nfc = devm_kzalloc(&pdev->dev, sizeof(struct mxic_nand_ctlr),
+ GFP_KERNEL);
+ if (!nfc)
+ return -ENOMEM;
+
+ nfc->ps_clk = devm_clk_get(&pdev->dev, "ps");
+ if (IS_ERR(nfc->ps_clk))
+ return PTR_ERR(nfc->ps_clk);
+
+ nfc->send_clk = devm_clk_get(&pdev->dev, "send");
+ if (IS_ERR(nfc->send_clk))
+ return PTR_ERR(nfc->send_clk);
+
+ nfc->send_dly_clk = devm_clk_get(&pdev->dev, "send_dly");
+ if (IS_ERR(nfc->send_dly_clk))
+ return PTR_ERR(nfc->send_dly_clk);
+
+ nfc->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(nfc->regs))
+ return PTR_ERR(nfc->regs);
+
+ nand_chip = &nfc->chip;
+ mtd = nand_to_mtd(nand_chip);
+ mtd->dev.parent = &pdev->dev;
+
+ for_each_child_of_node(np, nand_np)
+ nand_set_flash_node(nand_chip, nand_np);
+
+ nand_chip->priv = nfc;
+ nfc->dev = &pdev->dev;
+ nfc->controller.ops = &mxic_nand_controller_ops;
+ nand_controller_init(&nfc->controller);
+ nand_chip->controller = &nfc->controller;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ mxic_nfc_hw_init(nfc);
+
+ err = devm_request_irq(&pdev->dev, irq, mxic_nfc_isr,
+ 0, "mxic-nfc", nfc);
+ if (err)
+ goto fail;
+
+ err = nand_scan(nand_chip, 1);
+ if (err)
+ goto fail;
+
+ err = mtd_device_register(mtd, NULL, 0);
+ if (err)
+ goto fail;
+
+ platform_set_drvdata(pdev, nfc);
+ return 0;
+
+fail:
+ mxic_nfc_clk_disable(nfc);
+ return err;
+}
+
+static int mxic_nfc_remove(struct platform_device *pdev)
+{
+ struct mxic_nand_ctlr *nfc = platform_get_drvdata(pdev);
+ struct nand_chip *chip = &nfc->chip;
+ int ret;
+
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+
+ mxic_nfc_clk_disable(nfc);
+ return 0;
+}
+
+static const struct of_device_id mxic_nfc_of_ids[] = {
+ { .compatible = "mxic,multi-itfc-v009-nand-controller", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mxic_nfc_of_ids);
+
+static struct platform_driver mxic_nfc_driver = {
+ .probe = mxic_nfc_probe,
+ .remove = mxic_nfc_remove,
+ .driver = {
+ .name = "mxic-nfc",
+ .of_match_table = mxic_nfc_of_ids,
+ },
+};
+module_platform_driver(mxic_nfc_driver);
+
+MODULE_AUTHOR("Mason Yang <masonccyang@mxic.com.tw>");
+MODULE_DESCRIPTION("Macronix raw NAND controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/raw/nand_amd.c b/drivers/mtd/nand/raw/nand_amd.c
new file mode 100644
index 000000000..c3d4dae3c
--- /dev/null
+++ b/drivers/mtd/nand/raw/nand_amd.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2017 Free Electrons
+ * Copyright (C) 2017 NextThing Co
+ *
+ * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
+ */
+
+#include "internals.h"
+
+static void amd_nand_decode_id(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg;
+
+ memorg = nanddev_get_memorg(&chip->base);
+
+ nand_decode_ext_id(chip);
+
+ /*
+ * Check for Spansion/AMD ID + repeating 5th, 6th byte since
+ * some Spansion chips have erasesize that conflicts with size
+ * listed in nand_ids table.
+ * Data sheet (5 byte ID): Spansion S30ML-P ORNAND (p.39)
+ */
+ if (chip->id.data[4] != 0x00 && chip->id.data[5] == 0x00 &&
+ chip->id.data[6] == 0x00 && chip->id.data[7] == 0x00 &&
+ memorg->pagesize == 512) {
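+		/*
+		 * Illustrative decoding (derived from the formula below, not
+		 * from a datasheet): (chip->id.data[3] & 0x03) == 0x01 gives a
+		 * shift of 2, i.e. 256 << 2 = 1024 pages per block, which at
+		 * 512 bytes per page is a 512 KiB erase block.
+		 */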
+ memorg->pages_per_eraseblock = 256;
+ memorg->pages_per_eraseblock <<= ((chip->id.data[3] & 0x03) << 1);
+ mtd->erasesize = memorg->pages_per_eraseblock *
+ memorg->pagesize;
+ }
+}
+
+static int amd_nand_init(struct nand_chip *chip)
+{
+ if (nand_is_slc(chip))
+ /*
+ * According to the datasheet of some Cypress SLC NANDs,
+ * the bad block markers can be in the first, second or last
+ * page of a block. So let's check all three locations.
+ */
+ chip->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE |
+ NAND_BBM_LASTPAGE;
+
+ return 0;
+}
+
+const struct nand_manufacturer_ops amd_nand_manuf_ops = {
+ .detect = amd_nand_decode_id,
+ .init = amd_nand_init,
+};
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
new file mode 100644
index 000000000..c41c0ff61
--- /dev/null
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -0,0 +1,5987 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Overview:
+ * This is the generic MTD driver for NAND flash devices. It should be
+ * capable of working with almost all NAND chips currently available.
+ *
+ * Additional technical information is available on
+ * http://www.linux-mtd.infradead.org/doc/nand.html
+ *
+ * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
+ * 2002-2006 Thomas Gleixner (tglx@linutronix.de)
+ *
+ * Credits:
+ * David Woodhouse for adding multichip support
+ *
+ * Aleph One Ltd. and Toby Churchill Ltd. for supporting the
+ * rework for 2K page size chips
+ *
+ * TODO:
+ * Enable cached programming for 2k page size chips
+ *	Check if mtd->ecctype should be set to MTD_ECC_HW
+ *	if we have HW ECC support.
+ * BBT table is not serialized, has to be fixed
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/types.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/nand_bch.h>
+#include <linux/interrupt.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/mtd/partitions.h>
+#include <linux/of.h>
+#include <linux/gpio/consumer.h>
+
+#include "internals.h"
+
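+/*
+ * Distance-3 MLC pairing as implemented by the helpers below
+ * (illustrative reading of the code, not a datasheet description):
+ * odd pages and page 0 form group 0, even pages form group 1, and
+ * pair 0 is {page 0, page 2}, pair 1 is {page 1, page 4}, pair 2 is
+ * {page 3, page 6}, and so on, with the distance shrinking to 2 for
+ * the last pair of the block.
+ */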
+static int nand_pairing_dist3_get_info(struct mtd_info *mtd, int page,
+ struct mtd_pairing_info *info)
+{
+ int lastpage = (mtd->erasesize / mtd->writesize) - 1;
+ int dist = 3;
+
+ if (page == lastpage)
+ dist = 2;
+
+ if (!page || (page & 1)) {
+ info->group = 0;
+ info->pair = (page + 1) / 2;
+ } else {
+ info->group = 1;
+ info->pair = (page + 1 - dist) / 2;
+ }
+
+ return 0;
+}
+
+static int nand_pairing_dist3_get_wunit(struct mtd_info *mtd,
+ const struct mtd_pairing_info *info)
+{
+ int lastpair = ((mtd->erasesize / mtd->writesize) - 1) / 2;
+ int page = info->pair * 2;
+ int dist = 3;
+
+ if (!info->group && !info->pair)
+ return 0;
+
+ if (info->pair == lastpair && info->group)
+ dist = 2;
+
+ if (!info->group)
+ page--;
+ else if (info->pair)
+ page += dist - 1;
+
+ if (page >= mtd->erasesize / mtd->writesize)
+ return -EINVAL;
+
+ return page;
+}
+
+const struct mtd_pairing_scheme dist3_pairing_scheme = {
+ .ngroups = 2,
+ .get_info = nand_pairing_dist3_get_info,
+ .get_wunit = nand_pairing_dist3_get_wunit,
+};
+
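+/*
+ * Example (illustrative): with 128 KiB erase blocks, phys_erase_shift is
+ * 17, so both @ofs and @len must be multiples of 0x20000 to pass the
+ * checks below.
+ */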
+static int check_offs_len(struct nand_chip *chip, loff_t ofs, uint64_t len)
+{
+ int ret = 0;
+
+ /* Start address must align on block boundary */
+ if (ofs & ((1ULL << chip->phys_erase_shift) - 1)) {
+ pr_debug("%s: unaligned address\n", __func__);
+ ret = -EINVAL;
+ }
+
+ /* Length must align on block boundary */
+ if (len & ((1ULL << chip->phys_erase_shift) - 1)) {
+ pr_debug("%s: length not block aligned\n", __func__);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+/**
+ * nand_extract_bits - Copy unaligned bits from one buffer to another one
+ * @dst: destination buffer
+ * @dst_off: bit offset at which the writing starts
+ * @src: source buffer
+ * @src_off: bit offset at which the reading starts
+ * @nbits: number of bits to copy from @src to @dst
+ *
+ * Copy bits from one memory region to another (overlapping is allowed).
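+ *
+ * Worked example (illustrative): with @dst_off = 3, @src_off = 6 and
+ * @nbits = 4, the four bits at source bit offsets 6..9 are copied to
+ * destination bit offsets 3..6.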
+ */
+void nand_extract_bits(u8 *dst, unsigned int dst_off, const u8 *src,
+ unsigned int src_off, unsigned int nbits)
+{
+ unsigned int tmp, n;
+
+ dst += dst_off / 8;
+ dst_off %= 8;
+ src += src_off / 8;
+ src_off %= 8;
+
+ while (nbits) {
+ n = min3(8 - dst_off, 8 - src_off, nbits);
+
+ tmp = (*src >> src_off) & GENMASK(n - 1, 0);
+ *dst &= ~GENMASK(n - 1 + dst_off, dst_off);
+ *dst |= tmp << dst_off;
+
+ dst_off += n;
+ if (dst_off >= 8) {
+ dst++;
+ dst_off -= 8;
+ }
+
+ src_off += n;
+ if (src_off >= 8) {
+ src++;
+ src_off -= 8;
+ }
+
+ nbits -= n;
+ }
+}
+EXPORT_SYMBOL_GPL(nand_extract_bits);
+
+/**
+ * nand_select_target() - Select a NAND target (A.K.A. die)
+ * @chip: NAND chip object
+ * @cs: the CS line to select. Note that this CS id is always from the chip
+ * PoV, not the controller one
+ *
+ * Select a NAND target so that further operations executed on @chip go to the
+ * selected NAND target.
+ */
+void nand_select_target(struct nand_chip *chip, unsigned int cs)
+{
+ /*
+	 * cs should always lie between 0 and nanddev_ntargets(); when that's
+	 * not the case it's a bug and the caller should be fixed.
+ */
+ if (WARN_ON(cs > nanddev_ntargets(&chip->base)))
+ return;
+
+ chip->cur_cs = cs;
+
+ if (chip->legacy.select_chip)
+ chip->legacy.select_chip(chip, cs);
+}
+EXPORT_SYMBOL_GPL(nand_select_target);
+
+/**
+ * nand_deselect_target() - Deselect the currently selected target
+ * @chip: NAND chip object
+ *
+ * Deselect the currently selected NAND target. The result of operations
+ * executed on @chip after the target has been deselected is undefined.
+ */
+void nand_deselect_target(struct nand_chip *chip)
+{
+ if (chip->legacy.select_chip)
+ chip->legacy.select_chip(chip, -1);
+
+ chip->cur_cs = -1;
+}
+EXPORT_SYMBOL_GPL(nand_deselect_target);
+
+/**
+ * nand_release_device - [GENERIC] release chip
+ * @chip: NAND chip object
+ *
+ * Release chip lock and wake up anyone waiting on the device.
+ */
+static void nand_release_device(struct nand_chip *chip)
+{
+ /* Release the controller and the chip */
+ mutex_unlock(&chip->controller->lock);
+ mutex_unlock(&chip->lock);
+}
+
+/**
+ * nand_bbm_get_next_page - Get the next page for bad block markers
+ * @chip: NAND chip object
+ * @page: First page to start checking for bad block marker usage
+ *
+ * Returns an integer that corresponds to the page offset within a block, for
+ * a page that is used to store bad block markers. If no more pages are
+ * available, -EINVAL is returned.
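+ *
+ * For example (illustrative), on a chip with NAND_BBM_FIRSTPAGE and
+ * NAND_BBM_SECONDPAGE set, successive calls starting at @page = 0
+ * return 0, then 1, then -EINVAL.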
+ */
+int nand_bbm_get_next_page(struct nand_chip *chip, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int last_page = ((mtd->erasesize - mtd->writesize) >>
+ chip->page_shift) & chip->pagemask;
+ unsigned int bbm_flags = NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE
+ | NAND_BBM_LASTPAGE;
+
+ if (page == 0 && !(chip->options & bbm_flags))
+ return 0;
+ if (page == 0 && chip->options & NAND_BBM_FIRSTPAGE)
+ return 0;
+ if (page <= 1 && chip->options & NAND_BBM_SECONDPAGE)
+ return 1;
+ if (page <= last_page && chip->options & NAND_BBM_LASTPAGE)
+ return last_page;
+
+ return -EINVAL;
+}
+
+/**
+ * nand_block_bad - [DEFAULT] Read bad block marker from the chip
+ * @chip: NAND chip object
+ * @ofs: offset from device start
+ *
+ * Check if the block is bad.
+ */
+static int nand_block_bad(struct nand_chip *chip, loff_t ofs)
+{
+ int first_page, page_offset;
+ int res;
+ u8 bad;
+
+ first_page = (int)(ofs >> chip->page_shift) & chip->pagemask;
+ page_offset = nand_bbm_get_next_page(chip, 0);
+
+ while (page_offset >= 0) {
+ res = chip->ecc.read_oob(chip, first_page + page_offset);
+ if (res < 0)
+ return res;
+
+ bad = chip->oob_poi[chip->badblockpos];
+
+ if (likely(chip->badblockbits == 8))
+ res = bad != 0xFF;
+ else
+ res = hweight8(bad) < chip->badblockbits;
+ if (res)
+ return res;
+
+ page_offset = nand_bbm_get_next_page(chip, page_offset + 1);
+ }
+
+ return 0;
+}
+
+static int nand_isbad_bbm(struct nand_chip *chip, loff_t ofs)
+{
+ if (chip->options & NAND_NO_BBM_QUIRK)
+ return 0;
+
+ if (chip->legacy.block_bad)
+ return chip->legacy.block_bad(chip, ofs);
+
+ return nand_block_bad(chip, ofs);
+}
+
+/**
+ * nand_get_device - [GENERIC] Get chip for selected access
+ * @chip: NAND chip structure
+ *
+ * Lock the device and its controller for exclusive access. If the chip
+ * is suspended, wait until it is resumed before taking the locks.
+ */
+static void nand_get_device(struct nand_chip *chip)
+{
+ /* Wait until the device is resumed. */
+ while (1) {
+ mutex_lock(&chip->lock);
+ if (!chip->suspended) {
+ mutex_lock(&chip->controller->lock);
+ return;
+ }
+ mutex_unlock(&chip->lock);
+
+ wait_event(chip->resume_wq, !chip->suspended);
+ }
+}
+
+/**
+ * nand_check_wp - [GENERIC] check if the chip is write protected
+ * @chip: NAND chip object
+ *
+ * Check if the device is write protected. The function expects that the
+ * device is already selected.
+ */
+static int nand_check_wp(struct nand_chip *chip)
+{
+ u8 status;
+ int ret;
+
+ /* Broken xD cards report WP despite being writable */
+ if (chip->options & NAND_BROKEN_XD)
+ return 0;
+
+ /* Check the WP bit */
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+
+ return status & NAND_STATUS_WP ? 0 : 1;
+}
+
+/**
+ * nand_fill_oob - [INTERN] Transfer client buffer to oob
+ * @chip: NAND chip object
+ * @oob: oob data buffer
+ * @len: oob data write length
+ * @ops: oob ops structure
+ */
+static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len,
+ struct mtd_oob_ops *ops)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ /*
+ * Initialise to all 0xFF, to avoid the possibility of left over OOB
+ * data from a previous OOB read.
+ */
+ memset(chip->oob_poi, 0xff, mtd->oobsize);
+
+ switch (ops->mode) {
+
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_RAW:
+ memcpy(chip->oob_poi + ops->ooboffs, oob, len);
+ return oob + len;
+
+ case MTD_OPS_AUTO_OOB:
+ ret = mtd_ooblayout_set_databytes(mtd, oob, chip->oob_poi,
+ ops->ooboffs, len);
+ BUG_ON(ret);
+ return oob + len;
+
+ default:
+ BUG();
+ }
+ return NULL;
+}
+
+/**
+ * nand_do_write_oob - [MTD Interface] NAND write out-of-band
+ * @chip: NAND chip object
+ * @to: offset to write to
+ * @ops: oob operation description structure
+ *
+ * NAND write out-of-band.
+ */
+static int nand_do_write_oob(struct nand_chip *chip, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int chipnr, page, status, len, ret;
+
+ pr_debug("%s: to = 0x%08x, len = %i\n",
+ __func__, (unsigned int)to, (int)ops->ooblen);
+
+ len = mtd_oobavail(mtd, ops);
+
+ /* Do not allow write past end of page */
+ if ((ops->ooboffs + ops->ooblen) > len) {
+ pr_debug("%s: attempt to write past end of page\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ chipnr = (int)(to >> chip->chip_shift);
+
+ /*
+ * Reset the chip. Some chips (like the Toshiba TC5832DC found in one
+ * of my DiskOnChip 2000 test units) will clear the whole data page too
+ * if we don't do this. I have no clue why, but I seem to have 'fixed'
+ * it in the doc2000 driver in August 1999. dwmw2.
+ */
+ ret = nand_reset(chip, chipnr);
+ if (ret)
+ return ret;
+
+ nand_select_target(chip, chipnr);
+
+ /* Shift to get page */
+ page = (int)(to >> chip->page_shift);
+
+	/* Check if it is write protected */
+ if (nand_check_wp(chip)) {
+ nand_deselect_target(chip);
+ return -EROFS;
+ }
+
+ /* Invalidate the page cache, if we write to the cached page */
+ if (page == chip->pagecache.page)
+ chip->pagecache.page = -1;
+
+ nand_fill_oob(chip, ops->oobbuf, ops->ooblen, ops);
+
+ if (ops->mode == MTD_OPS_RAW)
+ status = chip->ecc.write_oob_raw(chip, page & chip->pagemask);
+ else
+ status = chip->ecc.write_oob(chip, page & chip->pagemask);
+
+ nand_deselect_target(chip);
+
+ if (status)
+ return status;
+
+ ops->oobretlen = ops->ooblen;
+
+ return 0;
+}
+
+/**
+ * nand_default_block_markbad - [DEFAULT] mark a block bad via bad block marker
+ * @chip: NAND chip object
+ * @ofs: offset from device start
+ *
+ * This is the default implementation, which can be overridden by a hardware
+ * specific driver. It provides the details for writing a bad block marker to a
+ * block.
+ */
+static int nand_default_block_markbad(struct nand_chip *chip, loff_t ofs)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct mtd_oob_ops ops;
+ uint8_t buf[2] = { 0, 0 };
+ int ret = 0, res, page_offset;
+
+ memset(&ops, 0, sizeof(ops));
+ ops.oobbuf = buf;
+ ops.ooboffs = chip->badblockpos;
+ if (chip->options & NAND_BUSWIDTH_16) {
+ ops.ooboffs &= ~0x01;
+ ops.len = ops.ooblen = 2;
+ } else {
+ ops.len = ops.ooblen = 1;
+ }
+ ops.mode = MTD_OPS_PLACE_OOB;
+
+ page_offset = nand_bbm_get_next_page(chip, 0);
+
+ while (page_offset >= 0) {
+ res = nand_do_write_oob(chip,
+ ofs + (page_offset * mtd->writesize),
+ &ops);
+
+ if (!ret)
+ ret = res;
+
+ page_offset = nand_bbm_get_next_page(chip, page_offset + 1);
+ }
+
+ return ret;
+}
+
+/**
+ * nand_markbad_bbm - mark a block by updating the BBM
+ * @chip: NAND chip object
+ * @ofs: offset of the block to mark bad
+ */
+int nand_markbad_bbm(struct nand_chip *chip, loff_t ofs)
+{
+ if (chip->legacy.block_markbad)
+ return chip->legacy.block_markbad(chip, ofs);
+
+ return nand_default_block_markbad(chip, ofs);
+}
+
+/**
+ * nand_block_markbad_lowlevel - mark a block bad
+ * @chip: NAND chip object
+ * @ofs: offset from device start
+ *
+ * This function performs the generic NAND bad block marking steps (i.e., bad
+ * block table(s) and/or marker(s)). We only allow the hardware driver to
+ * specify how to write bad block markers to OOB (chip->legacy.block_markbad).
+ *
+ * We try operations in the following order:
+ *
+ * (1) erase the affected block, to allow OOB marker to be written cleanly
+ * (2) write bad block marker to OOB area of affected block (unless flag
+ * NAND_BBT_NO_OOB_BBM is present)
+ * (3) update the BBT
+ *
+ * Note that we retain the first error encountered in (2) or (3), finish the
+ * procedures, and dump the error in the end.
+ */
+static int nand_block_markbad_lowlevel(struct nand_chip *chip, loff_t ofs)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int res, ret = 0;
+
+ if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
+ struct erase_info einfo;
+
+ /* Attempt erase before marking OOB */
+ memset(&einfo, 0, sizeof(einfo));
+ einfo.addr = ofs;
+ einfo.len = 1ULL << chip->phys_erase_shift;
+ nand_erase_nand(chip, &einfo, 0);
+
+ /* Write bad block marker to OOB */
+ nand_get_device(chip);
+
+ ret = nand_markbad_bbm(chip, ofs);
+ nand_release_device(chip);
+ }
+
+ /* Mark block bad in BBT */
+ if (chip->bbt) {
+ res = nand_markbad_bbt(chip, ofs);
+ if (!ret)
+ ret = res;
+ }
+
+ if (!ret)
+ mtd->ecc_stats.badblocks++;
+
+ return ret;
+}
+
+/**
+ * nand_block_isreserved - [GENERIC] Check if a block is marked reserved.
+ * @mtd: MTD device structure
+ * @ofs: offset from device start
+ *
+ * Check if the block is marked as reserved.
+ */
+static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (!chip->bbt)
+ return 0;
+ /* Return info from the table */
+ return nand_isreserved_bbt(chip, ofs);
+}
+
+/**
+ * nand_block_checkbad - [GENERIC] Check if a block is marked bad
+ * @chip: NAND chip object
+ * @ofs: offset from device start
+ * @allowbbt: 1 if it is allowed to access the bbt area
+ *
+ * Check if the block is bad, either by reading the bad block table or by
+ * calling the scan function.
+ */
+static int nand_block_checkbad(struct nand_chip *chip, loff_t ofs, int allowbbt)
+{
+ /* Return info from the table */
+ if (chip->bbt)
+ return nand_isbad_bbt(chip, ofs, allowbbt);
+
+ return nand_isbad_bbm(chip, ofs);
+}
+
+/**
+ * nand_soft_waitrdy - Poll STATUS reg until RDY bit is set to 1
+ * @chip: NAND chip structure
+ * @timeout_ms: Timeout in ms
+ *
+ * Poll the STATUS register using ->exec_op() until the RDY bit becomes 1.
+ * If that does not happen within the specified timeout, -ETIMEDOUT is
+ * returned.
+ *
+ * This helper is intended to be used when the controller does not have access
+ * to the NAND R/B pin.
+ *
+ * Be aware that calling this helper from an ->exec_op() implementation means
+ * ->exec_op() must be re-entrant.
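+ *
+ * A typical use (illustrative) is from a driver's ->exec_op() handler,
+ * when a NAND_OP_WAITRDY_INSTR is reached and no R/B line is wired up:
+ * nand_soft_waitrdy(chip, instr->ctx.waitrdy.timeout_ms).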
+ *
+ * Return 0 if the NAND chip is ready, a negative error otherwise.
+ */
+int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
+{
+ const struct nand_sdr_timings *timings;
+ u8 status = 0;
+ int ret;
+
+ if (!nand_has_exec_op(chip))
+ return -ENOTSUPP;
+
+ /* Wait tWB before polling the STATUS reg. */
+ timings = nand_get_sdr_timings(nand_get_interface_config(chip));
+ ndelay(PSEC_TO_NSEC(timings->tWB_max));
+
+ ret = nand_status_op(chip, NULL);
+ if (ret)
+ return ret;
+
+ /*
+	 * +1 below is necessary because if we are now in the last fraction
+	 * of a jiffy and msecs_to_jiffies() is 1, we would wait only that
+	 * small jiffy fraction, possibly leading to a false timeout.
+ */
+ timeout_ms = jiffies + msecs_to_jiffies(timeout_ms) + 1;
+ do {
+ ret = nand_read_data_op(chip, &status, sizeof(status), true,
+ false);
+ if (ret)
+ break;
+
+ if (status & NAND_STATUS_READY)
+ break;
+
+ /*
+ * Typical lowest execution time for a tR on most NANDs is 10us,
+		 * use this as the polling delay before doing something smarter (i.e.
+ * deriving a delay from the timeout value, timeout_ms/ratio).
+ */
+ udelay(10);
+ } while (time_before(jiffies, timeout_ms));
+
+ /*
+ * We have to exit READ_STATUS mode in order to read real data on the
+ * bus in case the WAITRDY instruction is preceding a DATA_IN
+ * instruction.
+ */
+ nand_exit_status_op(chip);
+
+ if (ret)
+ return ret;
+
+ return status & NAND_STATUS_READY ? 0 : -ETIMEDOUT;
+};
+EXPORT_SYMBOL_GPL(nand_soft_waitrdy);
+
+/**
+ * nand_gpio_waitrdy - Poll R/B GPIO pin until ready
+ * @chip: NAND chip structure
+ * @gpiod: GPIO descriptor of R/B pin
+ * @timeout_ms: Timeout in ms
+ *
+ * Poll the R/B GPIO pin until it becomes ready. If that does not happen
+ * within the specified timeout, -ETIMEDOUT is returned.
+ *
+ * This helper is intended to be used when the controller has access to the
+ * NAND R/B pin over GPIO.
+ *
+ * Return 0 if the R/B pin indicates chip is ready, a negative error otherwise.
+ */
+int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod,
+ unsigned long timeout_ms)
+{
+
+ /*
+ * Wait until R/B pin indicates chip is ready or timeout occurs.
+	 * +1 below is necessary because if we are now in the last fraction
+	 * of a jiffy and msecs_to_jiffies() is 1, we would wait only that
+	 * small jiffy fraction, possibly leading to a false timeout.
+ */
+ timeout_ms = jiffies + msecs_to_jiffies(timeout_ms) + 1;
+ do {
+ if (gpiod_get_value_cansleep(gpiod))
+ return 0;
+
+ cond_resched();
+ } while (time_before(jiffies, timeout_ms));
+
+ return gpiod_get_value_cansleep(gpiod) ? 0 : -ETIMEDOUT;
+};
+EXPORT_SYMBOL_GPL(nand_gpio_waitrdy);
+
+/**
+ * panic_nand_wait - [GENERIC] wait until the command is done
+ * @chip: NAND chip structure
+ * @timeo: timeout
+ *
+ * Wait for command done. This is a helper function for nand_wait used when
+ * we are in interrupt context, which may happen when panicking and
+ * trying to write an oops through mtdoops.
+ */
+void panic_nand_wait(struct nand_chip *chip, unsigned long timeo)
+{
+ int i;
+ for (i = 0; i < timeo; i++) {
+ if (chip->legacy.dev_ready) {
+ if (chip->legacy.dev_ready(chip))
+ break;
+ } else {
+ int ret;
+ u8 status;
+
+ ret = nand_read_data_op(chip, &status, sizeof(status),
+ true, false);
+ if (ret)
+ return;
+
+ if (status & NAND_STATUS_READY)
+ break;
+ }
+ mdelay(1);
+ }
+}
+
+static bool nand_supports_get_features(struct nand_chip *chip, int addr)
+{
+ return (chip->parameters.supports_set_get_features &&
+ test_bit(addr, chip->parameters.get_feature_list));
+}
+
+static bool nand_supports_set_features(struct nand_chip *chip, int addr)
+{
+ return (chip->parameters.supports_set_get_features &&
+ test_bit(addr, chip->parameters.set_feature_list));
+}
+
+/**
+ * nand_reset_interface - Reset data interface and timings
+ * @chip: The NAND chip
+ * @chipnr: Internal die id
+ *
+ * Reset the Data interface and timings to ONFI mode 0.
+ *
+ * Returns 0 for success or negative error code otherwise.
+ */
+static int nand_reset_interface(struct nand_chip *chip, int chipnr)
+{
+ const struct nand_controller_ops *ops = chip->controller->ops;
+ int ret;
+
+ if (!nand_controller_can_setup_interface(chip))
+ return 0;
+
+ /*
+ * The ONFI specification says:
+ * "
+ * To transition from NV-DDR or NV-DDR2 to the SDR data
+ * interface, the host shall use the Reset (FFh) command
+ * using SDR timing mode 0. A device in any timing mode is
+ * required to recognize Reset (FFh) command issued in SDR
+ * timing mode 0.
+ * "
+ *
+ * Configure the data interface in SDR mode and set the
+ * timings to timing mode 0.
+ */
+
+ chip->current_interface_config = nand_get_reset_interface_config();
+ ret = ops->setup_interface(chip, chipnr,
+ chip->current_interface_config);
+ if (ret)
+ pr_err("Failed to configure data interface to SDR timing mode 0\n");
+
+ return ret;
+}
+
+/**
+ * nand_setup_interface - Setup the best data interface and timings
+ * @chip: The NAND chip
+ * @chipnr: Internal die id
+ *
+ * Configure what has been reported to be the best data interface and NAND
+ * timings supported by the chip and the driver.
+ *
+ * Returns 0 for success or negative error code otherwise.
+ */
+static int nand_setup_interface(struct nand_chip *chip, int chipnr)
+{
+ const struct nand_controller_ops *ops = chip->controller->ops;
+ u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = { };
+ int ret;
+
+ if (!nand_controller_can_setup_interface(chip))
+ return 0;
+
+ /*
+	 * A nand_reset_interface() call puts both the NAND chip and the NAND
+	 * controller in timing mode 0. If the default mode for this chip is
+	 * also 0, there is no need to apply the change again. Moreover, at
+	 * probe time, nand_setup_interface() uses ->set/get_features(), which
+	 * would fail anyway as the parameter page is not available yet.
+ */
+ if (!chip->best_interface_config)
+ return 0;
+
+ tmode_param[0] = chip->best_interface_config->timings.mode;
+
+ /* Change the mode on the chip side (if supported by the NAND chip) */
+ if (nand_supports_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE)) {
+ nand_select_target(chip, chipnr);
+ ret = nand_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
+ tmode_param);
+ nand_deselect_target(chip);
+ if (ret)
+ return ret;
+ }
+
+ /* Change the mode on the controller side */
+ ret = ops->setup_interface(chip, chipnr, chip->best_interface_config);
+ if (ret)
+ return ret;
+
+ /* Check the mode has been accepted by the chip, if supported */
+ if (!nand_supports_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE))
+ goto update_interface_config;
+
+ memset(tmode_param, 0, ONFI_SUBFEATURE_PARAM_LEN);
+ nand_select_target(chip, chipnr);
+ ret = nand_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
+ tmode_param);
+ nand_deselect_target(chip);
+ if (ret)
+ goto err_reset_chip;
+
+ if (tmode_param[0] != chip->best_interface_config->timings.mode) {
+ pr_warn("timing mode %d not acknowledged by the NAND chip\n",
+ chip->best_interface_config->timings.mode);
+ goto err_reset_chip;
+ }
+
+update_interface_config:
+ chip->current_interface_config = chip->best_interface_config;
+
+ return 0;
+
+err_reset_chip:
+ /*
+ * Fallback to mode 0 if the chip explicitly did not ack the chosen
+ * timing mode.
+ */
+ nand_reset_interface(chip, chipnr);
+ nand_select_target(chip, chipnr);
+ nand_reset_op(chip);
+ nand_deselect_target(chip);
+
+ return ret;
+}
+
+/**
+ * nand_choose_best_sdr_timings - Pick up the best SDR timings that both the
+ * NAND controller and the NAND chip support
+ * @chip: the NAND chip
+ * @iface: the interface configuration (may be updated)
+ * @spec_timings: specific timings, when not fitting the ONFI specification
+ *
+ * If specific timings are provided, use them. Otherwise, retrieve supported
+ * timing modes from ONFI information.
+ */
+int nand_choose_best_sdr_timings(struct nand_chip *chip,
+ struct nand_interface_config *iface,
+ struct nand_sdr_timings *spec_timings)
+{
+ const struct nand_controller_ops *ops = chip->controller->ops;
+ int best_mode = 0, mode, ret;
+
+ iface->type = NAND_SDR_IFACE;
+
+ if (spec_timings) {
+ iface->timings.sdr = *spec_timings;
+ iface->timings.mode = onfi_find_closest_sdr_mode(spec_timings);
+
+ /* Verify the controller supports the requested interface */
+ ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
+ iface);
+ if (!ret) {
+ chip->best_interface_config = iface;
+ return ret;
+ }
+
+ /* Fallback to slower modes */
+ best_mode = iface->timings.mode;
+ } else if (chip->parameters.onfi) {
+ best_mode = fls(chip->parameters.onfi->async_timing_mode) - 1;
+ }
+
+ for (mode = best_mode; mode >= 0; mode--) {
+ onfi_fill_interface_config(chip, iface, NAND_SDR_IFACE, mode);
+
+ ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
+ iface);
+ if (!ret)
+ break;
+ }
+
+ chip->best_interface_config = iface;
+
+ return 0;
+}
+
+/**
+ * nand_choose_interface_config - find the best data interface and timings
+ * @chip: The NAND chip
+ *
+ * Find the best data interface and NAND timings supported by the chip
+ * and the driver, and let the NAND manufacturer driver propose its own
+ * set of timings.
+ *
+ * After this function, nand_chip->interface_config is initialized with the
+ * best timing mode available.
+ *
+ * Returns 0 for success or negative error code otherwise.
+ */
+static int nand_choose_interface_config(struct nand_chip *chip)
+{
+ struct nand_interface_config *iface;
+ int ret;
+
+ if (!nand_controller_can_setup_interface(chip))
+ return 0;
+
+ iface = kzalloc(sizeof(*iface), GFP_KERNEL);
+ if (!iface)
+ return -ENOMEM;
+
+ if (chip->ops.choose_interface_config)
+ ret = chip->ops.choose_interface_config(chip, iface);
+ else
+ ret = nand_choose_best_sdr_timings(chip, iface, NULL);
+
+ if (ret)
+ kfree(iface);
+
+ return ret;
+}
+
+/**
+ * nand_fill_column_cycles - fill the column cycles of an address
+ * @chip: The NAND chip
+ * @addrs: Array of address cycles to fill
+ * @offset_in_page: The offset in the page
+ *
+ * Fills the first or the first two bytes of the @addrs field depending
+ * on the NAND bus width and the page size.
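+ *
+ * Worked example (illustrative): on an 8-bit bus, large page device,
+ * @offset_in_page = 0x123 is encoded as addrs[0] = 0x23 and
+ * addrs[1] = 0x01, and two cycles are returned.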
+ *
+ * Returns the number of cycles needed to encode the column, or a negative
+ * error code in case one of the arguments is invalid.
+ */
+static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs,
+ unsigned int offset_in_page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ /* Make sure the offset is less than the actual page size. */
+ if (offset_in_page > mtd->writesize + mtd->oobsize)
+ return -EINVAL;
+
+ /*
+ * On small page NANDs, there's a dedicated command to access the OOB
+ * area, and the column address is relative to the start of the OOB
+ * area, not the start of the page. Adjust the address accordingly.
+ */
+ if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize)
+ offset_in_page -= mtd->writesize;
+
+ /*
+ * The offset in page is expressed in bytes, if the NAND bus is 16-bit
+ * wide, then it must be divided by 2.
+ */
+ if (chip->options & NAND_BUSWIDTH_16) {
+ if (WARN_ON(offset_in_page % 2))
+ return -EINVAL;
+
+ offset_in_page /= 2;
+ }
+
+ addrs[0] = offset_in_page;
+
+ /*
+ * Small page NANDs use 1 cycle for the columns, while large page NANDs
+ * need 2
+ */
+ if (mtd->writesize <= 512)
+ return 1;
+
+ addrs[1] = offset_in_page >> 8;
+
+ return 2;
+}
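+
+/*
+ * Worked example: on a large page NAND (2048+64 bytes) with an 8-bit bus,
+ * offset_in_page = 2050 (2 bytes into the OOB area) encodes as column
+ * 0x802, so addrs[0] = 0x02, addrs[1] = 0x08, and 2 cycles are returned.
+ * On a 16-bit bus the same offset is first halved to 0x401.
+ */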
+
+static int nand_sp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, void *buf,
+ unsigned int len)
+{
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(nand_get_interface_config(chip));
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u8 addrs[4];
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_READ0, 0),
+ NAND_OP_ADDR(3, addrs, PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
+ PSEC_TO_NSEC(sdr->tRR_min)),
+ NAND_OP_DATA_IN(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+ int ret;
+
+ /* Drop the DATA_IN instruction if len is set to 0. */
+ if (!len)
+ op.ninstrs--;
+
+ if (offset_in_page >= mtd->writesize)
+ instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
+ else if (offset_in_page >= 256 &&
+ !(chip->options & NAND_BUSWIDTH_16))
+ instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
+
+ ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
+ if (ret < 0)
+ return ret;
+
+ addrs[1] = page;
+ addrs[2] = page >> 8;
+
+ if (chip->options & NAND_ROW_ADDR_3) {
+ addrs[3] = page >> 16;
+ instrs[1].ctx.addr.naddrs++;
+ }
+
+ return nand_exec_op(chip, &op);
+}
+
+static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, void *buf,
+ unsigned int len)
+{
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(nand_get_interface_config(chip));
+ u8 addrs[5];
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_READ0, 0),
+ NAND_OP_ADDR(4, addrs, 0),
+ NAND_OP_CMD(NAND_CMD_READSTART, PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
+ PSEC_TO_NSEC(sdr->tRR_min)),
+ NAND_OP_DATA_IN(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+ int ret;
+
+ /* Drop the DATA_IN instruction if len is set to 0. */
+ if (!len)
+ op.ninstrs--;
+
+ ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
+ if (ret < 0)
+ return ret;
+
+ addrs[2] = page;
+ addrs[3] = page >> 8;
+
+ if (chip->options & NAND_ROW_ADDR_3) {
+ addrs[4] = page >> 16;
+ instrs[1].ctx.addr.naddrs++;
+ }
+
+ return nand_exec_op(chip, &op);
+}
+
+/**
+ * nand_read_page_op - Do a READ PAGE operation
+ * @chip: The NAND chip
+ * @page: page to read
+ * @offset_in_page: offset within the page
+ * @buf: buffer used to store the data
+ * @len: length of the buffer
+ *
+ * This function issues a READ PAGE operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_read_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, void *buf, unsigned int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (len && !buf)
+ return -EINVAL;
+
+ if (offset_in_page + len > mtd->writesize + mtd->oobsize)
+ return -EINVAL;
+
+ if (nand_has_exec_op(chip)) {
+ if (mtd->writesize > 512)
+ return nand_lp_exec_read_page_op(chip, page,
+ offset_in_page, buf,
+ len);
+
+ return nand_sp_exec_read_page_op(chip, page, offset_in_page,
+ buf, len);
+ }
+
+ chip->legacy.cmdfunc(chip, NAND_CMD_READ0, offset_in_page, page);
+ if (len)
+ chip->legacy.read_buf(chip, buf, len);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_read_page_op);
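+
+/*
+ * Example: a minimal sketch of a full raw page read built on top of
+ * nand_read_page_op(). The function name is hypothetical, and the caller
+ * is assumed to have selected the target with nand_select_target() first.
+ */
+static int __maybe_unused example_read_full_page(struct nand_chip *chip,
+ unsigned int page, u8 *buf)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ /* Read main data plus OOB in one operation, starting at column 0. */
+ return nand_read_page_op(chip, page, 0, buf,
+ mtd->writesize + mtd->oobsize);
+}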
+
+/**
+ * nand_read_param_page_op - Do a READ PARAMETER PAGE operation
+ * @chip: The NAND chip
+ * @page: parameter page to read
+ * @buf: buffer used to store the data
+ * @len: length of the buffer
+ *
+ * This function issues a READ PARAMETER PAGE operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
+ unsigned int len)
+{
+ unsigned int i;
+ u8 *p = buf;
+
+ if (len && !buf)
+ return -EINVAL;
+
+ if (nand_has_exec_op(chip)) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(nand_get_interface_config(chip));
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_PARAM, 0),
+ NAND_OP_ADDR(1, &page, PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
+ PSEC_TO_NSEC(sdr->tRR_min)),
+ NAND_OP_8BIT_DATA_IN(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+
+ /* Drop the DATA_IN instruction if len is set to 0. */
+ if (!len)
+ op.ninstrs--;
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->legacy.cmdfunc(chip, NAND_CMD_PARAM, page, -1);
+ for (i = 0; i < len; i++)
+ p[i] = chip->legacy.read_byte(chip);
+
+ return 0;
+}
+
+/**
+ * nand_change_read_column_op - Do a CHANGE READ COLUMN operation
+ * @chip: The NAND chip
+ * @offset_in_page: offset within the page
+ * @buf: buffer used to store the data
+ * @len: length of the buffer
+ * @force_8bit: force 8-bit bus access
+ *
+ * This function issues a CHANGE READ COLUMN operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_change_read_column_op(struct nand_chip *chip,
+ unsigned int offset_in_page, void *buf,
+ unsigned int len, bool force_8bit)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (len && !buf)
+ return -EINVAL;
+
+ if (offset_in_page + len > mtd->writesize + mtd->oobsize)
+ return -EINVAL;
+
+ /* Small page NANDs do not support column change. */
+ if (mtd->writesize <= 512)
+ return -ENOTSUPP;
+
+ if (nand_has_exec_op(chip)) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(nand_get_interface_config(chip));
+ u8 addrs[2] = {};
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_RNDOUT, 0),
+ NAND_OP_ADDR(2, addrs, 0),
+ NAND_OP_CMD(NAND_CMD_RNDOUTSTART,
+ PSEC_TO_NSEC(sdr->tCCS_min)),
+ NAND_OP_DATA_IN(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+ int ret;
+
+ ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
+ if (ret < 0)
+ return ret;
+
+ /* Drop the DATA_IN instruction if len is set to 0. */
+ if (!len)
+ op.ninstrs--;
+
+ instrs[3].ctx.data.force_8bit = force_8bit;
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->legacy.cmdfunc(chip, NAND_CMD_RNDOUT, offset_in_page, -1);
+ if (len)
+ chip->legacy.read_buf(chip, buf, len);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_change_read_column_op);
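+
+/*
+ * Example: a sketch (hypothetical helper) that reads the main data first,
+ * then uses a CHANGE READ COLUMN to jump to the OOB area without
+ * re-issuing a full READ PAGE. Only valid on large page devices.
+ */
+static int __maybe_unused example_read_data_then_oob(struct nand_chip *chip,
+ unsigned int page, u8 *data, u8 *oob)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ ret = nand_read_page_op(chip, page, 0, data, mtd->writesize);
+ if (ret)
+ return ret;
+
+ /* Move the read column to the OOB area, page data is still cached. */
+ return nand_change_read_column_op(chip, mtd->writesize, oob,
+ mtd->oobsize, false);
+}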
+
+/**
+ * nand_read_oob_op - Do a READ OOB operation
+ * @chip: The NAND chip
+ * @page: page to read
+ * @offset_in_oob: offset within the OOB area
+ * @buf: buffer used to store the data
+ * @len: length of the buffer
+ *
+ * This function issues a READ OOB operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_oob, void *buf, unsigned int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (len && !buf)
+ return -EINVAL;
+
+ if (offset_in_oob + len > mtd->oobsize)
+ return -EINVAL;
+
+ if (nand_has_exec_op(chip))
+ return nand_read_page_op(chip, page,
+ mtd->writesize + offset_in_oob,
+ buf, len);
+
+ chip->legacy.cmdfunc(chip, NAND_CMD_READOOB, offset_in_oob, page);
+ if (len)
+ chip->legacy.read_buf(chip, buf, len);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_read_oob_op);
+
+static int nand_exec_prog_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, const void *buf,
+ unsigned int len, bool prog)
+{
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(nand_get_interface_config(chip));
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u8 addrs[5] = {};
+ struct nand_op_instr instrs[] = {
+ /*
+ * The first instruction will be dropped if we're dealing
+ * with a large page NAND and adjusted if we're dealing
+ * with a small page NAND and the page offset is > 255.
+ */
+ NAND_OP_CMD(NAND_CMD_READ0, 0),
+ NAND_OP_CMD(NAND_CMD_SEQIN, 0),
+ NAND_OP_ADDR(0, addrs, PSEC_TO_NSEC(sdr->tADL_min)),
+ NAND_OP_DATA_OUT(len, buf, 0),
+ NAND_OP_CMD(NAND_CMD_PAGEPROG, PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+ int naddrs = nand_fill_column_cycles(chip, addrs, offset_in_page);
+ int ret;
+ u8 status;
+
+ if (naddrs < 0)
+ return naddrs;
+
+ addrs[naddrs++] = page;
+ addrs[naddrs++] = page >> 8;
+ if (chip->options & NAND_ROW_ADDR_3)
+ addrs[naddrs++] = page >> 16;
+
+ instrs[2].ctx.addr.naddrs = naddrs;
+
+ /* Drop the last two instructions if we're not programming the page. */
+ if (!prog) {
+ op.ninstrs -= 2;
+ /* Also drop the DATA_OUT instruction if empty. */
+ if (!len)
+ op.ninstrs--;
+ }
+
+ if (mtd->writesize <= 512) {
+ /*
+ * Small pages need some more tweaking: we have to adjust the
+ * first instruction depending on the page offset we're trying
+ * to access.
+ */
+ if (offset_in_page >= mtd->writesize)
+ instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
+ else if (offset_in_page >= 256 &&
+ !(chip->options & NAND_BUSWIDTH_16))
+ instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
+ } else {
+ /*
+ * Drop the first command if we're dealing with a large page
+ * NAND.
+ */
+ op.instrs++;
+ op.ninstrs--;
+ }
+
+ ret = nand_exec_op(chip, &op);
+ if (!prog || ret)
+ return ret;
+
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+
+ return status;
+}
+
+/**
+ * nand_prog_page_begin_op - starts a PROG PAGE operation
+ * @chip: The NAND chip
+ * @page: page to write
+ * @offset_in_page: offset within the page
+ * @buf: buffer containing the data to write to the page
+ * @len: length of the buffer
+ *
+ * This function issues the first half of a PROG PAGE operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, const void *buf,
+ unsigned int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (len && !buf)
+ return -EINVAL;
+
+ if (offset_in_page + len > mtd->writesize + mtd->oobsize)
+ return -EINVAL;
+
+ if (nand_has_exec_op(chip))
+ return nand_exec_prog_page_op(chip, page, offset_in_page, buf,
+ len, false);
+
+ chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page, page);
+
+ if (buf)
+ chip->legacy.write_buf(chip, buf, len);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_prog_page_begin_op);
+
+/**
+ * nand_prog_page_end_op - ends a PROG PAGE operation
+ * @chip: The NAND chip
+ *
+ * This function issues the second half of a PROG PAGE operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_prog_page_end_op(struct nand_chip *chip)
+{
+ int ret;
+ u8 status;
+
+ if (nand_has_exec_op(chip)) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(nand_get_interface_config(chip));
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_PAGEPROG,
+ PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+
+ ret = nand_exec_op(chip, &op);
+ if (ret)
+ return ret;
+
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+ } else {
+ chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
+ ret = chip->legacy.waitfunc(chip);
+ if (ret < 0)
+ return ret;
+
+ status = ret;
+ }
+
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_prog_page_end_op);
+
+/**
+ * nand_prog_page_op - Do a full PROG PAGE operation
+ * @chip: The NAND chip
+ * @page: page to write
+ * @offset_in_page: offset within the page
+ * @buf: buffer containing the data to write to the page
+ * @len: length of the buffer
+ *
+ * This function issues a full PROG PAGE operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, const void *buf,
+ unsigned int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int status;
+
+ if (!len || !buf)
+ return -EINVAL;
+
+ if (offset_in_page + len > mtd->writesize + mtd->oobsize)
+ return -EINVAL;
+
+ if (nand_has_exec_op(chip)) {
+ status = nand_exec_prog_page_op(chip, page, offset_in_page, buf,
+ len, true);
+ } else {
+ chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page,
+ page);
+ chip->legacy.write_buf(chip, buf, len);
+ chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
+ status = chip->legacy.waitfunc(chip);
+ }
+
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_prog_page_op);
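+
+/*
+ * Example: a minimal sketch of a split page program, the way a raw write
+ * path typically chains the helpers above: begin the operation with the
+ * main data, append the OOB bytes, then commit. The function name is
+ * hypothetical and error handling is reduced to early returns.
+ */
+static int __maybe_unused example_prog_page_split(struct nand_chip *chip,
+ unsigned int page, const u8 *data, const u8 *oob)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ ret = nand_prog_page_begin_op(chip, page, 0, data, mtd->writesize);
+ if (ret)
+ return ret;
+
+ ret = nand_write_data_op(chip, oob, mtd->oobsize, false);
+ if (ret)
+ return ret;
+
+ return nand_prog_page_end_op(chip);
+}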
+
+/**
+ * nand_change_write_column_op - Do a CHANGE WRITE COLUMN operation
+ * @chip: The NAND chip
+ * @offset_in_page: offset within the page
+ * @buf: buffer containing the data to send to the NAND
+ * @len: length of the buffer
+ * @force_8bit: force 8-bit bus access
+ *
+ * This function issues a CHANGE WRITE COLUMN operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_change_write_column_op(struct nand_chip *chip,
+ unsigned int offset_in_page,
+ const void *buf, unsigned int len,
+ bool force_8bit)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (len && !buf)
+ return -EINVAL;
+
+ if (offset_in_page + len > mtd->writesize + mtd->oobsize)
+ return -EINVAL;
+
+ /* Small page NANDs do not support column change. */
+ if (mtd->writesize <= 512)
+ return -ENOTSUPP;
+
+ if (nand_has_exec_op(chip)) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(nand_get_interface_config(chip));
+ u8 addrs[2];
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_RNDIN, 0),
+ NAND_OP_ADDR(2, addrs, PSEC_TO_NSEC(sdr->tCCS_min)),
+ NAND_OP_DATA_OUT(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+ int ret;
+
+ ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
+ if (ret < 0)
+ return ret;
+
+ instrs[2].ctx.data.force_8bit = force_8bit;
+
+ /* Drop the DATA_OUT instruction if len is set to 0. */
+ if (!len)
+ op.ninstrs--;
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->legacy.cmdfunc(chip, NAND_CMD_RNDIN, offset_in_page, -1);
+ if (len)
+ chip->legacy.write_buf(chip, buf, len);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_change_write_column_op);
+
+/**
+ * nand_readid_op - Do a READID operation
+ * @chip: The NAND chip
+ * @addr: address cycle to pass after the READID command
+ * @buf: buffer used to store the ID
+ * @len: length of the buffer
+ *
+ * This function sends a READID command and reads back the ID returned by the
+ * NAND.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
+ unsigned int len)
+{
+ unsigned int i;
+ u8 *id = buf;
+
+ if (len && !buf)
+ return -EINVAL;
+
+ if (nand_has_exec_op(chip)) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(nand_get_interface_config(chip));
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_READID, 0),
+ NAND_OP_ADDR(1, &addr, PSEC_TO_NSEC(sdr->tADL_min)),
+ NAND_OP_8BIT_DATA_IN(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+
+ /* Drop the DATA_IN instruction if len is set to 0. */
+ if (!len)
+ op.ninstrs--;
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->legacy.cmdfunc(chip, NAND_CMD_READID, addr, -1);
+
+ for (i = 0; i < len; i++)
+ id[i] = chip->legacy.read_byte(chip);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_readid_op);
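+
+/*
+ * Example: a minimal sketch reading the first two ID bytes (manufacturer
+ * and device) at address cycle 0x00. The function name is hypothetical.
+ */
+static int __maybe_unused example_read_id(struct nand_chip *chip, u8 id[2])
+{
+ return nand_readid_op(chip, 0x00, id, 2);
+}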
+
+/**
+ * nand_status_op - Do a STATUS operation
+ * @chip: The NAND chip
+ * @status: out variable to store the NAND status
+ *
+ * This function sends a STATUS command and reads back the status returned by
+ * the NAND.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_status_op(struct nand_chip *chip, u8 *status)
+{
+ if (nand_has_exec_op(chip)) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(nand_get_interface_config(chip));
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_STATUS,
+ PSEC_TO_NSEC(sdr->tADL_min)),
+ NAND_OP_8BIT_DATA_IN(1, status, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+
+ if (!status)
+ op.ninstrs--;
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->legacy.cmdfunc(chip, NAND_CMD_STATUS, -1, -1);
+ if (status)
+ *status = chip->legacy.read_byte(chip);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_status_op);
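+
+/*
+ * Example: a minimal sketch of a write-protect check built on
+ * nand_status_op(), close in spirit to what the core does before erase and
+ * program operations. The function name is hypothetical.
+ */
+static bool __maybe_unused example_is_write_protected(struct nand_chip *chip)
+{
+ u8 status;
+
+ if (nand_status_op(chip, &status))
+ return true; /* be conservative if the status read fails */
+
+ /* A cleared WP bit means the device is write protected. */
+ return !(status & NAND_STATUS_WP);
+}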
+
+/**
+ * nand_exit_status_op - Exit a STATUS operation
+ * @chip: The NAND chip
+ *
+ * This function sends a READ0 command to cancel the effect of the STATUS
+ * command to avoid reading only the status until a new read command is sent.
+ *
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_exit_status_op(struct nand_chip *chip)
+{
+ if (nand_has_exec_op(chip)) {
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_READ0, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->legacy.cmdfunc(chip, NAND_CMD_READ0, -1, -1);
+
+ return 0;
+}
+
+/**
+ * nand_erase_op - Do an erase operation
+ * @chip: The NAND chip
+ * @eraseblock: block to erase
+ *
+ * This function sends an ERASE command and waits for the NAND to be ready
+ * before returning.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock)
+{
+ unsigned int page = eraseblock <<
+ (chip->phys_erase_shift - chip->page_shift);
+ int ret;
+ u8 status;
+
+ if (nand_has_exec_op(chip)) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(nand_get_interface_config(chip));
+ u8 addrs[3] = { page, page >> 8, page >> 16 };
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_ERASE1, 0),
+ NAND_OP_ADDR(2, addrs, 0),
+ NAND_OP_CMD(NAND_CMD_ERASE2,
+ PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tBERS_max), 0),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+
+ if (chip->options & NAND_ROW_ADDR_3)
+ instrs[1].ctx.addr.naddrs++;
+
+ ret = nand_exec_op(chip, &op);
+ if (ret)
+ return ret;
+
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+ } else {
+ chip->legacy.cmdfunc(chip, NAND_CMD_ERASE1, -1, page);
+ chip->legacy.cmdfunc(chip, NAND_CMD_ERASE2, -1, -1);
+
+ ret = chip->legacy.waitfunc(chip);
+ if (ret < 0)
+ return ret;
+
+ status = ret;
+ }
+
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_erase_op);
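+
+/*
+ * Example: a minimal sketch erasing one block on die 0, showing the
+ * select/deselect dance the caller is responsible for. The function name
+ * is hypothetical.
+ */
+static int __maybe_unused example_erase_block(struct nand_chip *chip,
+ unsigned int eraseblock)
+{
+ int ret;
+
+ nand_select_target(chip, 0);
+ ret = nand_erase_op(chip, eraseblock);
+ nand_deselect_target(chip);
+
+ return ret;
+}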
+
+/**
+ * nand_set_features_op - Do a SET FEATURES operation
+ * @chip: The NAND chip
+ * @feature: feature id
+ * @data: 4 bytes of data
+ *
+ * This function sends a SET FEATURES command and waits for the NAND to be
+ * ready before returning.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+static int nand_set_features_op(struct nand_chip *chip, u8 feature,
+ const void *data)
+{
+ const u8 *params = data;
+ int i, ret;
+
+ if (nand_has_exec_op(chip)) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(nand_get_interface_config(chip));
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_SET_FEATURES, 0),
+ NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tADL_min)),
+ NAND_OP_8BIT_DATA_OUT(ONFI_SUBFEATURE_PARAM_LEN, data,
+ PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max), 0),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->legacy.cmdfunc(chip, NAND_CMD_SET_FEATURES, feature, -1);
+ for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
+ chip->legacy.write_byte(chip, params[i]);
+
+ ret = chip->legacy.waitfunc(chip);
+ if (ret < 0)
+ return ret;
+
+ if (ret & NAND_STATUS_FAIL)
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * nand_get_features_op - Do a GET FEATURES operation
+ * @chip: The NAND chip
+ * @feature: feature id
+ * @data: 4 bytes of data
+ *
+ * This function sends a GET FEATURES command and waits for the NAND to be
+ * ready before returning.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+static int nand_get_features_op(struct nand_chip *chip, u8 feature,
+ void *data)
+{
+ u8 *params = data;
+ int i;
+
+ if (nand_has_exec_op(chip)) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(nand_get_interface_config(chip));
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_GET_FEATURES, 0),
+ NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max),
+ PSEC_TO_NSEC(sdr->tRR_min)),
+ NAND_OP_8BIT_DATA_IN(ONFI_SUBFEATURE_PARAM_LEN,
+ data, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->legacy.cmdfunc(chip, NAND_CMD_GET_FEATURES, feature, -1);
+ for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
+ params[i] = chip->legacy.read_byte(chip);
+
+ return 0;
+}
+
+static int nand_wait_rdy_op(struct nand_chip *chip, unsigned int timeout_ms,
+ unsigned int delay_ns)
+{
+ if (nand_has_exec_op(chip)) {
+ struct nand_op_instr instrs[] = {
+ /* timeout_ms and delay_ns are already in the expected units */
+ NAND_OP_WAIT_RDY(timeout_ms, delay_ns),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+
+ return nand_exec_op(chip, &op);
+ }
+
+ /* Apply delay or wait for ready/busy pin */
+ if (!chip->legacy.dev_ready)
+ udelay(chip->legacy.chip_delay);
+ else
+ nand_wait_ready(chip);
+
+ return 0;
+}
+
+/**
+ * nand_reset_op - Do a reset operation
+ * @chip: The NAND chip
+ *
+ * This function sends a RESET command and waits for the NAND to be ready
+ * before returning.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_reset_op(struct nand_chip *chip)
+{
+ if (nand_has_exec_op(chip)) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(nand_get_interface_config(chip));
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_RESET, PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tRST_max), 0),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->legacy.cmdfunc(chip, NAND_CMD_RESET, -1, -1);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_reset_op);
+
+/**
+ * nand_read_data_op - Read data from the NAND
+ * @chip: The NAND chip
+ * @buf: buffer used to store the data
+ * @len: length of the buffer
+ * @force_8bit: force 8-bit bus access
+ * @check_only: do not actually run the command, only check that the
+ * controller driver supports it
+ *
+ * This function does a raw data read on the bus. Usually used after launching
+ * another NAND operation like nand_read_page_op().
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
+ bool force_8bit, bool check_only)
+{
+ if (!len || !buf)
+ return -EINVAL;
+
+ if (nand_has_exec_op(chip)) {
+ struct nand_op_instr instrs[] = {
+ NAND_OP_DATA_IN(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+
+ instrs[0].ctx.data.force_8bit = force_8bit;
+
+ if (check_only)
+ return nand_check_op(chip, &op);
+
+ return nand_exec_op(chip, &op);
+ }
+
+ if (check_only)
+ return 0;
+
+ if (force_8bit) {
+ u8 *p = buf;
+ unsigned int i;
+
+ for (i = 0; i < len; i++)
+ p[i] = chip->legacy.read_byte(chip);
+ } else {
+ chip->legacy.read_buf(chip, buf, len);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_read_data_op);
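+
+/*
+ * Example: a sketch using the check_only mode to probe whether the
+ * controller can do a forced 8-bit data read before actually issuing one.
+ * The function name is hypothetical.
+ */
+static bool __maybe_unused example_supports_8bit_read(struct nand_chip *chip,
+ void *buf, unsigned int len)
+{
+ /* With check_only set, the operation is validated but never run. */
+ return nand_read_data_op(chip, buf, len, true, true) == 0;
+}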
+
+/**
+ * nand_write_data_op - Write data to the NAND
+ * @chip: The NAND chip
+ * @buf: buffer containing the data to send on the bus
+ * @len: length of the buffer
+ * @force_8bit: force 8-bit bus access
+ *
+ * This function does a raw data write on the bus. Usually used after launching
+ * another NAND operation like nand_prog_page_begin_op().
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_write_data_op(struct nand_chip *chip, const void *buf,
+ unsigned int len, bool force_8bit)
+{
+ if (!len || !buf)
+ return -EINVAL;
+
+ if (nand_has_exec_op(chip)) {
+ struct nand_op_instr instrs[] = {
+ NAND_OP_DATA_OUT(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+
+ instrs[0].ctx.data.force_8bit = force_8bit;
+
+ return nand_exec_op(chip, &op);
+ }
+
+ if (force_8bit) {
+ const u8 *p = buf;
+ unsigned int i;
+
+ for (i = 0; i < len; i++)
+ chip->legacy.write_byte(chip, p[i]);
+ } else {
+ chip->legacy.write_buf(chip, buf, len);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_write_data_op);
+
+/**
+ * struct nand_op_parser_ctx - Context used by the parser
+ * @instrs: array of all the instructions that must be addressed
+ * @ninstrs: length of the @instrs array
+ * @subop: Sub-operation to be passed to the NAND controller
+ *
+ * This structure is used by the core to split NAND operations into
+ * sub-operations that can be handled by the NAND controller.
+ */
+struct nand_op_parser_ctx {
+ const struct nand_op_instr *instrs;
+ unsigned int ninstrs;
+ struct nand_subop subop;
+};
+
+/**
+ * nand_op_parser_must_split_instr - Checks if an instruction must be split
+ * @pat: the parser pattern element that matches @instr
+ * @instr: pointer to the instruction to check
+ * @start_offset: this is an in/out parameter. If @instr has already been
+ * split, then @start_offset is the offset from which to start
+ * (either an address cycle or an offset in the data buffer).
+ * Conversely, if the function returns true (i.e. instr must be
+ * split), this parameter is updated to point to the first
+ * data/address cycle that has not been taken care of.
+ *
+ * Some NAND controllers are limited and cannot send X address cycles in a
+ * single operation, or cannot read/write more than Y bytes at the same time.
+ * In this case, split the instruction that does not fit in a single
+ * controller operation into two or more chunks.
+ *
+ * Returns true if the instruction must be split, false otherwise.
+ * The @start_offset parameter is also updated to the offset at which the next
+ * bundle of instructions must start (if an address or a data instruction).
+ */
+static bool
+nand_op_parser_must_split_instr(const struct nand_op_parser_pattern_elem *pat,
+ const struct nand_op_instr *instr,
+ unsigned int *start_offset)
+{
+ switch (pat->type) {
+ case NAND_OP_ADDR_INSTR:
+ if (!pat->ctx.addr.maxcycles)
+ break;
+
+ if (instr->ctx.addr.naddrs - *start_offset >
+ pat->ctx.addr.maxcycles) {
+ *start_offset += pat->ctx.addr.maxcycles;
+ return true;
+ }
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ case NAND_OP_DATA_OUT_INSTR:
+ if (!pat->ctx.data.maxlen)
+ break;
+
+ if (instr->ctx.data.len - *start_offset >
+ pat->ctx.data.maxlen) {
+ *start_offset += pat->ctx.data.maxlen;
+ return true;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return false;
+}
+
+/**
+ * nand_op_parser_match_pat - Checks if a pattern matches the instructions
+ * remaining in the parser context
+ * @pat: the pattern to test
+ * @ctx: the parser context structure to match with the pattern @pat
+ *
+ * Check if @pat matches the set or a sub-set of instructions remaining in @ctx.
+ * Returns true if this is the case, false otherwise. When true is returned,
+ * @ctx->subop is updated with the set of instructions to be passed to the
+ * controller driver.
+ */
+static bool
+nand_op_parser_match_pat(const struct nand_op_parser_pattern *pat,
+ struct nand_op_parser_ctx *ctx)
+{
+ unsigned int instr_offset = ctx->subop.first_instr_start_off;
+ const struct nand_op_instr *end = ctx->instrs + ctx->ninstrs;
+ const struct nand_op_instr *instr = ctx->subop.instrs;
+ unsigned int i, ninstrs;
+
+ for (i = 0, ninstrs = 0; i < pat->nelems && instr < end; i++) {
+ /*
+ * The pattern instruction does not match the operation
+ * instruction. If the instruction is marked optional in the
+ * pattern definition, we skip the pattern element and continue
+ * to the next one. If the element is mandatory, there's no
+ * match and we can return false directly.
+ */
+ if (instr->type != pat->elems[i].type) {
+ if (!pat->elems[i].optional)
+ return false;
+
+ continue;
+ }
+
+ /*
+ * Now check the pattern element constraints. If the pattern is
+ * not able to handle the whole instruction in a single step,
+ * we have to split it.
+ * The instr_offset value comes back updated to point to the
+ * position where we have to split the instruction (the
+ * start of the next subop chunk).
+ */
+ if (nand_op_parser_must_split_instr(&pat->elems[i], instr,
+ &instr_offset)) {
+ ninstrs++;
+ i++;
+ break;
+ }
+
+ instr++;
+ ninstrs++;
+ instr_offset = 0;
+ }
+
+ /*
+ * This can happen if all instructions of a pattern are optional.
+ * Still, if there's not at least one instruction handled by this
+ * pattern, this is not a match, and we should try the next one (if
+ * any).
+ */
+ if (!ninstrs)
+ return false;
+
+ /*
+ * We had a match on the pattern head, but the pattern may be longer
+ * than the instructions we're asked to execute. We need to make sure
+ * there's no mandatory elements in the pattern tail.
+ */
+ for (; i < pat->nelems; i++) {
+ if (!pat->elems[i].optional)
+ return false;
+ }
+
+ /*
+ * We have a match: update the subop structure accordingly and return
+ * true.
+ */
+ ctx->subop.ninstrs = ninstrs;
+ ctx->subop.last_instr_end_off = instr_offset;
+
+ return true;
+}
+
+#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
+static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
+{
+ const struct nand_op_instr *instr;
+ char *prefix = " ";
+ unsigned int i;
+
+ pr_debug("executing subop (CS%d):\n", ctx->subop.cs);
+
+ for (i = 0; i < ctx->ninstrs; i++) {
+ instr = &ctx->instrs[i];
+
+ if (instr == &ctx->subop.instrs[0])
+ prefix = " ->";
+
+ nand_op_trace(prefix, instr);
+
+ if (instr == &ctx->subop.instrs[ctx->subop.ninstrs - 1])
+ prefix = " ";
+ }
+}
+#else
+static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
+{
+ /* NOP */
+}
+#endif
+
+static int nand_op_parser_cmp_ctx(const struct nand_op_parser_ctx *a,
+ const struct nand_op_parser_ctx *b)
+{
+ if (a->subop.ninstrs < b->subop.ninstrs)
+ return -1;
+ else if (a->subop.ninstrs > b->subop.ninstrs)
+ return 1;
+
+ if (a->subop.last_instr_end_off < b->subop.last_instr_end_off)
+ return -1;
+ else if (a->subop.last_instr_end_off > b->subop.last_instr_end_off)
+ return 1;
+
+ return 0;
+}
+
+/**
+ * nand_op_parser_exec_op - exec_op parser
+ * @chip: the NAND chip
+ * @parser: patterns description provided by the controller driver
+ * @op: the NAND operation to address
+ * @check_only: when true, the function only checks if @op can be handled but
+ * does not execute the operation
+ *
+ * Helper function designed to ease integration of NAND controller drivers that
+ * only support a limited set of instruction sequences. The supported sequences
+ * are described in @parser, and the framework takes care of splitting @op into
+ * multiple sub-operations (if required) and passing them back to the ->exec()
+ * callback of the matching pattern if @check_only is set to false.
+ *
+ * NAND controller drivers should call this function from their own ->exec_op()
+ * implementation.
+ *
+ * Returns 0 on success, a negative error code otherwise. A failure can be
+ * caused by an unsupported operation (none of the supported patterns is able
+ * to handle the requested operation), or an error returned by the
+ * matching pattern's ->exec() hook.
+ */
+int nand_op_parser_exec_op(struct nand_chip *chip,
+ const struct nand_op_parser *parser,
+ const struct nand_operation *op, bool check_only)
+{
+ struct nand_op_parser_ctx ctx = {
+ .subop.cs = op->cs,
+ .subop.instrs = op->instrs,
+ .instrs = op->instrs,
+ .ninstrs = op->ninstrs,
+ };
+ unsigned int i;
+
+ while (ctx.subop.instrs < op->instrs + op->ninstrs) {
+ const struct nand_op_parser_pattern *pattern;
+ struct nand_op_parser_ctx best_ctx;
+ int ret, best_pattern = -1;
+
+ for (i = 0; i < parser->npatterns; i++) {
+ struct nand_op_parser_ctx test_ctx = ctx;
+
+ pattern = &parser->patterns[i];
+ if (!nand_op_parser_match_pat(pattern, &test_ctx))
+ continue;
+
+ if (best_pattern >= 0 &&
+ nand_op_parser_cmp_ctx(&test_ctx, &best_ctx) <= 0)
+ continue;
+
+ best_pattern = i;
+ best_ctx = test_ctx;
+ }
+
+ if (best_pattern < 0) {
+ pr_debug("->exec_op() parser: pattern not found!\n");
+ return -ENOTSUPP;
+ }
+
+ ctx = best_ctx;
+ nand_op_parser_trace(&ctx);
+
+ if (!check_only) {
+ pattern = &parser->patterns[best_pattern];
+ ret = pattern->exec(chip, &ctx.subop);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Update the context structure by pointing to the start of the
+ * next subop.
+ */
+ ctx.subop.instrs = ctx.subop.instrs + ctx.subop.ninstrs;
+ if (ctx.subop.last_instr_end_off)
+ ctx.subop.instrs -= 1;
+
+ ctx.subop.first_instr_start_off = ctx.subop.last_instr_end_off;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_op_parser_exec_op);
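+
+/*
+ * Example: a sketch of the parser description a hypothetical controller
+ * driver could feed to nand_op_parser_exec_op() from its ->exec_op() hook.
+ * The single pattern below accepts CMD [ADDR] [CMD] [WAITRDY] [DATA_IN]
+ * sequences with at most 5 address cycles and 512 data bytes per
+ * controller run; real drivers usually declare several patterns. The
+ * example_exec_subop() body is sketched below, after the nand_subop_*()
+ * accessors.
+ */
+static int example_exec_subop(struct nand_chip *chip,
+ const struct nand_subop *subop);
+
+static const struct nand_op_parser example_op_parser = NAND_OP_PARSER(
+ NAND_OP_PARSER_PATTERN(example_exec_subop,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(true, 5),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 512)));
+
+static int __maybe_unused example_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op, bool check_only)
+{
+ return nand_op_parser_exec_op(chip, &example_op_parser, op, check_only);
+}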
+
+static bool nand_instr_is_data(const struct nand_op_instr *instr)
+{
+ return instr && (instr->type == NAND_OP_DATA_IN_INSTR ||
+ instr->type == NAND_OP_DATA_OUT_INSTR);
+}
+
+static bool nand_subop_instr_is_valid(const struct nand_subop *subop,
+ unsigned int instr_idx)
+{
+ return subop && instr_idx < subop->ninstrs;
+}
+
+static unsigned int nand_subop_get_start_off(const struct nand_subop *subop,
+ unsigned int instr_idx)
+{
+ if (instr_idx)
+ return 0;
+
+ return subop->first_instr_start_off;
+}
+
+/**
+ * nand_subop_get_addr_start_off - Get the start offset in an address array
+ * @subop: The entire sub-operation
+ * @instr_idx: Index of the instruction inside the sub-operation
+ *
+ * During driver development, one could be tempted to directly use the
+ * ->addr.addrs field of address instructions. This is wrong as address
+ * instructions might be split.
+ *
+ * Given an address instruction, returns the offset of the first cycle to issue.
+ */
+unsigned int nand_subop_get_addr_start_off(const struct nand_subop *subop,
+ unsigned int instr_idx)
+{
+ if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
+ subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
+ return 0;
+
+ return nand_subop_get_start_off(subop, instr_idx);
+}
+EXPORT_SYMBOL_GPL(nand_subop_get_addr_start_off);
+
+/**
+ * nand_subop_get_num_addr_cyc - Get the remaining address cycles to assert
+ * @subop: The entire sub-operation
+ * @instr_idx: Index of the instruction inside the sub-operation
+ *
+ * During driver development, one could be tempted to directly use the
+ * ->addr->naddrs field of an address instruction. This is wrong as
+ * instructions might be split.
+ *
+ * Given an address instruction, returns the number of address cycles to issue.
+ */
+unsigned int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
+ unsigned int instr_idx)
+{
+ int start_off, end_off;
+
+ if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
+ subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
+ return 0;
+
+ start_off = nand_subop_get_addr_start_off(subop, instr_idx);
+
+ if (instr_idx == subop->ninstrs - 1 &&
+ subop->last_instr_end_off)
+ end_off = subop->last_instr_end_off;
+ else
+ end_off = subop->instrs[instr_idx].ctx.addr.naddrs;
+
+ return end_off - start_off;
+}
+EXPORT_SYMBOL_GPL(nand_subop_get_num_addr_cyc);
+
+/**
+ * nand_subop_get_data_start_off - Get the start offset in a data array
+ * @subop: The entire sub-operation
+ * @instr_idx: Index of the instruction inside the sub-operation
+ *
+ * During driver development, one could be tempted to directly use the
+ * ->data->buf.{in,out} field of data instructions. This is wrong as data
+ * instructions might be split.
+ *
+ * Given a data instruction, returns the offset to start from.
+ */
+unsigned int nand_subop_get_data_start_off(const struct nand_subop *subop,
+ unsigned int instr_idx)
+{
+ if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
+ !nand_instr_is_data(&subop->instrs[instr_idx])))
+ return 0;
+
+ return nand_subop_get_start_off(subop, instr_idx);
+}
+EXPORT_SYMBOL_GPL(nand_subop_get_data_start_off);
+
+/**
+ * nand_subop_get_data_len - Get the number of bytes to retrieve
+ * @subop: The entire sub-operation
+ * @instr_idx: Index of the instruction inside the sub-operation
+ *
+ * During driver development, one could be tempted to directly use the
+ * ->data->len field of a data instruction. This is wrong as data instructions
+ * might be split.
+ *
+ * Returns the length of the chunk of data to send/receive.
+ */
+unsigned int nand_subop_get_data_len(const struct nand_subop *subop,
+ unsigned int instr_idx)
+{
+ int start_off = 0, end_off;
+
+ if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
+ !nand_instr_is_data(&subop->instrs[instr_idx])))
+ return 0;
+
+ start_off = nand_subop_get_data_start_off(subop, instr_idx);
+
+ if (instr_idx == subop->ninstrs - 1 &&
+ subop->last_instr_end_off)
+ end_off = subop->last_instr_end_off;
+ else
+ end_off = subop->instrs[instr_idx].ctx.data.len;
+
+ return end_off - start_off;
+}
+EXPORT_SYMBOL_GPL(nand_subop_get_data_len);
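+
+/*
+ * Example (continuing the parser sketch above): the hypothetical
+ * example_exec_subop() hook walks a sub-operation with the accessors above
+ * instead of dereferencing the instruction fields directly, so that split
+ * ADDR/DATA instructions are handled correctly. Actual controller register
+ * programming is elided and replaced by pr_debug() placeholders.
+ */
+static int example_exec_subop(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ unsigned int i;
+
+ for (i = 0; i < subop->ninstrs; i++) {
+ const struct nand_op_instr *instr = &subop->instrs[i];
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ pr_debug("CMD 0x%02x\n", instr->ctx.cmd.opcode);
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ pr_debug("ADDR: %u cycles starting at offset %u\n",
+ nand_subop_get_num_addr_cyc(subop, i),
+ nand_subop_get_addr_start_off(subop, i));
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ case NAND_OP_DATA_OUT_INSTR:
+ pr_debug("DATA: %u bytes at buffer offset %u\n",
+ nand_subop_get_data_len(subop, i),
+ nand_subop_get_data_start_off(subop, i));
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ pr_debug("WAITRDY: %u ms\n",
+ instr->ctx.waitrdy.timeout_ms);
+ break;
+ }
+ }
+
+ return 0;
+}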
+
+/**
+ * nand_reset - Reset and initialize a NAND device
+ * @chip: The NAND chip
+ * @chipnr: Internal die id
+ *
+ * Save the timings data structure, then apply SDR timings mode 0 (see
+ * nand_reset_interface for details), do the reset operation, and apply
+ * back the previous timings.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_reset(struct nand_chip *chip, int chipnr)
+{
+ int ret;
+
+ ret = nand_reset_interface(chip, chipnr);
+ if (ret)
+ return ret;
+
+ /*
+ * The CS line has to be released before we can apply the new NAND
+ * interface settings, hence this weird nand_select_target()
+ * nand_deselect_target() dance.
+ */
+ nand_select_target(chip, chipnr);
+ ret = nand_reset_op(chip);
+ nand_deselect_target(chip);
+ if (ret)
+ return ret;
+
+ ret = nand_setup_interface(chip, chipnr);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_reset);
+
+/**
+ * nand_get_features - wrapper to perform a GET_FEATURE
+ * @chip: NAND chip info structure
+ * @addr: feature address
+ * @subfeature_param: the subfeature parameters, a four bytes array
+ *
+ * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the
+ * operation cannot be handled.
+ */
+int nand_get_features(struct nand_chip *chip, int addr,
+ u8 *subfeature_param)
+{
+ if (!nand_supports_get_features(chip, addr))
+ return -ENOTSUPP;
+
+ if (chip->legacy.get_features)
+ return chip->legacy.get_features(chip, addr, subfeature_param);
+
+ return nand_get_features_op(chip, addr, subfeature_param);
+}
+
+/**
+ * nand_set_features - wrapper to perform a SET_FEATURE
+ * @chip: NAND chip info structure
+ * @addr: feature address
+ * @subfeature_param: the subfeature parameters, a four bytes array
+ *
+ * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the
+ * operation cannot be handled.
+ */
+int nand_set_features(struct nand_chip *chip, int addr,
+ u8 *subfeature_param)
+{
+ if (!nand_supports_set_features(chip, addr))
+ return -ENOTSUPP;
+
+ if (chip->legacy.set_features)
+ return chip->legacy.set_features(chip, addr, subfeature_param);
+
+ return nand_set_features_op(chip, addr, subfeature_param);
+}
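+
+/*
+ * Example: a minimal sketch programming an ONFI timing mode through the
+ * SET_FEATURES wrapper, similar to what the interface setup code does.
+ * The function name is hypothetical and the mode value is a placeholder.
+ */
+static int __maybe_unused example_set_timing_mode(struct nand_chip *chip,
+ u8 mode)
+{
+ u8 param[ONFI_SUBFEATURE_PARAM_LEN] = { mode, };
+
+ return nand_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE, param);
+}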
+
+/**
+ * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
+ * @buf: buffer to test
+ * @len: buffer length
+ * @bitflips_threshold: maximum number of bitflips
+ *
+ * Check if a buffer contains only 0xff, which means the underlying region
+ * has been erased and is ready to be programmed.
+ * The bitflips_threshold specifies the maximum number of bitflips before
+ * considering the region as not erased.
+ * Note: The logic of this function has been extracted from the memweight
+ * implementation, except that nand_check_erased_buf() exits before
+ * testing the whole buffer if the number of bitflips exceeds the
+ * bitflips_threshold value.
+ *
+ * Returns a positive number of bitflips less than or equal to
+ * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
+ * threshold.
+ */
+static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold)
+{
+ const unsigned char *bitmap = buf;
+ int bitflips = 0;
+ int weight;
+
+ for (; len && ((uintptr_t)bitmap) % sizeof(long);
+ len--, bitmap++) {
+ weight = hweight8(*bitmap);
+ bitflips += BITS_PER_BYTE - weight;
+ if (unlikely(bitflips > bitflips_threshold))
+ return -EBADMSG;
+ }
+
+ for (; len >= sizeof(long);
+ len -= sizeof(long), bitmap += sizeof(long)) {
+ unsigned long d = *((unsigned long *)bitmap);
+ if (d == ~0UL)
+ continue;
+ weight = hweight_long(d);
+ bitflips += BITS_PER_LONG - weight;
+ if (unlikely(bitflips > bitflips_threshold))
+ return -EBADMSG;
+ }
+
+ for (; len > 0; len--, bitmap++) {
+ weight = hweight8(*bitmap);
+ bitflips += BITS_PER_BYTE - weight;
+ if (unlikely(bitflips > bitflips_threshold))
+ return -EBADMSG;
+ }
+
+ return bitflips;
+}
+
+/**
+ * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only
+ * 0xff data
+ * @data: data buffer to test
+ * @datalen: data length
+ * @ecc: ECC buffer
+ * @ecclen: ECC length
+ * @extraoob: extra OOB buffer
+ * @extraooblen: extra OOB length
+ * @bitflips_threshold: maximum number of bitflips
+ *
+ * Check if a data buffer and its associated ECC and OOB data contain only
+ * the 0xff pattern, which means the underlying region has been erased and is
+ * ready to be programmed.
+ * The bitflips_threshold specifies the maximum number of bitflips before
+ * considering the region as not erased.
+ *
+ * Note:
+ * 1/ ECC algorithms work on pre-defined block sizes which are usually
+ * different from the NAND page size. When fixing bitflips, ECC engines will
+ * report the number of errors per chunk, and the NAND core infrastructure
+ * expects you to return the maximum number of bitflips for the whole page.
+ * This is why you should always use this function on a single chunk and
+ * not on the whole page. After checking each chunk you should update your
+ * max_bitflips value accordingly.
+ * 2/ When checking for bitflips in erased pages you should not only check
+ * the payload data but also their associated ECC data, because a user might
+ * have programmed almost all bits but a few to 1. In this case, we
+ * shouldn't consider the chunk as erased, and checking the ECC bytes
+ * prevents this case.
+ * 3/ The extraoob argument is optional, and should be used if some of your OOB
+ * data are protected by the ECC engine.
+ * It could also be used if you support subpages and want to attach some
+ * extra OOB data to an ECC chunk.
+ *
+ * Returns a positive number of bitflips less than or equal to
+ * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
+ * threshold. In case of success, the passed buffers are filled with 0xff.
+ */
+int nand_check_erased_ecc_chunk(void *data, int datalen,
+ void *ecc, int ecclen,
+ void *extraoob, int extraooblen,
+ int bitflips_threshold)
+{
+ int data_bitflips = 0, ecc_bitflips = 0, extraoob_bitflips = 0;
+
+ data_bitflips = nand_check_erased_buf(data, datalen,
+ bitflips_threshold);
+ if (data_bitflips < 0)
+ return data_bitflips;
+
+ bitflips_threshold -= data_bitflips;
+
+ ecc_bitflips = nand_check_erased_buf(ecc, ecclen, bitflips_threshold);
+ if (ecc_bitflips < 0)
+ return ecc_bitflips;
+
+ bitflips_threshold -= ecc_bitflips;
+
+ extraoob_bitflips = nand_check_erased_buf(extraoob, extraooblen,
+ bitflips_threshold);
+ if (extraoob_bitflips < 0)
+ return extraoob_bitflips;
+
+ if (data_bitflips)
+ memset(data, 0xff, datalen);
+
+ if (ecc_bitflips)
+ memset(ecc, 0xff, ecclen);
+
+ if (extraoob_bitflips)
+ memset(extraoob, 0xff, extraooblen);
+
+ return data_bitflips + ecc_bitflips + extraoob_bitflips;
+}
+EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
+
+/**
+ * nand_read_page_raw_notsupp - dummy read raw page function
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * Returns -ENOTSUPP unconditionally.
+ */
+int nand_read_page_raw_notsupp(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ return -ENOTSUPP;
+}
+
+/**
+ * nand_read_page_raw - [INTERN] read raw page data without ecc
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * Not for syndrome calculating ECC controllers, which use a special oob layout.
+ */
+int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required,
+ int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ ret = nand_read_page_op(chip, page, 0, buf, mtd->writesize);
+ if (ret)
+ return ret;
+
+ if (oob_required) {
+ ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
+ false, false);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(nand_read_page_raw);
+
+/**
+ * nand_monolithic_read_page_raw - Monolithic page read in raw mode
+ * @chip: NAND chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * This is a raw page read, i.e. without any error detection/correction.
+ * Monolithic means we are requesting all the relevant data (main plus
+ * possibly OOB) to be loaded in the NAND cache and sent over the
+ * bus (from the NAND chip to the NAND controller) in a single
+ * operation. This is an alternative to nand_read_page_raw(), which
+ * first reads the main data, and if the OOB data is requested too,
+ * then reads more data on the bus.
+ */
+int nand_monolithic_read_page_raw(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned int size = mtd->writesize;
+ u8 *read_buf = buf;
+ int ret;
+
+ if (oob_required) {
+ size += mtd->oobsize;
+
+ if (buf != chip->data_buf)
+ read_buf = nand_get_data_buf(chip);
+ }
+
+ ret = nand_read_page_op(chip, page, 0, read_buf, size);
+ if (ret)
+ return ret;
+
+ if (buf != chip->data_buf)
+ memcpy(buf, read_buf, mtd->writesize);
+
+ return 0;
+}
+EXPORT_SYMBOL(nand_monolithic_read_page_raw);
+
+/**
+ * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * We need a special oob layout and handling even when OOB isn't used.
+ */
+static int nand_read_page_raw_syndrome(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ uint8_t *oob = chip->oob_poi;
+ int steps, size, ret;
+
+ ret = nand_read_page_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+
+ for (steps = chip->ecc.steps; steps > 0; steps--) {
+ ret = nand_read_data_op(chip, buf, eccsize, false, false);
+ if (ret)
+ return ret;
+
+ buf += eccsize;
+
+ if (chip->ecc.prepad) {
+ ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
+ false, false);
+ if (ret)
+ return ret;
+
+ oob += chip->ecc.prepad;
+ }
+
+ ret = nand_read_data_op(chip, oob, eccbytes, false, false);
+ if (ret)
+ return ret;
+
+ oob += eccbytes;
+
+ if (chip->ecc.postpad) {
+ ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
+ false, false);
+ if (ret)
+ return ret;
+
+ oob += chip->ecc.postpad;
+ }
+ }
+
+ size = mtd->oobsize - (oob - chip->oob_poi);
+ if (size) {
+ ret = nand_read_data_op(chip, oob, size, false, false);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ */
+static int nand_read_page_swecc(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int i, eccsize = chip->ecc.size, ret;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ uint8_t *p = buf;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
+ uint8_t *ecc_code = chip->ecc.code_buf;
+ unsigned int max_bitflips = 0;
+
+ chip->ecc.read_page_raw(chip, buf, 1, page);
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
+ chip->ecc.calculate(chip, p, &ecc_calc[i]);
+
+ ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
+ chip->ecc.total);
+ if (ret)
+ return ret;
+
+ eccsteps = chip->ecc.steps;
+ p = buf;
+
+ for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ int stat;
+
+ stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+ }
+ return max_bitflips;
+}
+
+/**
+ * nand_read_subpage - [REPLACEABLE] ECC based sub-page read function
+ * @chip: nand chip info structure
+ * @data_offs: offset of requested data within the page
+ * @readlen: data length
+ * @bufpoi: buffer to store read data
+ * @page: page number to read
+ */
+static int nand_read_subpage(struct nand_chip *chip, uint32_t data_offs,
+ uint32_t readlen, uint8_t *bufpoi, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int start_step, end_step, num_steps, ret;
+ uint8_t *p;
+ int data_col_addr, i, gaps = 0;
+ int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
+ int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
+ int index, section = 0;
+ unsigned int max_bitflips = 0;
+ struct mtd_oob_region oobregion = { };
+
+ /* Column address within the page aligned to ECC size (256 bytes) */
+ start_step = data_offs / chip->ecc.size;
+ end_step = (data_offs + readlen - 1) / chip->ecc.size;
+ num_steps = end_step - start_step + 1;
+ index = start_step * chip->ecc.bytes;
+
+ /* Data size aligned to the ECC size */
+ datafrag_len = num_steps * chip->ecc.size;
+ eccfrag_len = num_steps * chip->ecc.bytes;
+
+ data_col_addr = start_step * chip->ecc.size;
+ /* The read may not be page aligned, position the buffer accordingly */
+ p = bufpoi + data_col_addr;
+ ret = nand_read_page_op(chip, page, data_col_addr, p, datafrag_len);
+ if (ret)
+ return ret;
+
+ /* Calculate ECC */
+ for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
+ chip->ecc.calculate(chip, p, &chip->ecc.calc_buf[i]);
+
+ /*
+ * The read is faster if we fetch the ECC bytes at the offsets given
+ * by ecc.pos. Let's make sure that there are no gaps in the ECC positions.
+ */
+ ret = mtd_ooblayout_find_eccregion(mtd, index, &section, &oobregion);
+ if (ret)
+ return ret;
+
+ if (oobregion.length < eccfrag_len)
+ gaps = 1;
+
+ if (gaps) {
+ ret = nand_change_read_column_op(chip, mtd->writesize,
+ chip->oob_poi, mtd->oobsize,
+ false);
+ if (ret)
+ return ret;
+ } else {
+ /*
+ * Send the command to read the particular ECC bytes, taking care
+ * of buswidth alignment in read_buf().
+ */
+ aligned_pos = oobregion.offset & ~(busw - 1);
+ aligned_len = eccfrag_len;
+ if (oobregion.offset & (busw - 1))
+ aligned_len++;
+ if ((oobregion.offset + (num_steps * chip->ecc.bytes)) &
+ (busw - 1))
+ aligned_len++;
+
+ ret = nand_change_read_column_op(chip,
+ mtd->writesize + aligned_pos,
+ &chip->oob_poi[aligned_pos],
+ aligned_len, false);
+ if (ret)
+ return ret;
+ }
+
+ ret = mtd_ooblayout_get_eccbytes(mtd, chip->ecc.code_buf,
+ chip->oob_poi, index, eccfrag_len);
+ if (ret)
+ return ret;
+
+ p = bufpoi + data_col_addr;
+ for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
+ int stat;
+
+ stat = chip->ecc.correct(chip, p, &chip->ecc.code_buf[i],
+ &chip->ecc.calc_buf[i]);
+ if (stat == -EBADMSG &&
+ (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
+ /* check for empty pages with bitflips */
+ stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
+ &chip->ecc.code_buf[i],
+ chip->ecc.bytes,
+ NULL, 0,
+ chip->ecc.strength);
+ }
+
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+ }
+ return max_bitflips;
+}
+
+/**
+ * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * Not for syndrome calculating ECC controllers which need a special oob layout.
+ */
+static int nand_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int i, eccsize = chip->ecc.size, ret;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ uint8_t *p = buf;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
+ uint8_t *ecc_code = chip->ecc.code_buf;
+ unsigned int max_bitflips = 0;
+
+ ret = nand_read_page_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ chip->ecc.hwctl(chip, NAND_ECC_READ);
+
+ ret = nand_read_data_op(chip, p, eccsize, false, false);
+ if (ret)
+ return ret;
+
+ chip->ecc.calculate(chip, p, &ecc_calc[i]);
+ }
+
+ ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false,
+ false);
+ if (ret)
+ return ret;
+
+ ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
+ chip->ecc.total);
+ if (ret)
+ return ret;
+
+ eccsteps = chip->ecc.steps;
+ p = buf;
+
+ for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ int stat;
+
+ stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
+ if (stat == -EBADMSG &&
+ (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
+ /* check for empty pages with bitflips */
+ stat = nand_check_erased_ecc_chunk(p, eccsize,
+ &ecc_code[i], eccbytes,
+ NULL, 0,
+ chip->ecc.strength);
+ }
+
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+ }
+ return max_bitflips;
+}
+
+/**
+ * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * The hw generator calculates the error syndrome automatically. Therefore we
+ * need a special oob layout and handling.
+ */
+static int nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret, i, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
+ uint8_t *p = buf;
+ uint8_t *oob = chip->oob_poi;
+ unsigned int max_bitflips = 0;
+
+ ret = nand_read_page_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ int stat;
+
+ chip->ecc.hwctl(chip, NAND_ECC_READ);
+
+ ret = nand_read_data_op(chip, p, eccsize, false, false);
+ if (ret)
+ return ret;
+
+ if (chip->ecc.prepad) {
+ ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
+ false, false);
+ if (ret)
+ return ret;
+
+ oob += chip->ecc.prepad;
+ }
+
+ chip->ecc.hwctl(chip, NAND_ECC_READSYN);
+
+ ret = nand_read_data_op(chip, oob, eccbytes, false, false);
+ if (ret)
+ return ret;
+
+ stat = chip->ecc.correct(chip, p, oob, NULL);
+
+ oob += eccbytes;
+
+ if (chip->ecc.postpad) {
+ ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
+ false, false);
+ if (ret)
+ return ret;
+
+ oob += chip->ecc.postpad;
+ }
+
+ if (stat == -EBADMSG &&
+ (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
+ /* check for empty pages with bitflips */
+ stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
+ oob - eccpadbytes,
+ eccpadbytes,
+ NULL, 0,
+ chip->ecc.strength);
+ }
+
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+ }
+
+ /* Calculate remaining oob bytes */
+ i = mtd->oobsize - (oob - chip->oob_poi);
+ if (i) {
+ ret = nand_read_data_op(chip, oob, i, false, false);
+ if (ret)
+ return ret;
+ }
+
+ return max_bitflips;
+}
+
+/**
+ * nand_transfer_oob - [INTERN] Transfer oob to client buffer
+ * @chip: NAND chip object
+ * @oob: oob destination address
+ * @ops: oob ops structure
+ * @len: size of oob to transfer
+ */
+static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
+ struct mtd_oob_ops *ops, size_t len)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ switch (ops->mode) {
+
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_RAW:
+ memcpy(oob, chip->oob_poi + ops->ooboffs, len);
+ return oob + len;
+
+ case MTD_OPS_AUTO_OOB:
+ ret = mtd_ooblayout_get_databytes(mtd, oob, chip->oob_poi,
+ ops->ooboffs, len);
+ BUG_ON(ret);
+ return oob + len;
+
+ default:
+ BUG();
+ }
+ return NULL;
+}
+
+/**
+ * nand_setup_read_retry - [INTERN] Set the READ RETRY mode
+ * @chip: NAND chip object
+ * @retry_mode: the retry mode to use
+ *
+ * Some vendors supply a special command to shift the Vt threshold, to be used
+ * when there are too many bitflips in a page (i.e., ECC error). After setting
+ * a new threshold, the host should retry reading the page.
+ */
+static int nand_setup_read_retry(struct nand_chip *chip, int retry_mode)
+{
+ pr_debug("setting READ RETRY mode %d\n", retry_mode);
+
+ if (retry_mode >= chip->read_retries)
+ return -EINVAL;
+
+ if (!chip->ops.setup_read_retry)
+ return -EOPNOTSUPP;
+
+ return chip->ops.setup_read_retry(chip, retry_mode);
+}
+
+static void nand_wait_readrdy(struct nand_chip *chip)
+{
+ const struct nand_sdr_timings *sdr;
+
+ if (!(chip->options & NAND_NEED_READRDY))
+ return;
+
+ sdr = nand_get_sdr_timings(nand_get_interface_config(chip));
+ WARN_ON(nand_wait_rdy_op(chip, PSEC_TO_MSEC(sdr->tR_max), 0));
+}
+
+/**
+ * nand_do_read_ops - [INTERN] Read data with ECC
+ * @chip: NAND chip object
+ * @from: offset to read from
+ * @ops: oob ops structure
+ *
+ * Internal function. Called with chip held.
+ */
+static int nand_do_read_ops(struct nand_chip *chip, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ int chipnr, page, realpage, col, bytes, aligned, oob_required;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret = 0;
+ uint32_t readlen = ops->len;
+ uint32_t oobreadlen = ops->ooblen;
+ uint32_t max_oobsize = mtd_oobavail(mtd, ops);
+
+ uint8_t *bufpoi, *oob, *buf;
+ int use_bounce_buf;
+ unsigned int max_bitflips = 0;
+ int retry_mode = 0;
+ bool ecc_fail = false;
+
+ chipnr = (int)(from >> chip->chip_shift);
+ nand_select_target(chip, chipnr);
+
+ realpage = (int)(from >> chip->page_shift);
+ page = realpage & chip->pagemask;
+
+ col = (int)(from & (mtd->writesize - 1));
+
+ buf = ops->datbuf;
+ oob = ops->oobbuf;
+ oob_required = oob ? 1 : 0;
+
+ while (1) {
+ struct mtd_ecc_stats ecc_stats = mtd->ecc_stats;
+
+ bytes = min(mtd->writesize - col, readlen);
+ aligned = (bytes == mtd->writesize);
+
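+ /*
+ * Partial-page reads always go through the bounce buffer; on
+ * DMA-capable controllers, so do user buffers that are not
+ * DMA-able or not sufficiently aligned.
+ */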
+ if (!aligned)
+ use_bounce_buf = 1;
+ else if (chip->options & NAND_USES_DMA)
+ use_bounce_buf = !virt_addr_valid(buf) ||
+ !IS_ALIGNED((unsigned long)buf,
+ chip->buf_align);
+ else
+ use_bounce_buf = 0;
+
+ /* Is the current page in the buffer? */
+ if (realpage != chip->pagecache.page || oob) {
+ bufpoi = use_bounce_buf ? chip->data_buf : buf;
+
+ if (use_bounce_buf && aligned)
+ pr_debug("%s: using read bounce buffer for buf@%p\n",
+ __func__, buf);
+
+read_retry:
+ /*
+ * Now read the page into the buffer. Absent an error,
+ * the read methods return max bitflips per ecc step.
+ */
+ if (unlikely(ops->mode == MTD_OPS_RAW))
+ ret = chip->ecc.read_page_raw(chip, bufpoi,
+ oob_required,
+ page);
+ else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
+ !oob)
+ ret = chip->ecc.read_subpage(chip, col, bytes,
+ bufpoi, page);
+ else
+ ret = chip->ecc.read_page(chip, bufpoi,
+ oob_required, page);
+ if (ret < 0) {
+ if (use_bounce_buf)
+ /* Invalidate page cache */
+ chip->pagecache.page = -1;
+ break;
+ }
+
+ /*
+ * Copy back the data in the initial buffer when reading
+ * partial pages or when a bounce buffer is required.
+ */
+ if (use_bounce_buf) {
+ if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
+ !(mtd->ecc_stats.failed - ecc_stats.failed) &&
+ (ops->mode != MTD_OPS_RAW)) {
+ chip->pagecache.page = realpage;
+ chip->pagecache.bitflips = ret;
+ } else {
+ /* Invalidate page cache */
+ chip->pagecache.page = -1;
+ }
+ memcpy(buf, bufpoi + col, bytes);
+ }
+
+ if (unlikely(oob)) {
+ int toread = min(oobreadlen, max_oobsize);
+
+ if (toread) {
+ oob = nand_transfer_oob(chip, oob, ops,
+ toread);
+ oobreadlen -= toread;
+ }
+ }
+
+ nand_wait_readrdy(chip);
+
+ if (mtd->ecc_stats.failed - ecc_stats.failed) {
+ if (retry_mode + 1 < chip->read_retries) {
+ retry_mode++;
+ ret = nand_setup_read_retry(chip,
+ retry_mode);
+ if (ret < 0)
+ break;
+
+ /* Reset ecc_stats; retry */
+ mtd->ecc_stats = ecc_stats;
+ goto read_retry;
+ } else {
+ /* No more retry modes; real failure */
+ ecc_fail = true;
+ }
+ }
+
+ buf += bytes;
+ max_bitflips = max_t(unsigned int, max_bitflips, ret);
+ } else {
+ memcpy(buf, chip->data_buf + col, bytes);
+ buf += bytes;
+ max_bitflips = max_t(unsigned int, max_bitflips,
+ chip->pagecache.bitflips);
+ }
+
+ readlen -= bytes;
+
+ /* Reset to retry mode 0 */
+ if (retry_mode) {
+ ret = nand_setup_read_retry(chip, 0);
+ if (ret < 0)
+ break;
+ retry_mode = 0;
+ }
+
+ if (!readlen)
+ break;
+
+ /* For subsequent reads align to page boundary */
+ col = 0;
+ /* Increment page address */
+ realpage++;
+
+ page = realpage & chip->pagemask;
+ /* Check, if we cross a chip boundary */
+ if (!page) {
+ chipnr++;
+ nand_deselect_target(chip);
+ nand_select_target(chip, chipnr);
+ }
+ }
+ nand_deselect_target(chip);
+
+ ops->retlen = ops->len - (size_t) readlen;
+ if (oob)
+ ops->oobretlen = ops->ooblen - oobreadlen;
+
+ if (ret < 0)
+ return ret;
+
+ if (ecc_fail)
+ return -EBADMSG;
+
+ return max_bitflips;
+}
+
+/**
+ * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
+ * @chip: nand chip info structure
+ * @page: page number to read
+ */
+int nand_read_oob_std(struct nand_chip *chip, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
+}
+EXPORT_SYMBOL(nand_read_oob_std);
+
+/**
+ * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC
+ * with syndromes
+ * @chip: nand chip info structure
+ * @page: page number to read
+ */
+static int nand_read_oob_syndrome(struct nand_chip *chip, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int length = mtd->oobsize;
+ int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
+ int eccsize = chip->ecc.size;
+ uint8_t *bufpoi = chip->oob_poi;
+ int i, toread, sndrnd = 0, pos, ret;
+
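+ /*
+ * The on-flash layout is data/(prepad-ecc-postpad) interleaved per
+ * ECC step, with the leftover free OOB bytes at the end: start at
+ * the first OOB chunk and hop over the data chunk of each step.
+ */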
+ ret = nand_read_page_op(chip, page, chip->ecc.size, NULL, 0);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < chip->ecc.steps; i++) {
+ if (sndrnd) {
+ int ret;
+
+ pos = eccsize + i * (eccsize + chunk);
+ if (mtd->writesize > 512)
+ ret = nand_change_read_column_op(chip, pos,
+ NULL, 0,
+ false);
+ else
+ ret = nand_read_page_op(chip, page, pos, NULL,
+ 0);
+
+ if (ret)
+ return ret;
+ } else
+ sndrnd = 1;
+ toread = min_t(int, length, chunk);
+
+ ret = nand_read_data_op(chip, bufpoi, toread, false, false);
+ if (ret)
+ return ret;
+
+ bufpoi += toread;
+ length -= toread;
+ }
+ if (length > 0) {
+ ret = nand_read_data_op(chip, bufpoi, length, false, false);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
+ * @chip: nand chip info structure
+ * @page: page number to write
+ */
+int nand_write_oob_std(struct nand_chip *chip, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
+ mtd->oobsize);
+}
+EXPORT_SYMBOL(nand_write_oob_std);
+
+/**
+ * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
+ * with syndrome - only for large page flash
+ * @chip: nand chip info structure
+ * @page: page number to write
+ */
+static int nand_write_oob_syndrome(struct nand_chip *chip, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
+ int eccsize = chip->ecc.size, length = mtd->oobsize;
+ int ret, i, len, pos, sndcmd = 0, steps = chip->ecc.steps;
+ const uint8_t *bufpoi = chip->oob_poi;
+
+ /*
+ * data-ecc-data-ecc ... ecc-oob
+ * or
+ * data-pad-ecc-pad-data-pad .... ecc-pad-oob
+ */
+ if (!chip->ecc.prepad && !chip->ecc.postpad) {
+ pos = steps * (eccsize + chunk);
+ steps = 0;
+ } else
+ pos = eccsize;
+
+ ret = nand_prog_page_begin_op(chip, page, pos, NULL, 0);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < steps; i++) {
+ if (sndcmd) {
+ if (mtd->writesize <= 512) {
+ uint32_t fill = 0xFFFFFFFF;
+
+ len = eccsize;
+ while (len > 0) {
+ int num = min_t(int, len, 4);
+
+ ret = nand_write_data_op(chip, &fill,
+ num, false);
+ if (ret)
+ return ret;
+
+ len -= num;
+ }
+ } else {
+ pos = eccsize + i * (eccsize + chunk);
+ ret = nand_change_write_column_op(chip, pos,
+ NULL, 0,
+ false);
+ if (ret)
+ return ret;
+ }
+ } else
+ sndcmd = 1;
+ len = min_t(int, length, chunk);
+
+ ret = nand_write_data_op(chip, bufpoi, len, false);
+ if (ret)
+ return ret;
+
+ bufpoi += len;
+ length -= len;
+ }
+ if (length > 0) {
+ ret = nand_write_data_op(chip, bufpoi, length, false);
+ if (ret)
+ return ret;
+ }
+
+ return nand_prog_page_end_op(chip);
+}
+
+/**
+ * nand_do_read_oob - [INTERN] NAND read out-of-band
+ * @chip: NAND chip object
+ * @from: offset to read from
+ * @ops: oob operations description structure
+ *
+ * NAND read out-of-band data from the spare area.
+ */
+static int nand_do_read_oob(struct nand_chip *chip, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned int max_bitflips = 0;
+ int page, realpage, chipnr;
+ struct mtd_ecc_stats stats;
+ int readlen = ops->ooblen;
+ int len;
+ uint8_t *buf = ops->oobbuf;
+ int ret = 0;
+
+ pr_debug("%s: from = 0x%08Lx, len = %i\n",
+ __func__, (unsigned long long)from, readlen);
+
+ stats = mtd->ecc_stats;
+
+ len = mtd_oobavail(mtd, ops);
+
+ chipnr = (int)(from >> chip->chip_shift);
+ nand_select_target(chip, chipnr);
+
+ /* Shift to get page */
+ realpage = (int)(from >> chip->page_shift);
+ page = realpage & chip->pagemask;
+
+ while (1) {
+ if (ops->mode == MTD_OPS_RAW)
+ ret = chip->ecc.read_oob_raw(chip, page);
+ else
+ ret = chip->ecc.read_oob(chip, page);
+
+ if (ret < 0)
+ break;
+
+ len = min(len, readlen);
+ buf = nand_transfer_oob(chip, buf, ops, len);
+
+ nand_wait_readrdy(chip);
+
+ max_bitflips = max_t(unsigned int, max_bitflips, ret);
+
+ readlen -= len;
+ if (!readlen)
+ break;
+
+ /* Increment page address */
+ realpage++;
+
+ page = realpage & chip->pagemask;
+ /* Check, if we cross a chip boundary */
+ if (!page) {
+ chipnr++;
+ nand_deselect_target(chip);
+ nand_select_target(chip, chipnr);
+ }
+ }
+ nand_deselect_target(chip);
+
+ ops->oobretlen = ops->ooblen - readlen;
+
+ if (ret < 0)
+ return ret;
+
+ if (mtd->ecc_stats.failed - stats.failed)
+ return -EBADMSG;
+
+ return max_bitflips;
+}
+
+/**
+ * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @ops: oob operation description structure
+ *
+ * NAND read data and/or out-of-band data.
+ */
+static int nand_read_oob(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ int ret;
+
+ ops->retlen = 0;
+
+ if (ops->mode != MTD_OPS_PLACE_OOB &&
+ ops->mode != MTD_OPS_AUTO_OOB &&
+ ops->mode != MTD_OPS_RAW)
+ return -ENOTSUPP;
+
+ nand_get_device(chip);
+
+ if (!ops->datbuf)
+ ret = nand_do_read_oob(chip, from, ops);
+ else
+ ret = nand_do_read_ops(chip, from, ops);
+
+ nand_release_device(chip);
+ return ret;
+}
+
+/**
+ * nand_write_page_raw_notsupp - dummy raw page write function
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ *
+ * Returns -ENOTSUPP unconditionally.
+ */
+int nand_write_page_raw_notsupp(struct nand_chip *chip, const u8 *buf,
+ int oob_required, int page)
+{
+ return -ENOTSUPP;
+}
+
+/**
+ * nand_write_page_raw - [INTERN] raw page write function
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ *
+ * Not for syndrome calculating ECC controllers, which use a special oob layout.
+ */
+int nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ ret = nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
+ if (ret)
+ return ret;
+
+ if (oob_required) {
+ ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize,
+ false);
+ if (ret)
+ return ret;
+ }
+
+ return nand_prog_page_end_op(chip);
+}
+EXPORT_SYMBOL(nand_write_page_raw);
+
+/**
+ * nand_monolithic_write_page_raw - Monolithic page write in raw mode
+ * @chip: NAND chip info structure
+ * @buf: data buffer to write
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ *
+ * This is a raw page write, i.e. without any error detection/correction.
+ * Monolithic means we are requesting all the relevant data (main plus
+ * possibly OOB) to be sent over the bus and effectively programmed
+ * into the NAND chip arrays in a single operation. This is an
+ * alternative to nand_write_page_raw(), which first sends the main
+ * data, then possibly sends the OOB data by latching more data
+ * cycles on the NAND bus, and finally sends the program command to
+ * synchronize the NAND chip cache.
+ */
+int nand_monolithic_write_page_raw(struct nand_chip *chip, const u8 *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned int size = mtd->writesize;
+ u8 *write_buf = (u8 *)buf;
+
+ if (oob_required) {
+ size += mtd->oobsize;
+
+ if (buf != chip->data_buf) {
+ write_buf = nand_get_data_buf(chip);
+ memcpy(write_buf, buf, mtd->writesize);
+ }
+ }
+
+ return nand_prog_page_op(chip, page, 0, write_buf, size);
+}
+EXPORT_SYMBOL(nand_monolithic_write_page_raw);
+
+/**
+ * nand_write_page_raw_syndrome - [INTERN] raw page write function
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ *
+ * We need a special oob layout and handling even when ECC isn't checked.
+ */
+static int nand_write_page_raw_syndrome(struct nand_chip *chip,
+ const uint8_t *buf, int oob_required,
+ int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ uint8_t *oob = chip->oob_poi;
+ int steps, size, ret;
+
+ ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+
+ for (steps = chip->ecc.steps; steps > 0; steps--) {
+ ret = nand_write_data_op(chip, buf, eccsize, false);
+ if (ret)
+ return ret;
+
+ buf += eccsize;
+
+ if (chip->ecc.prepad) {
+ ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
+ false);
+ if (ret)
+ return ret;
+
+ oob += chip->ecc.prepad;
+ }
+
+ ret = nand_write_data_op(chip, oob, eccbytes, false);
+ if (ret)
+ return ret;
+
+ oob += eccbytes;
+
+ if (chip->ecc.postpad) {
+ ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
+ false);
+ if (ret)
+ return ret;
+
+ oob += chip->ecc.postpad;
+ }
+ }
+
+ size = mtd->oobsize - (oob - chip->oob_poi);
+ if (size) {
+ ret = nand_write_data_op(chip, oob, size, false);
+ if (ret)
+ return ret;
+ }
+
+ return nand_prog_page_end_op(chip);
+}
+
+/**
+ * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ */
+static int nand_write_page_swecc(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int i, eccsize = chip->ecc.size, ret;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
+ const uint8_t *p = buf;
+
+ /* Software ECC calculation */
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
+ chip->ecc.calculate(chip, p, &ecc_calc[i]);
+
+ ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
+ chip->ecc.total);
+ if (ret)
+ return ret;
+
+ return chip->ecc.write_page_raw(chip, buf, 1, page);
+}
+
+/**
+ * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ */
+static int nand_write_page_hwecc(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int i, eccsize = chip->ecc.size, ret;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
+ const uint8_t *p = buf;
+
+ ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ chip->ecc.hwctl(chip, NAND_ECC_WRITE);
+
+ ret = nand_write_data_op(chip, p, eccsize, false);
+ if (ret)
+ return ret;
+
+ chip->ecc.calculate(chip, p, &ecc_calc[i]);
+ }
+
+ ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
+ chip->ecc.total);
+ if (ret)
+ return ret;
+
+ ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
+ if (ret)
+ return ret;
+
+ return nand_prog_page_end_op(chip);
+}
+
+/**
+ * nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write
+ * @chip: nand chip info structure
+ * @offset: column address of subpage within the page
+ * @data_len: data length
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ */
+static int nand_write_subpage_hwecc(struct nand_chip *chip, uint32_t offset,
+ uint32_t data_len, const uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ uint8_t *oob_buf = chip->oob_poi;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
+ int ecc_size = chip->ecc.size;
+ int ecc_bytes = chip->ecc.bytes;
+ int ecc_steps = chip->ecc.steps;
+ uint32_t start_step = offset / ecc_size;
+ uint32_t end_step = (offset + data_len - 1) / ecc_size;
+ int oob_bytes = mtd->oobsize / ecc_steps;
+ int step, ret;
+
+ ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+
+ for (step = 0; step < ecc_steps; step++) {
+ /* configure controller for WRITE access */
+ chip->ecc.hwctl(chip, NAND_ECC_WRITE);
+
+ /* write data (untouched subpages already masked by 0xFF) */
+ ret = nand_write_data_op(chip, buf, ecc_size, false);
+ if (ret)
+ return ret;
+
+ /* mask ECC of un-touched subpages by padding 0xFF */
+ if ((step < start_step) || (step > end_step))
+ memset(ecc_calc, 0xff, ecc_bytes);
+ else
+ chip->ecc.calculate(chip, buf, ecc_calc);
+
+ /* mask OOB of un-touched subpages by padding 0xFF */
+ /* if oob_required, preserve OOB metadata of written subpage */
+ if (!oob_required || (step < start_step) || (step > end_step))
+ memset(oob_buf, 0xff, oob_bytes);
+
+ buf += ecc_size;
+ ecc_calc += ecc_bytes;
+ oob_buf += oob_bytes;
+ }
+
+ /*
+ * Copy the calculated ECC for the whole page to chip->oob_poi; this
+ * includes the masked value (0xFF) for unwritten subpages.
+ */
+ ecc_calc = chip->ecc.calc_buf;
+ ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
+ chip->ecc.total);
+ if (ret)
+ return ret;
+
+ /* write OOB buffer to NAND device */
+ ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
+ if (ret)
+ return ret;
+
+ return nand_prog_page_end_op(chip);
+}
+
+/**
+ * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ *
+ * The hw generator calculates the error syndrome automatically. Therefore we
+ * need a special oob layout and handling.
+ */
+static int nand_write_page_syndrome(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int i, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ const uint8_t *p = buf;
+ uint8_t *oob = chip->oob_poi;
+ int ret;
+
+ ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ chip->ecc.hwctl(chip, NAND_ECC_WRITE);
+
+ ret = nand_write_data_op(chip, p, eccsize, false);
+ if (ret)
+ return ret;
+
+ if (chip->ecc.prepad) {
+ ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
+ false);
+ if (ret)
+ return ret;
+
+ oob += chip->ecc.prepad;
+ }
+
+ chip->ecc.calculate(chip, p, oob);
+
+ ret = nand_write_data_op(chip, oob, eccbytes, false);
+ if (ret)
+ return ret;
+
+ oob += eccbytes;
+
+ if (chip->ecc.postpad) {
+ ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
+ false);
+ if (ret)
+ return ret;
+
+ oob += chip->ecc.postpad;
+ }
+ }
+
+ /* Calculate remaining oob bytes */
+ i = mtd->oobsize - (oob - chip->oob_poi);
+ if (i) {
+ ret = nand_write_data_op(chip, oob, i, false);
+ if (ret)
+ return ret;
+ }
+
+ return nand_prog_page_end_op(chip);
+}
+
+/**
+ * nand_write_page - write one page
+ * @chip: NAND chip descriptor
+ * @offset: address offset within the page
+ * @data_len: length of actual data to be written
+ * @buf: the data to write
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ * @raw: use _raw version of write_page
+ */
+static int nand_write_page(struct nand_chip *chip, uint32_t offset,
+ int data_len, const uint8_t *buf, int oob_required,
+ int page, int raw)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int status, subpage;
+
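+ /*
+ * A write that does not start at column 0 or does not cover a
+ * full page is a subpage write, provided the chip and the ECC
+ * driver support it.
+ */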
+ if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
+ chip->ecc.write_subpage)
+ subpage = offset || (data_len < mtd->writesize);
+ else
+ subpage = 0;
+
+ if (unlikely(raw))
+ status = chip->ecc.write_page_raw(chip, buf, oob_required,
+ page);
+ else if (subpage)
+ status = chip->ecc.write_subpage(chip, offset, data_len, buf,
+ oob_required, page);
+ else
+ status = chip->ecc.write_page(chip, buf, oob_required, page);
+
+ if (status < 0)
+ return status;
+
+ return 0;
+}
+
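+/* Evaluates to true when @x is not aligned to the chip's subpage size */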
+#define NOTALIGNED(x) ((x & (chip->subpagesize - 1)) != 0)
+
+/**
+ * nand_do_write_ops - [INTERN] NAND write with ECC
+ * @chip: NAND chip object
+ * @to: offset to write to
+ * @ops: oob operations description structure
+ *
+ * NAND write with ECC.
+ */
+static int nand_do_write_ops(struct nand_chip *chip, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int chipnr, realpage, page, column;
+ uint32_t writelen = ops->len;
+
+ uint32_t oobwritelen = ops->ooblen;
+ uint32_t oobmaxlen = mtd_oobavail(mtd, ops);
+
+ uint8_t *oob = ops->oobbuf;
+ uint8_t *buf = ops->datbuf;
+ int ret;
+ int oob_required = oob ? 1 : 0;
+
+ ops->retlen = 0;
+ if (!writelen)
+ return 0;
+
+ /* Reject writes, which are not page aligned */
+ if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
+ pr_notice("%s: attempt to write non page aligned data\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ column = to & (mtd->writesize - 1);
+
+ chipnr = (int)(to >> chip->chip_shift);
+ nand_select_target(chip, chipnr);
+
+ /* Check, if it is write protected */
+ if (nand_check_wp(chip)) {
+ ret = -EIO;
+ goto err_out;
+ }
+
+ realpage = (int)(to >> chip->page_shift);
+ page = realpage & chip->pagemask;
+
+ /* Invalidate the page cache, when we write to the cached page */
+ if (to <= ((loff_t)chip->pagecache.page << chip->page_shift) &&
+ ((loff_t)chip->pagecache.page << chip->page_shift) < (to + ops->len))
+ chip->pagecache.page = -1;
+
+ /* Don't allow multipage oob writes with offset */
+ if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) {
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ while (1) {
+ int bytes = mtd->writesize;
+ uint8_t *wbuf = buf;
+ int use_bounce_buf;
+ int part_pagewr = (column || writelen < mtd->writesize);
+
+ if (part_pagewr)
+ use_bounce_buf = 1;
+ else if (chip->options & NAND_USES_DMA)
+ use_bounce_buf = !virt_addr_valid(buf) ||
+ !IS_ALIGNED((unsigned long)buf,
+ chip->buf_align);
+ else
+ use_bounce_buf = 0;
+
+ /*
+ * Copy the data from the initial buffer when doing partial page
+ * writes or when a bounce buffer is required.
+ */
+ if (use_bounce_buf) {
+ pr_debug("%s: using write bounce buffer for buf@%p\n",
+ __func__, buf);
+ if (part_pagewr)
+ bytes = min_t(int, bytes - column, writelen);
+ wbuf = nand_get_data_buf(chip);
+ memset(wbuf, 0xff, mtd->writesize);
+ memcpy(&wbuf[column], buf, bytes);
+ }
+
+ if (unlikely(oob)) {
+ size_t len = min(oobwritelen, oobmaxlen);
+ oob = nand_fill_oob(chip, oob, len, ops);
+ oobwritelen -= len;
+ } else {
+ /* We still need to erase leftover OOB data */
+ memset(chip->oob_poi, 0xff, mtd->oobsize);
+ }
+
+ ret = nand_write_page(chip, column, bytes, wbuf,
+ oob_required, page,
+ (ops->mode == MTD_OPS_RAW));
+ if (ret)
+ break;
+
+ writelen -= bytes;
+ if (!writelen)
+ break;
+
+ column = 0;
+ buf += bytes;
+ realpage++;
+
+ page = realpage & chip->pagemask;
+ /* Check, if we cross a chip boundary */
+ if (!page) {
+ chipnr++;
+ nand_deselect_target(chip);
+ nand_select_target(chip, chipnr);
+ }
+ }
+
+ ops->retlen = ops->len - writelen;
+ if (unlikely(oob))
+ ops->oobretlen = ops->ooblen;
+
+err_out:
+ nand_deselect_target(chip);
+ return ret;
+}
+
+/**
+ * panic_nand_write - [MTD Interface] NAND write with ECC
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @len: number of bytes to write
+ * @retlen: pointer to variable to store the number of written bytes
+ * @buf: the data to write
+ *
+ * NAND write with ECC. Used when performing writes in interrupt context, this
+ * may for example be called by mtdoops when writing an oops while in panic.
+ */
+static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const uint8_t *buf)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ int chipnr = (int)(to >> chip->chip_shift);
+ struct mtd_oob_ops ops;
+ int ret;
+
+ nand_select_target(chip, chipnr);
+
+ /* Wait for the device to get ready */
+ panic_nand_wait(chip, 400);
+
+ memset(&ops, 0, sizeof(ops));
+ ops.len = len;
+ ops.datbuf = (uint8_t *)buf;
+ ops.mode = MTD_OPS_PLACE_OOB;
+
+ ret = nand_do_write_ops(chip, to, &ops);
+
+ *retlen = ops.retlen;
+ return ret;
+}
+
+/**
+ * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @ops: oob operation description structure
+ */
+static int nand_write_oob(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ int ret = 0;
+
+ ops->retlen = 0;
+
+ nand_get_device(chip);
+
+ switch (ops->mode) {
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_AUTO_OOB:
+ case MTD_OPS_RAW:
+ break;
+
+ default:
+ goto out;
+ }
+
+ if (!ops->datbuf)
+ ret = nand_do_write_oob(chip, to, ops);
+ else
+ ret = nand_do_write_ops(chip, to, ops);
+
+out:
+ nand_release_device(chip);
+ return ret;
+}
+
+/**
+ * nand_erase - [MTD Interface] erase block(s)
+ * @mtd: MTD device structure
+ * @instr: erase instruction
+ *
+ * Erase one or more blocks.
+ */
+static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ return nand_erase_nand(mtd_to_nand(mtd), instr, 0);
+}
+
+/**
+ * nand_erase_nand - [INTERN] erase block(s)
+ * @chip: NAND chip object
+ * @instr: erase instruction
+ * @allowbbt: allow erasing the bbt area
+ *
+ * Erase one or more blocks.
+ */
+int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
+ int allowbbt)
+{
+ int page, pages_per_block, ret, chipnr;
+ loff_t len;
+
+ pr_debug("%s: start = 0x%012llx, len = %llu\n",
+ __func__, (unsigned long long)instr->addr,
+ (unsigned long long)instr->len);
+
+ if (check_offs_len(chip, instr->addr, instr->len))
+ return -EINVAL;
+
+ /* Grab the lock and see if the device is available */
+ nand_get_device(chip);
+
+ /* Shift to get first page */
+ page = (int)(instr->addr >> chip->page_shift);
+ chipnr = (int)(instr->addr >> chip->chip_shift);
+
+ /* Calculate pages in each block */
+ pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
+
+ /* Select the NAND device */
+ nand_select_target(chip, chipnr);
+
+ /* Check, if it is write protected */
+ if (nand_check_wp(chip)) {
+ pr_debug("%s: device is write protected!\n",
+ __func__);
+ ret = -EIO;
+ goto erase_exit;
+ }
+
+ /* Loop through the pages */
+ len = instr->len;
+
+ while (len) {
+ /* Check if we have a bad block, we do not erase bad blocks! */
+ if (nand_block_checkbad(chip, ((loff_t) page) <<
+ chip->page_shift, allowbbt)) {
+ pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
+ __func__, page);
+ ret = -EIO;
+ goto erase_exit;
+ }
+
+ /*
+ * Invalidate the page cache, if we erase the block which
+ * contains the current cached page.
+ */
+ if (page <= chip->pagecache.page && chip->pagecache.page <
+ (page + pages_per_block))
+ chip->pagecache.page = -1;
+
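+ /* Convert the per-target page number to an eraseblock number */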
+ ret = nand_erase_op(chip, (page & chip->pagemask) >>
+ (chip->phys_erase_shift - chip->page_shift));
+ if (ret) {
+ pr_debug("%s: failed erase, page 0x%08x\n",
+ __func__, page);
+ instr->fail_addr =
+ ((loff_t)page << chip->page_shift);
+ goto erase_exit;
+ }
+
+ /* Increment page address and decrement length */
+ len -= (1ULL << chip->phys_erase_shift);
+ page += pages_per_block;
+
+ /* Check, if we cross a chip boundary */
+ if (len && !(page & chip->pagemask)) {
+ chipnr++;
+ nand_deselect_target(chip);
+ nand_select_target(chip, chipnr);
+ }
+ }
+
+ ret = 0;
+erase_exit:
+
+ /* Deselect and wake up anyone waiting on the device */
+ nand_deselect_target(chip);
+ nand_release_device(chip);
+
+ /* Return more or less happy */
+ return ret;
+}
+
+/**
+ * nand_sync - [MTD Interface] sync
+ * @mtd: MTD device structure
+ *
+ * Sync is actually a wait-for-chip-ready function.
+ */
+static void nand_sync(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ pr_debug("%s: called\n", __func__);
+
+ /* Grab the lock and see if the device is available */
+ nand_get_device(chip);
+ /* Release it and go back */
+ nand_release_device(chip);
+}
+
+/**
+ * nand_block_isbad - [MTD Interface] Check if block at offset is bad
+ * @mtd: MTD device structure
+ * @offs: offset relative to mtd start
+ */
+static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ int chipnr = (int)(offs >> chip->chip_shift);
+ int ret;
+
+ /* Select the NAND device */
+ nand_get_device(chip);
+
+ nand_select_target(chip, chipnr);
+
+ ret = nand_block_checkbad(chip, offs, 0);
+
+ nand_deselect_target(chip);
+ nand_release_device(chip);
+
+ return ret;
+}
+
+/**
+ * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad
+ * @mtd: MTD device structure
+ * @ofs: offset relative to mtd start
+ */
+static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+ int ret;
+
+ ret = nand_block_isbad(mtd, ofs);
+ if (ret) {
+ /* If it was bad already, return success and do nothing */
+ if (ret > 0)
+ return 0;
+ return ret;
+ }
+
+ return nand_block_markbad_lowlevel(mtd_to_nand(mtd), ofs);
+}
+
+/**
+ * nand_suspend - [MTD Interface] Suspend the NAND flash
+ * @mtd: MTD device structure
+ *
+ * Returns 0 for success or negative error code otherwise.
+ */
+static int nand_suspend(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ int ret = 0;
+
+ mutex_lock(&chip->lock);
+ if (chip->ops.suspend)
+ ret = chip->ops.suspend(chip);
+ if (!ret)
+ chip->suspended = 1;
+ mutex_unlock(&chip->lock);
+
+ return ret;
+}
+
+/**
+ * nand_resume - [MTD Interface] Resume the NAND flash
+ * @mtd: MTD device structure
+ */
+static void nand_resume(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ mutex_lock(&chip->lock);
+ if (chip->suspended) {
+ if (chip->ops.resume)
+ chip->ops.resume(chip);
+ chip->suspended = 0;
+ } else {
+ pr_err("%s called for a chip which is not in suspended state\n",
+ __func__);
+ }
+ mutex_unlock(&chip->lock);
+
+ wake_up_all(&chip->resume_wq);
+}
+
+/**
+ * nand_shutdown - [MTD Interface] Finish the current NAND operation and
+ * prevent further operations
+ * @mtd: MTD device structure
+ */
+static void nand_shutdown(struct mtd_info *mtd)
+{
+ nand_suspend(mtd);
+}
+
+/**
+ * nand_lock - [MTD Interface] Lock the NAND flash
+ * @mtd: MTD device structure
+ * @ofs: offset byte address
+ * @len: number of bytes to lock (must be a multiple of block/page size)
+ */
+static int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (!chip->ops.lock_area)
+ return -ENOTSUPP;
+
+ return chip->ops.lock_area(chip, ofs, len);
+}
+
+/**
+ * nand_unlock - [MTD Interface] Unlock the NAND flash
+ * @mtd: MTD device structure
+ * @ofs: offset byte address
+ * @len: number of bytes to unlock (must be a multiple of block/page size)
+ */
+static int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (!chip->ops.unlock_area)
+ return -ENOTSUPP;
+
+ return chip->ops.unlock_area(chip, ofs, len);
+}
+
+/* Set default functions */
+static void nand_set_defaults(struct nand_chip *chip)
+{
+ /* If no controller is provided, use the dummy, legacy one. */
+ if (!chip->controller) {
+ chip->controller = &chip->legacy.dummy_controller;
+ nand_controller_init(chip->controller);
+ }
+
+ nand_legacy_set_defaults(chip);
+
+ if (!chip->buf_align)
+ chip->buf_align = 1;
+}
+
+/* Sanitize ONFI strings so we can safely print them */
+void sanitize_string(uint8_t *s, size_t len)
+{
+ ssize_t i;
+
+ /* Null terminate */
+ s[len - 1] = 0;
+
+ /* Replace non-printable chars with '?' */
+ for (i = 0; i < len - 1; i++) {
+ if (s[i] < ' ' || s[i] > 127)
+ s[i] = '?';
+ }
+
+ /* Remove trailing spaces */
+ strim(s);
+}
+
+/*
+ * nand_id_has_period - Check if an ID string has a given wraparound period
+ * @id_data: the ID string
+ * @arrlen: the length of the @id_data array
+ * @period: the period of repetition
+ *
+ * Check if an ID string is repeated within a given sequence of bytes at
+ * specific repetition interval period (e.g., {0x20,0x01,0x7F,0x20} has a
+ * period of 3). This is a helper function for nand_id_len(). Returns non-zero
+ * if the repetition has a period of @period; otherwise, returns zero.
+ */
+static int nand_id_has_period(u8 *id_data, int arrlen, int period)
+{
+ int i, j;
+ for (i = 0; i < period; i++)
+ for (j = i + period; j < arrlen; j += period)
+ if (id_data[i] != id_data[j])
+ return 0;
+ return 1;
+}
+
+/*
+ * nand_id_len - Get the length of an ID string returned by CMD_READID
+ * @id_data: the ID string
+ * @arrlen: the length of the @id_data array
+ *
+ * Returns the length of the ID string, according to known wraparound/trailing
+ * zero patterns. If no pattern exists, returns the length of the array.
+ */
+static int nand_id_len(u8 *id_data, int arrlen)
+{
+ int last_nonzero, period;
+
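+ /*
+ * Example: an ID that reads back as ec da 10 95 44 00 00 00 has no
+ * wraparound period shorter than the array, so we fall through to
+ * the trailing-zero rule below and report a length of 5.
+ */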
+ /* Find last non-zero byte */
+ for (last_nonzero = arrlen - 1; last_nonzero >= 0; last_nonzero--)
+ if (id_data[last_nonzero])
+ break;
+
+ /* All zeros */
+ if (last_nonzero < 0)
+ return 0;
+
+ /* Calculate wraparound period */
+ for (period = 1; period < arrlen; period++)
+ if (nand_id_has_period(id_data, arrlen, period))
+ break;
+
+ /* There's a repeated pattern */
+ if (period < arrlen)
+ return period;
+
+ /* There are trailing zeros */
+ if (last_nonzero < arrlen - 1)
+ return last_nonzero + 1;
+
+ /* No pattern detected */
+ return arrlen;
+}
+
+/* Extract the number of bits per cell from the 3rd byte of the extended ID */
+static int nand_get_bits_per_cell(u8 cellinfo)
+{
+ int bits;
+
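+ /*
+ * Bits 2-3 of the cell-info byte encode the cell type: e.g. 0x00
+ * yields 1 bit per cell (SLC) and 0x04 yields 2 bits (MLC).
+ */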
+ bits = cellinfo & NAND_CI_CELLTYPE_MSK;
+ bits >>= NAND_CI_CELLTYPE_SHIFT;
+ return bits + 1;
+}
+
+/*
+ * Many newer NAND chips share similar device ID codes, which represent the size of the
+ * chip. The rest of the parameters must be decoded according to generic or
+ * manufacturer-specific "extended ID" decoding patterns.
+ */
+void nand_decode_ext_id(struct nand_chip *chip)
+{
+ struct nand_memory_organization *memorg;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int extid;
+ u8 *id_data = chip->id.data;
+
+ memorg = nanddev_get_memorg(&chip->base);
+
+ /* The 3rd id byte holds MLC / multichip data */
+ memorg->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
+ /* The 4th id byte is the important one */
+ extid = id_data[3];
+
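+ /*
+ * Worked example: extid 0x15 decodes to a 2KiB page (1024 << 1),
+ * 64 OOB bytes ((8 << 1) * (2048 >> 9)), 128KiB blocks (64KiB << 1)
+ * and an 8-bit bus, which is typical of 2Gbit SLC parts.
+ */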
+ /* Calc pagesize */
+ memorg->pagesize = 1024 << (extid & 0x03);
+ mtd->writesize = memorg->pagesize;
+ extid >>= 2;
+ /* Calc oobsize */
+ memorg->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9);
+ mtd->oobsize = memorg->oobsize;
+ extid >>= 2;
+ /* Calc blocksize. Blocksize is multiples of 64KiB */
+ memorg->pages_per_eraseblock = ((64 * 1024) << (extid & 0x03)) /
+ memorg->pagesize;
+ mtd->erasesize = (64 * 1024) << (extid & 0x03);
+ extid >>= 2;
+ /* Get buswidth information */
+ if (extid & 0x1)
+ chip->options |= NAND_BUSWIDTH_16;
+}
+EXPORT_SYMBOL_GPL(nand_decode_ext_id);
+
+/*
+ * Old devices have chip data hardcoded in the device ID table. nand_decode_id
+ * decodes a matching ID table entry and assigns the MTD size parameters for
+ * the chip.
+ */
+static void nand_decode_id(struct nand_chip *chip, struct nand_flash_dev *type)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg;
+
+ memorg = nanddev_get_memorg(&chip->base);
+
+ memorg->pages_per_eraseblock = type->erasesize / type->pagesize;
+ mtd->erasesize = type->erasesize;
+ memorg->pagesize = type->pagesize;
+ mtd->writesize = memorg->pagesize;
+ memorg->oobsize = memorg->pagesize / 32;
+ mtd->oobsize = memorg->oobsize;
+
+ /* All legacy ID NAND are small-page, SLC */
+ memorg->bits_per_cell = 1;
+}
+
+/*
+ * Set the bad block marker/indicator (BBM/BBI) patterns according to some
+ * heuristic patterns using various detected parameters (e.g., manufacturer,
+ * page size, cell-type information).
+ */
+static void nand_decode_bbm_options(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ /* Set the bad block position */
+ if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16))
+ chip->badblockpos = NAND_BBM_POS_LARGE;
+ else
+ chip->badblockpos = NAND_BBM_POS_SMALL;
+}
+
+static inline bool is_full_id_nand(struct nand_flash_dev *type)
+{
+ return type->id_len;
+}
+
+static bool find_full_id_nand(struct nand_chip *chip,
+ struct nand_flash_dev *type)
+{
+ struct nand_device *base = &chip->base;
+ struct nand_ecc_props requirements;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg;
+ u8 *id_data = chip->id.data;
+
+ memorg = nanddev_get_memorg(&chip->base);
+
+ if (!strncmp(type->id, id_data, type->id_len)) {
+ memorg->pagesize = type->pagesize;
+ mtd->writesize = memorg->pagesize;
+ memorg->pages_per_eraseblock = type->erasesize /
+ type->pagesize;
+ mtd->erasesize = type->erasesize;
+ memorg->oobsize = type->oobsize;
+ mtd->oobsize = memorg->oobsize;
+
+ memorg->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
+ memorg->eraseblocks_per_lun =
+ DIV_ROUND_DOWN_ULL((u64)type->chipsize << 20,
+ memorg->pagesize *
+ memorg->pages_per_eraseblock);
+ chip->options |= type->options;
+ requirements.strength = NAND_ECC_STRENGTH(type);
+ requirements.step_size = NAND_ECC_STEP(type);
+ nanddev_set_ecc_requirements(base, &requirements);
+
+ chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
+ if (!chip->parameters.model)
+ return false;
+
+ return true;
+ }
+ return false;
+}
+
+/*
+ * Manufacturer detection. Only used when the NAND is not ONFI or JEDEC
+ * compliant and does not have a full-id or legacy-id entry in the nand_ids
+ * table.
+ */
+static void nand_manufacturer_detect(struct nand_chip *chip)
+{
+ /*
+ * Try manufacturer detection if available and use
+ * nand_decode_ext_id() otherwise.
+ */
+ if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
+ chip->manufacturer.desc->ops->detect) {
+ struct nand_memory_organization *memorg;
+
+ memorg = nanddev_get_memorg(&chip->base);
+
+ /* The 3rd id byte holds MLC / multichip data */
+ memorg->bits_per_cell = nand_get_bits_per_cell(chip->id.data[2]);
+ chip->manufacturer.desc->ops->detect(chip);
+ } else {
+ nand_decode_ext_id(chip);
+ }
+}
+
+/*
+ * Manufacturer initialization. This function is called for all NANDs including
+ * ONFI and JEDEC compliant ones.
+ * Manufacturer drivers should put all their specific initialization code in
+ * their ->init() hook.
+ */
+static int nand_manufacturer_init(struct nand_chip *chip)
+{
+ if (!chip->manufacturer.desc || !chip->manufacturer.desc->ops ||
+ !chip->manufacturer.desc->ops->init)
+ return 0;
+
+ return chip->manufacturer.desc->ops->init(chip);
+}
+
+/*
+ * Manufacturer cleanup. This function is called for all NANDs including
+ * ONFI and JEDEC compliant ones.
+ * Manufacturer drivers should put all their specific cleanup code in their
+ * ->cleanup() hook.
+ */
+static void nand_manufacturer_cleanup(struct nand_chip *chip)
+{
+ /* Release manufacturer private data */
+ if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
+ chip->manufacturer.desc->ops->cleanup)
+ chip->manufacturer.desc->ops->cleanup(chip);
+}
+
+static const char *
+nand_manufacturer_name(const struct nand_manufacturer_desc *manufacturer_desc)
+{
+ return manufacturer_desc ? manufacturer_desc->name : "Unknown";
+}
+
+/*
+ * Get the flash and manufacturer id and lookup if the type is supported.
+ */
+static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
+{
+ const struct nand_manufacturer_desc *manufacturer_desc;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg;
+ int busw, ret;
+ u8 *id_data = chip->id.data;
+ u8 maf_id, dev_id;
+ u64 targetsize;
+
+ /*
+ * Let's start by initializing memorg fields that might be left
+ * unassigned by the ID-based detection logic.
+ */
+ memorg = nanddev_get_memorg(&chip->base);
+ memorg->planes_per_lun = 1;
+ memorg->luns_per_target = 1;
+
+ /*
+ * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
+ * after power-up.
+ */
+ ret = nand_reset(chip, 0);
+ if (ret)
+ return ret;
+
+ /* Select the device */
+ nand_select_target(chip, 0);
+
+ /* Send the command for reading device ID */
+ ret = nand_readid_op(chip, 0, id_data, 2);
+ if (ret)
+ return ret;
+
+ /* Read manufacturer and device IDs */
+ maf_id = id_data[0];
+ dev_id = id_data[1];
+
+ /*
+ * Try again to make sure, as on some systems bus-hold or other
+ * interface concerns can cause random data which looks like a
+ * plausible NAND flash ID to appear. If the two results do not
+ * match, ignore the device completely.
+ */
+
+ /* Read entire ID string */
+ ret = nand_readid_op(chip, 0, id_data, sizeof(chip->id.data));
+ if (ret)
+ return ret;
+
+ if (id_data[0] != maf_id || id_data[1] != dev_id) {
+ pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
+ maf_id, dev_id, id_data[0], id_data[1]);
+ return -ENODEV;
+ }
+
+ chip->id.len = nand_id_len(id_data, ARRAY_SIZE(chip->id.data));
+
+ /* Try to identify manufacturer */
+ manufacturer_desc = nand_get_manufacturer_desc(maf_id);
+ chip->manufacturer.desc = manufacturer_desc;
+
+ if (!type)
+ type = nand_flash_ids;
+
+ /*
+ * Save the NAND_BUSWIDTH_16 flag before letting auto-detection logic
+ * override it.
+ * This is required to make sure initial NAND bus width set by the
+ * NAND controller driver is coherent with the real NAND bus width
+ * (extracted by auto-detection code).
+ */
+ busw = chip->options & NAND_BUSWIDTH_16;
+
+ /*
+ * The flag is only set (never cleared), reset it to its default value
+ * before starting auto-detection.
+ */
+ chip->options &= ~NAND_BUSWIDTH_16;
+
+ for (; type->name != NULL; type++) {
+ if (is_full_id_nand(type)) {
+ if (find_full_id_nand(chip, type))
+ goto ident_done;
+ } else if (dev_id == type->dev_id) {
+ break;
+ }
+ }
+
+ if (!type->name || !type->pagesize) {
+ /* Check if the chip is ONFI compliant */
+ ret = nand_onfi_detect(chip);
+ if (ret < 0)
+ return ret;
+ else if (ret)
+ goto ident_done;
+
+ /* Check if the chip is JEDEC compliant */
+ ret = nand_jedec_detect(chip);
+ if (ret < 0)
+ return ret;
+ else if (ret)
+ goto ident_done;
+ }
+
+ if (!type->name)
+ return -ENODEV;
+
+ chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
+ if (!chip->parameters.model)
+ return -ENOMEM;
+
+ if (!type->pagesize)
+ nand_manufacturer_detect(chip);
+ else
+ nand_decode_id(chip, type);
+
+ /* Get chip options */
+ chip->options |= type->options;
+
+ memorg->eraseblocks_per_lun =
+ DIV_ROUND_DOWN_ULL((u64)type->chipsize << 20,
+ memorg->pagesize *
+ memorg->pages_per_eraseblock);
+
+ident_done:
+ if (!mtd->name)
+ mtd->name = chip->parameters.model;
+
+ if (chip->options & NAND_BUSWIDTH_AUTO) {
+ WARN_ON(busw & NAND_BUSWIDTH_16);
+ nand_set_defaults(chip);
+ } else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
+ /*
+ * Check if the bus width is correct. Hardware drivers should
+ * set up the chip correctly!
+ */
+ pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
+ maf_id, dev_id);
+ pr_info("%s %s\n", nand_manufacturer_name(manufacturer_desc),
+ mtd->name);
+ pr_warn("bus width %d instead of %d bits\n", busw ? 16 : 8,
+ (chip->options & NAND_BUSWIDTH_16) ? 16 : 8);
+ ret = -EINVAL;
+
+ goto free_detect_allocation;
+ }
+
+ nand_decode_bbm_options(chip);
+
+ /* Calculate the address shift from the page size */
+ chip->page_shift = ffs(mtd->writesize) - 1;
+ /* Convert chipsize to number of pages per chip -1 */
+ targetsize = nanddev_target_size(&chip->base);
+ chip->pagemask = (targetsize >> chip->page_shift) - 1;
+
+ chip->bbt_erase_shift = chip->phys_erase_shift =
+ ffs(mtd->erasesize) - 1;
+ if (targetsize & 0xffffffff)
+ chip->chip_shift = ffs((unsigned)targetsize) - 1;
+ else {
+ chip->chip_shift = ffs((unsigned)(targetsize >> 32));
+ chip->chip_shift += 32 - 1;
+ }
+
+ if (chip->chip_shift - chip->page_shift > 16)
+ chip->options |= NAND_ROW_ADDR_3;
+
+ chip->badblockbits = 8;
+
+ nand_legacy_adjust_cmdfunc(chip);
+
+ pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
+ maf_id, dev_id);
+ pr_info("%s %s\n", nand_manufacturer_name(manufacturer_desc),
+ chip->parameters.model);
+ pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n",
+ (int)(targetsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
+ mtd->erasesize >> 10, mtd->writesize, mtd->oobsize);
+ return 0;
+
+free_detect_allocation:
+ kfree(chip->parameters.model);
+
+ return ret;
+}
+
+static enum nand_ecc_engine_type
+of_get_rawnand_ecc_engine_type_legacy(struct device_node *np)
+{
+ enum nand_ecc_legacy_mode {
+ NAND_ECC_INVALID,
+ NAND_ECC_NONE,
+ NAND_ECC_SOFT,
+ NAND_ECC_SOFT_BCH,
+ NAND_ECC_HW,
+ NAND_ECC_HW_SYNDROME,
+ NAND_ECC_ON_DIE,
+ };
+ const char * const nand_ecc_legacy_modes[] = {
+ [NAND_ECC_NONE] = "none",
+ [NAND_ECC_SOFT] = "soft",
+ [NAND_ECC_SOFT_BCH] = "soft_bch",
+ [NAND_ECC_HW] = "hw",
+ [NAND_ECC_HW_SYNDROME] = "hw_syndrome",
+ [NAND_ECC_ON_DIE] = "on-die",
+ };
+ enum nand_ecc_legacy_mode eng_type;
+ const char *pm;
+ int err;
+
+ err = of_property_read_string(np, "nand-ecc-mode", &pm);
+ if (err)
+ return NAND_ECC_ENGINE_TYPE_INVALID;
+
+ for (eng_type = NAND_ECC_NONE;
+ eng_type < ARRAY_SIZE(nand_ecc_legacy_modes); eng_type++) {
+ if (!strcasecmp(pm, nand_ecc_legacy_modes[eng_type])) {
+ switch (eng_type) {
+ case NAND_ECC_NONE:
+ return NAND_ECC_ENGINE_TYPE_NONE;
+ case NAND_ECC_SOFT:
+ case NAND_ECC_SOFT_BCH:
+ return NAND_ECC_ENGINE_TYPE_SOFT;
+ case NAND_ECC_HW:
+ case NAND_ECC_HW_SYNDROME:
+ return NAND_ECC_ENGINE_TYPE_ON_HOST;
+ case NAND_ECC_ON_DIE:
+ return NAND_ECC_ENGINE_TYPE_ON_DIE;
+ default:
+ break;
+ }
+ }
+ }
+
+ return NAND_ECC_ENGINE_TYPE_INVALID;
+}
+
+static enum nand_ecc_placement
+of_get_rawnand_ecc_placement_legacy(struct device_node *np)
+{
+ const char *pm;
+ int err;
+
+ err = of_property_read_string(np, "nand-ecc-mode", &pm);
+ if (!err) {
+ if (!strcasecmp(pm, "hw_syndrome"))
+ return NAND_ECC_PLACEMENT_INTERLEAVED;
+ }
+
+ return NAND_ECC_PLACEMENT_UNKNOWN;
+}
+
+static enum nand_ecc_algo of_get_rawnand_ecc_algo_legacy(struct device_node *np)
+{
+ const char *pm;
+ int err;
+
+ err = of_property_read_string(np, "nand-ecc-mode", &pm);
+ if (!err) {
+ if (!strcasecmp(pm, "soft"))
+ return NAND_ECC_ALGO_HAMMING;
+ else if (!strcasecmp(pm, "soft_bch"))
+ return NAND_ECC_ALGO_BCH;
+ }
+
+ return NAND_ECC_ALGO_UNKNOWN;
+}
+
+static void of_get_nand_ecc_legacy_user_config(struct nand_chip *chip)
+{
+ struct device_node *dn = nand_get_flash_node(chip);
+ struct nand_ecc_props *user_conf = &chip->base.ecc.user_conf;
+
+ if (user_conf->engine_type == NAND_ECC_ENGINE_TYPE_INVALID)
+ user_conf->engine_type = of_get_rawnand_ecc_engine_type_legacy(dn);
+
+ if (user_conf->algo == NAND_ECC_ALGO_UNKNOWN)
+ user_conf->algo = of_get_rawnand_ecc_algo_legacy(dn);
+
+ if (user_conf->placement == NAND_ECC_PLACEMENT_UNKNOWN)
+ user_conf->placement = of_get_rawnand_ecc_placement_legacy(dn);
+}
+
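+/*
+ * Example DT fragment parsed by the helpers below ("nand-bus-width"
+ * defaults to 8 when the property is absent):
+ *
+ *   nand@0 {
+ *       nand-bus-width = <16>;
+ *       nand-on-flash-bbt;
+ *   };
+ */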
+static int of_get_nand_bus_width(struct device_node *np)
+{
+ u32 val;
+
+ if (of_property_read_u32(np, "nand-bus-width", &val))
+ return 8;
+
+ switch (val) {
+ case 8:
+ case 16:
+ return val;
+ default:
+ return -EIO;
+ }
+}
+
+static bool of_get_nand_on_flash_bbt(struct device_node *np)
+{
+ return of_property_read_bool(np, "nand-on-flash-bbt");
+}
+
+static int rawnand_dt_init(struct nand_chip *chip)
+{
+ struct nand_device *nand = mtd_to_nanddev(nand_to_mtd(chip));
+ struct device_node *dn = nand_get_flash_node(chip);
+
+ if (!dn)
+ return 0;
+
+ if (of_get_nand_bus_width(dn) == 16)
+ chip->options |= NAND_BUSWIDTH_16;
+
+ if (of_property_read_bool(dn, "nand-is-boot-medium"))
+ chip->options |= NAND_IS_BOOT_MEDIUM;
+
+ if (of_get_nand_on_flash_bbt(dn))
+ chip->bbt_options |= NAND_BBT_USE_FLASH;
+
+ of_get_nand_ecc_user_config(nand);
+ of_get_nand_ecc_legacy_user_config(chip);
+
+ /*
+ * If neither the user nor the NAND controller have requested a specific
+ * ECC engine type, we will default to NAND_ECC_ENGINE_TYPE_ON_HOST.
+ */
+ nand->ecc.defaults.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+
+ /*
+ * Use the user requested engine type, unless there is none, in this
+ * case default to the NAND controller choice, otherwise fallback to
+ * the raw NAND default one.
+ */
+ if (nand->ecc.user_conf.engine_type != NAND_ECC_ENGINE_TYPE_INVALID)
+ chip->ecc.engine_type = nand->ecc.user_conf.engine_type;
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_INVALID)
+ chip->ecc.engine_type = nand->ecc.defaults.engine_type;
+
+ chip->ecc.placement = nand->ecc.user_conf.placement;
+ chip->ecc.algo = nand->ecc.user_conf.algo;
+ chip->ecc.strength = nand->ecc.user_conf.strength;
+ chip->ecc.size = nand->ecc.user_conf.step_size;
+
+ return 0;
+}
+
+/**
+ * nand_scan_ident - Scan for the NAND device
+ * @chip: NAND chip object
+ * @maxchips: number of chips to scan for
+ * @table: alternative NAND ID table
+ *
+ * This is the first phase of the normal nand_scan() function. It reads the
+ * flash ID and sets up MTD fields accordingly.
+ *
+ * This helper used to be called directly from controller drivers that needed
+ * to tweak some ECC-related parameters before nand_scan_tail(). This separation
+ * prevented dynamic allocations during this phase, which was inconvenient and
+ * has been banned for the benefit of the ->init_ecc()/cleanup_ecc() hooks.
+ */
+static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips,
+ struct nand_flash_dev *table)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg;
+ int nand_maf_id, nand_dev_id;
+ unsigned int i;
+ int ret;
+
+ memorg = nanddev_get_memorg(&chip->base);
+
+ /* Assume all dies are deselected when we enter nand_scan_ident(). */
+ chip->cur_cs = -1;
+
+ mutex_init(&chip->lock);
+ init_waitqueue_head(&chip->resume_wq);
+
+ /* Enforce the right timings for reset/detection */
+ chip->current_interface_config = nand_get_reset_interface_config();
+
+ ret = rawnand_dt_init(chip);
+ if (ret)
+ return ret;
+
+ if (!mtd->name && mtd->dev.parent)
+ mtd->name = dev_name(mtd->dev.parent);
+
+ /* Set the default functions */
+ nand_set_defaults(chip);
+
+ ret = nand_legacy_check_hooks(chip);
+ if (ret)
+ return ret;
+
+ memorg->ntargets = maxchips;
+
+ /* Read the flash type */
+ ret = nand_detect(chip, table);
+ if (ret) {
+ if (!(chip->options & NAND_SCAN_SILENT_NODEV))
+ pr_warn("No NAND device found\n");
+ nand_deselect_target(chip);
+ return ret;
+ }
+
+ nand_maf_id = chip->id.data[0];
+ nand_dev_id = chip->id.data[1];
+
+ nand_deselect_target(chip);
+
+ /* Check for a chip array */
+ for (i = 1; i < maxchips; i++) {
+ u8 id[2];
+
+ /* See comment in nand_get_flash_type for reset */
+ ret = nand_reset(chip, i);
+ if (ret)
+ break;
+
+ nand_select_target(chip, i);
+ /* Send the command for reading device ID */
+ ret = nand_readid_op(chip, 0, id, sizeof(id));
+ if (ret)
+ break;
+ /* Read manufacturer and device IDs */
+ if (nand_maf_id != id[0] || nand_dev_id != id[1]) {
+ nand_deselect_target(chip);
+ break;
+ }
+ nand_deselect_target(chip);
+ }
+ if (i > 1)
+ pr_info("%d chips detected\n", i);
+
+ /* Store the number of chips and calc total size for mtd */
+ memorg->ntargets = i;
+ mtd->size = i * nanddev_target_size(&chip->base);
+
+ return 0;
+}
+
+static void nand_scan_ident_cleanup(struct nand_chip *chip)
+{
+ kfree(chip->parameters.model);
+ kfree(chip->parameters.onfi);
+}
+
+static int nand_set_ecc_on_host_ops(struct nand_chip *chip)
+{
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+ switch (ecc->placement) {
+ case NAND_ECC_PLACEMENT_UNKNOWN:
+ case NAND_ECC_PLACEMENT_OOB:
+ /* Use standard hwecc read page function? */
+ if (!ecc->read_page)
+ ecc->read_page = nand_read_page_hwecc;
+ if (!ecc->write_page)
+ ecc->write_page = nand_write_page_hwecc;
+ if (!ecc->read_page_raw)
+ ecc->read_page_raw = nand_read_page_raw;
+ if (!ecc->write_page_raw)
+ ecc->write_page_raw = nand_write_page_raw;
+ if (!ecc->read_oob)
+ ecc->read_oob = nand_read_oob_std;
+ if (!ecc->write_oob)
+ ecc->write_oob = nand_write_oob_std;
+ if (!ecc->read_subpage)
+ ecc->read_subpage = nand_read_subpage;
+ if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
+ ecc->write_subpage = nand_write_subpage_hwecc;
+ fallthrough;
+
+ case NAND_ECC_PLACEMENT_INTERLEAVED:
+ if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
+ (!ecc->read_page ||
+ ecc->read_page == nand_read_page_hwecc ||
+ !ecc->write_page ||
+ ecc->write_page == nand_write_page_hwecc)) {
+ WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
+ return -EINVAL;
+ }
+ /* Use standard syndrome read/write page function? */
+ if (!ecc->read_page)
+ ecc->read_page = nand_read_page_syndrome;
+ if (!ecc->write_page)
+ ecc->write_page = nand_write_page_syndrome;
+ if (!ecc->read_page_raw)
+ ecc->read_page_raw = nand_read_page_raw_syndrome;
+ if (!ecc->write_page_raw)
+ ecc->write_page_raw = nand_write_page_raw_syndrome;
+ if (!ecc->read_oob)
+ ecc->read_oob = nand_read_oob_syndrome;
+ if (!ecc->write_oob)
+ ecc->write_oob = nand_write_oob_syndrome;
+ break;
+
+ default:
+ pr_warn("Invalid NAND_ECC_PLACEMENT %d\n",
+ ecc->placement);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int nand_set_ecc_soft_ops(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_device *nanddev = mtd_to_nanddev(mtd);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+ if (WARN_ON(ecc->engine_type != NAND_ECC_ENGINE_TYPE_SOFT))
+ return -EINVAL;
+
+ switch (ecc->algo) {
+ case NAND_ECC_ALGO_HAMMING:
+ ecc->calculate = nand_calculate_ecc;
+ ecc->correct = nand_correct_data;
+ ecc->read_page = nand_read_page_swecc;
+ ecc->read_subpage = nand_read_subpage;
+ ecc->write_page = nand_write_page_swecc;
+ if (!ecc->read_page_raw)
+ ecc->read_page_raw = nand_read_page_raw;
+ if (!ecc->write_page_raw)
+ ecc->write_page_raw = nand_write_page_raw;
+ ecc->read_oob = nand_read_oob_std;
+ ecc->write_oob = nand_write_oob_std;
+ if (!ecc->size)
+ ecc->size = 256;
+ ecc->bytes = 3;
+ ecc->strength = 1;
+
+ if (IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC))
+ ecc->options |= NAND_ECC_SOFT_HAMMING_SM_ORDER;
+
+ return 0;
+ case NAND_ECC_ALGO_BCH:
+ if (!mtd_nand_has_bch()) {
+ WARN(1, "CONFIG_MTD_NAND_ECC_SW_BCH not enabled\n");
+ return -EINVAL;
+ }
+ ecc->calculate = nand_bch_calculate_ecc;
+ ecc->correct = nand_bch_correct_data;
+ ecc->read_page = nand_read_page_swecc;
+ ecc->read_subpage = nand_read_subpage;
+ ecc->write_page = nand_write_page_swecc;
+ if (!ecc->read_page_raw)
+ ecc->read_page_raw = nand_read_page_raw;
+ if (!ecc->write_page_raw)
+ ecc->write_page_raw = nand_write_page_raw;
+ ecc->read_oob = nand_read_oob_std;
+ ecc->write_oob = nand_write_oob_std;
+
+ /*
+ * Board driver should supply ecc.size and ecc.strength
+ * values to select how many bits are correctable.
+ * Otherwise, default to 4 bits for large page devices.
+ */
+ if (!ecc->size && (mtd->oobsize >= 64)) {
+ ecc->size = 512;
+ ecc->strength = 4;
+ }
+
+ /*
+	 * If no ECC placement scheme was provided, pick the default
+	 * large page one.
+ */
+ if (!mtd->ooblayout) {
+ /* handle large page devices only */
+ if (mtd->oobsize < 64) {
+ WARN(1, "OOB layout is required when using software BCH on small pages\n");
+ return -EINVAL;
+ }
+
+ mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout());
+
+ }
+
+ /*
+ * We can only maximize ECC config when the default layout is
+ * used, otherwise we don't know how many bytes can really be
+ * used.
+ */
+ if (mtd->ooblayout == nand_get_large_page_ooblayout() &&
+ nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH) {
+ int steps, bytes;
+
+			/* Always prefer 1k blocks over 512-byte ones */
+ ecc->size = 1024;
+ steps = mtd->writesize / ecc->size;
+
+ /* Reserve 2 bytes for the BBM */
+ bytes = (mtd->oobsize - 2) / steps;
+ ecc->strength = bytes * 8 / fls(8 * ecc->size);
+ }
+
+ /* See nand_bch_init() for details. */
+ ecc->bytes = 0;
+ ecc->priv = nand_bch_init(mtd);
+ if (!ecc->priv) {
+ WARN(1, "BCH ECC initialization failed!\n");
+ return -EINVAL;
+ }
+ return 0;
+ default:
+ WARN(1, "Unsupported ECC algorithm!\n");
+ return -EINVAL;
+ }
+}
+
+/**
+ * nand_check_ecc_caps - check the sanity of preset ECC settings
+ * @chip: nand chip info structure
+ * @caps: ECC caps info structure
+ * @oobavail: OOB size that the ECC engine can use
+ *
+ * When ECC step size and strength are already set, check if they are supported
+ * by the controller and the calculated ECC bytes fit within the chip's OOB.
+ * On success, the calculated number of ECC bytes is set.
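+ *
+ * Example: with a preset (step, strength) of (512, 8) on a 2 KiB page,
+ * a typical BCH8 implementation needs 13 ECC bytes per step, so the
+ * four steps consume 52 of the available OOB bytes.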
+ */
+static int
+nand_check_ecc_caps(struct nand_chip *chip,
+ const struct nand_ecc_caps *caps, int oobavail)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ const struct nand_ecc_step_info *stepinfo;
+ int preset_step = chip->ecc.size;
+ int preset_strength = chip->ecc.strength;
+ int ecc_bytes, nsteps = mtd->writesize / preset_step;
+ int i, j;
+
+ for (i = 0; i < caps->nstepinfos; i++) {
+ stepinfo = &caps->stepinfos[i];
+
+ if (stepinfo->stepsize != preset_step)
+ continue;
+
+ for (j = 0; j < stepinfo->nstrengths; j++) {
+ if (stepinfo->strengths[j] != preset_strength)
+ continue;
+
+ ecc_bytes = caps->calc_ecc_bytes(preset_step,
+ preset_strength);
+ if (WARN_ON_ONCE(ecc_bytes < 0))
+ return ecc_bytes;
+
+ if (ecc_bytes * nsteps > oobavail) {
+ pr_err("ECC (step, strength) = (%d, %d) does not fit in OOB",
+ preset_step, preset_strength);
+ return -ENOSPC;
+ }
+
+ chip->ecc.bytes = ecc_bytes;
+
+ return 0;
+ }
+ }
+
+ pr_err("ECC (step, strength) = (%d, %d) not supported on this controller",
+ preset_step, preset_strength);
+
+ return -ENOTSUPP;
+}
+
+/**
+ * nand_match_ecc_req - meet the chip's requirement with least ECC bytes
+ * @chip: nand chip info structure
+ * @caps: ECC engine caps info structure
+ * @oobavail: OOB size that the ECC engine can use
+ *
+ * If a chip's ECC requirement is provided, try to meet it with the least
+ * number of ECC bytes (i.e. with the largest number of free OOB bytes).
+ * On success, the chosen ECC settings are set.
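+ *
+ * Example: a chip requiring 4 bits per 512-byte step on a 2 KiB page
+ * needs 16 correctable bits per page; a supported (1024, 8) setting
+ * also yields 16 bits per page and is chosen if its total ECC byte
+ * count is the lowest.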
+ */
+static int
+nand_match_ecc_req(struct nand_chip *chip,
+ const struct nand_ecc_caps *caps, int oobavail)
+{
+ const struct nand_ecc_props *requirements =
+ nanddev_get_ecc_requirements(&chip->base);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ const struct nand_ecc_step_info *stepinfo;
+ int req_step = requirements->step_size;
+ int req_strength = requirements->strength;
+ int req_corr, step_size, strength, nsteps, ecc_bytes, ecc_bytes_total;
+ int best_step, best_strength, best_ecc_bytes;
+ int best_ecc_bytes_total = INT_MAX;
+ int i, j;
+
+ /* No information provided by the NAND chip */
+ if (!req_step || !req_strength)
+ return -ENOTSUPP;
+
+ /* number of correctable bits the chip requires in a page */
+ req_corr = mtd->writesize / req_step * req_strength;
+
+ for (i = 0; i < caps->nstepinfos; i++) {
+ stepinfo = &caps->stepinfos[i];
+ step_size = stepinfo->stepsize;
+
+ for (j = 0; j < stepinfo->nstrengths; j++) {
+ strength = stepinfo->strengths[j];
+
+ /*
+ * If both step size and strength are smaller than the
+ * chip's requirement, it is not easy to compare the
+			 * resulting reliability.
+ */
+ if (step_size < req_step && strength < req_strength)
+ continue;
+
+ if (mtd->writesize % step_size)
+ continue;
+
+ nsteps = mtd->writesize / step_size;
+
+ ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
+ if (WARN_ON_ONCE(ecc_bytes < 0))
+ continue;
+ ecc_bytes_total = ecc_bytes * nsteps;
+
+ if (ecc_bytes_total > oobavail ||
+ strength * nsteps < req_corr)
+ continue;
+
+ /*
+			 * We assume the best is to meet the chip's requirement
+ * with the least number of ECC bytes.
+ */
+ if (ecc_bytes_total < best_ecc_bytes_total) {
+ best_ecc_bytes_total = ecc_bytes_total;
+ best_step = step_size;
+ best_strength = strength;
+ best_ecc_bytes = ecc_bytes;
+ }
+ }
+ }
+
+ if (best_ecc_bytes_total == INT_MAX)
+ return -ENOTSUPP;
+
+ chip->ecc.size = best_step;
+ chip->ecc.strength = best_strength;
+ chip->ecc.bytes = best_ecc_bytes;
+
+ return 0;
+}
+
+/**
+ * nand_maximize_ecc - choose the max ECC strength available
+ * @chip: nand chip info structure
+ * @caps: ECC engine caps info structure
+ * @oobavail: OOB size that the ECC engine can use
+ *
+ * Choose the max ECC strength that is supported on the controller, and can fit
+ * within the chip's OOB. On success, the chosen ECC settings are set.
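+ *
+ * Example: if both (512, 8) and (1024, 16) fit, each corrects 32 bits
+ * per 2 KiB page, and the larger 1024-byte step wins the tie.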
+ */
+static int
+nand_maximize_ecc(struct nand_chip *chip,
+ const struct nand_ecc_caps *caps, int oobavail)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ const struct nand_ecc_step_info *stepinfo;
+ int step_size, strength, nsteps, ecc_bytes, corr;
+ int best_corr = 0;
+ int best_step = 0;
+ int best_strength, best_ecc_bytes;
+ int i, j;
+
+ for (i = 0; i < caps->nstepinfos; i++) {
+ stepinfo = &caps->stepinfos[i];
+ step_size = stepinfo->stepsize;
+
+ /* If chip->ecc.size is already set, respect it */
+ if (chip->ecc.size && step_size != chip->ecc.size)
+ continue;
+
+ for (j = 0; j < stepinfo->nstrengths; j++) {
+ strength = stepinfo->strengths[j];
+
+ if (mtd->writesize % step_size)
+ continue;
+
+ nsteps = mtd->writesize / step_size;
+
+ ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
+ if (WARN_ON_ONCE(ecc_bytes < 0))
+ continue;
+
+ if (ecc_bytes * nsteps > oobavail)
+ continue;
+
+ corr = strength * nsteps;
+
+ /*
+ * If the number of correctable bits is the same,
+ * bigger step_size has more reliability.
+ */
+ if (corr > best_corr ||
+ (corr == best_corr && step_size > best_step)) {
+ best_corr = corr;
+ best_step = step_size;
+ best_strength = strength;
+ best_ecc_bytes = ecc_bytes;
+ }
+ }
+ }
+
+ if (!best_corr)
+ return -ENOTSUPP;
+
+ chip->ecc.size = best_step;
+ chip->ecc.strength = best_strength;
+ chip->ecc.bytes = best_ecc_bytes;
+
+ return 0;
+}
+
+/**
+ * nand_ecc_choose_conf - Set the ECC strength and ECC step size
+ * @chip: nand chip info structure
+ * @caps: ECC engine caps info structure
+ * @oobavail: OOB size that the ECC engine can use
+ *
+ * Choose the ECC configuration according to following logic.
+ *
+ * 1. If both ECC step size and ECC strength are already set (usually by DT)
+ * then check if it is supported by this controller.
+ * 2. If the user provided the nand-ecc-maximize property, then select maximum
+ * ECC strength.
+ * 3. Otherwise, try to match the ECC step size and ECC strength closest
+ * to the chip's requirement. If available OOB size can't fit the chip
+ * requirement then fallback to the maximum ECC step size and ECC strength.
+ *
+ * On success, the chosen ECC settings are set.
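+ *
+ * For example, a device tree providing both nand-ecc-step-size = <512>
+ * and nand-ecc-strength = <8> follows rule 1, while nand-ecc-maximize
+ * alone triggers rule 2.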
+ */
+int nand_ecc_choose_conf(struct nand_chip *chip,
+ const struct nand_ecc_caps *caps, int oobavail)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_device *nanddev = mtd_to_nanddev(mtd);
+
+ if (WARN_ON(oobavail < 0 || oobavail > mtd->oobsize))
+ return -EINVAL;
+
+ if (chip->ecc.size && chip->ecc.strength)
+ return nand_check_ecc_caps(chip, caps, oobavail);
+
+ if (nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH)
+ return nand_maximize_ecc(chip, caps, oobavail);
+
+ if (!nand_match_ecc_req(chip, caps, oobavail))
+ return 0;
+
+ return nand_maximize_ecc(chip, caps, oobavail);
+}
+EXPORT_SYMBOL_GPL(nand_ecc_choose_conf);
+
+static int rawnand_erase(struct nand_device *nand, const struct nand_pos *pos)
+{
+ struct nand_chip *chip = container_of(nand, struct nand_chip,
+ base);
+ unsigned int eb = nanddev_pos_to_row(nand, pos);
+ int ret;
+
+ eb >>= nand->rowconv.eraseblock_addr_shift;
+
+ nand_select_target(chip, pos->target);
+ ret = nand_erase_op(chip, eb);
+ nand_deselect_target(chip);
+
+ return ret;
+}
+
+static int rawnand_markbad(struct nand_device *nand,
+ const struct nand_pos *pos)
+{
+ struct nand_chip *chip = container_of(nand, struct nand_chip,
+ base);
+
+ return nand_markbad_bbm(chip, nanddev_pos_to_offs(nand, pos));
+}
+
+static bool rawnand_isbad(struct nand_device *nand, const struct nand_pos *pos)
+{
+ struct nand_chip *chip = container_of(nand, struct nand_chip,
+ base);
+ int ret;
+
+ nand_select_target(chip, pos->target);
+ ret = nand_isbad_bbm(chip, nanddev_pos_to_offs(nand, pos));
+ nand_deselect_target(chip);
+
+ return ret;
+}
+
+static const struct nand_ops rawnand_ops = {
+ .erase = rawnand_erase,
+ .markbad = rawnand_markbad,
+ .isbad = rawnand_isbad,
+};
+
+/**
+ * nand_scan_tail - Scan for the NAND device
+ * @chip: NAND chip object
+ *
+ * This is the second phase of the normal nand_scan() function. It fills out
+ * all the uninitialized function pointers with the defaults and scans for a
+ * bad block table if appropriate.
+ */
+static int nand_scan_tail(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int ret, i;
+
+ /* New bad blocks should be marked in OOB, flash-based BBT, or both */
+ if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
+ !(chip->bbt_options & NAND_BBT_USE_FLASH))) {
+ return -EINVAL;
+ }
+
+ chip->data_buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
+ if (!chip->data_buf)
+ return -ENOMEM;
+
+ /*
+ * FIXME: some NAND manufacturer drivers expect the first die to be
+ * selected when manufacturer->init() is called. They should be fixed
+	 * to explicitly select the relevant die when interacting with the NAND
+ * chip.
+ */
+ nand_select_target(chip, 0);
+ ret = nand_manufacturer_init(chip);
+ nand_deselect_target(chip);
+ if (ret)
+ goto err_free_buf;
+
+ /* Set the internal oob buffer location, just after the page data */
+ chip->oob_poi = chip->data_buf + mtd->writesize;
+
+ /*
+ * If no default placement scheme is given, select an appropriate one.
+ */
+ if (!mtd->ooblayout &&
+ !(ecc->engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
+ ecc->algo == NAND_ECC_ALGO_BCH)) {
+ switch (mtd->oobsize) {
+ case 8:
+ case 16:
+ mtd_set_ooblayout(mtd, nand_get_small_page_ooblayout());
+ break;
+ case 64:
+ case 128:
+ mtd_set_ooblayout(mtd,
+ nand_get_large_page_hamming_ooblayout());
+ break;
+ default:
+ /*
+ * Expose the whole OOB area to users if ECC_NONE
+			 * is passed. We could do that for all kinds of
+ * ->oobsize, but we must keep the old large/small
+ * page with ECC layout when ->oobsize <= 128 for
+ * compatibility reasons.
+ */
+ if (ecc->engine_type == NAND_ECC_ENGINE_TYPE_NONE) {
+ mtd_set_ooblayout(mtd,
+ nand_get_large_page_ooblayout());
+ break;
+ }
+
+ WARN(1, "No oob scheme defined for oobsize %d\n",
+ mtd->oobsize);
+ ret = -EINVAL;
+ goto err_nand_manuf_cleanup;
+ }
+ }
+
+	/*
+	 * Check the ECC mode: if hardware ECC (e.g. 3 bytes per 512-byte
+	 * step) is selected but the page is smaller than the ECC step
+	 * (e.g. a 256-byte page), fall back to software ECC.
+	 */
+
+ switch (ecc->engine_type) {
+ case NAND_ECC_ENGINE_TYPE_ON_HOST:
+ ret = nand_set_ecc_on_host_ops(chip);
+ if (ret)
+ goto err_nand_manuf_cleanup;
+
+ if (mtd->writesize >= ecc->size) {
+ if (!ecc->strength) {
+ WARN(1, "Driver must set ecc.strength when using hardware ECC\n");
+ ret = -EINVAL;
+ goto err_nand_manuf_cleanup;
+ }
+ break;
+ }
+ pr_warn("%d byte HW ECC not possible on %d byte page size, fallback to SW ECC\n",
+ ecc->size, mtd->writesize);
+ ecc->engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+ ecc->algo = NAND_ECC_ALGO_HAMMING;
+ fallthrough;
+
+ case NAND_ECC_ENGINE_TYPE_SOFT:
+ ret = nand_set_ecc_soft_ops(chip);
+ if (ret)
+ goto err_nand_manuf_cleanup;
+ break;
+
+ case NAND_ECC_ENGINE_TYPE_ON_DIE:
+ if (!ecc->read_page || !ecc->write_page) {
+ WARN(1, "No ECC functions supplied; on-die ECC not possible\n");
+ ret = -EINVAL;
+ goto err_nand_manuf_cleanup;
+ }
+ if (!ecc->read_oob)
+ ecc->read_oob = nand_read_oob_std;
+ if (!ecc->write_oob)
+ ecc->write_oob = nand_write_oob_std;
+ break;
+
+ case NAND_ECC_ENGINE_TYPE_NONE:
+ pr_warn("NAND_ECC_ENGINE_TYPE_NONE selected by board driver. This is not recommended!\n");
+ ecc->read_page = nand_read_page_raw;
+ ecc->write_page = nand_write_page_raw;
+ ecc->read_oob = nand_read_oob_std;
+ ecc->read_page_raw = nand_read_page_raw;
+ ecc->write_page_raw = nand_write_page_raw;
+ ecc->write_oob = nand_write_oob_std;
+ ecc->size = mtd->writesize;
+ ecc->bytes = 0;
+ ecc->strength = 0;
+ break;
+
+ default:
+ WARN(1, "Invalid NAND_ECC_MODE %d\n", ecc->engine_type);
+ ret = -EINVAL;
+ goto err_nand_manuf_cleanup;
+ }
+
+ if (ecc->correct || ecc->calculate) {
+ ecc->calc_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
+ ecc->code_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
+ if (!ecc->calc_buf || !ecc->code_buf) {
+ ret = -ENOMEM;
+ goto err_nand_manuf_cleanup;
+ }
+ }
+
+ /* For many systems, the standard OOB write also works for raw */
+ if (!ecc->read_oob_raw)
+ ecc->read_oob_raw = ecc->read_oob;
+ if (!ecc->write_oob_raw)
+ ecc->write_oob_raw = ecc->write_oob;
+
+ /* propagate ecc info to mtd_info */
+ mtd->ecc_strength = ecc->strength;
+ mtd->ecc_step_size = ecc->size;
+
+ /*
+ * Set the number of read / write steps for one page depending on ECC
+ * mode.
+ */
+ ecc->steps = mtd->writesize / ecc->size;
+ if (ecc->steps * ecc->size != mtd->writesize) {
+ WARN(1, "Invalid ECC parameters\n");
+ ret = -EINVAL;
+ goto err_nand_manuf_cleanup;
+ }
+
+ ecc->total = ecc->steps * ecc->bytes;
+ chip->base.ecc.ctx.total = ecc->total;
+
+ if (ecc->total > mtd->oobsize) {
+ WARN(1, "Total number of ECC bytes exceeded oobsize\n");
+ ret = -EINVAL;
+ goto err_nand_manuf_cleanup;
+ }
+
+ /*
+ * The number of bytes available for a client to place data into
+ * the out of band area.
+ */
+ ret = mtd_ooblayout_count_freebytes(mtd);
+ if (ret < 0)
+ ret = 0;
+
+ mtd->oobavail = ret;
+
+ /* ECC sanity check: warn if it's too weak */
+ if (!nand_ecc_is_strong_enough(&chip->base))
+ pr_warn("WARNING: %s: the ECC used on your system (%db/%dB) is too weak compared to the one required by the NAND chip (%db/%dB)\n",
+ mtd->name, chip->ecc.strength, chip->ecc.size,
+ nanddev_get_ecc_requirements(&chip->base)->strength,
+ nanddev_get_ecc_requirements(&chip->base)->step_size);
+
+ /* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
+ if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
+ switch (ecc->steps) {
+ case 2:
+ mtd->subpage_sft = 1;
+ break;
+ case 4:
+ case 8:
+ case 16:
+ mtd->subpage_sft = 2;
+ break;
+ }
+ }
+ chip->subpagesize = mtd->writesize >> mtd->subpage_sft;
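+	/*
+	 * Example: four ECC steps on a 2 KiB page yield subpage_sft = 2,
+	 * i.e. a 512-byte subpage write granularity.
+	 */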
+
+ /* Invalidate the pagebuffer reference */
+ chip->pagecache.page = -1;
+
+ /* Large page NAND with SOFT_ECC should support subpage reads */
+ switch (ecc->engine_type) {
+ case NAND_ECC_ENGINE_TYPE_SOFT:
+ if (chip->page_shift > 9)
+ chip->options |= NAND_SUBPAGE_READ;
+ break;
+
+ default:
+ break;
+ }
+
+ ret = nanddev_init(&chip->base, &rawnand_ops, mtd->owner);
+ if (ret)
+ goto err_nand_manuf_cleanup;
+
+ /* Adjust the MTD_CAP_ flags when NAND_ROM is set. */
+ if (chip->options & NAND_ROM)
+ mtd->flags = MTD_CAP_ROM;
+
+ /* Fill in remaining MTD driver data */
+ mtd->_erase = nand_erase;
+ mtd->_point = NULL;
+ mtd->_unpoint = NULL;
+ mtd->_panic_write = panic_nand_write;
+ mtd->_read_oob = nand_read_oob;
+ mtd->_write_oob = nand_write_oob;
+ mtd->_sync = nand_sync;
+ mtd->_lock = nand_lock;
+ mtd->_unlock = nand_unlock;
+ mtd->_suspend = nand_suspend;
+ mtd->_resume = nand_resume;
+ mtd->_reboot = nand_shutdown;
+ mtd->_block_isreserved = nand_block_isreserved;
+ mtd->_block_isbad = nand_block_isbad;
+ mtd->_block_markbad = nand_block_markbad;
+ mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;
+
+ /*
+	 * Initialize bitflip_threshold to its default prior to the scan_bbt()
+	 * call.
+ * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
+ * properly set.
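+	 *
+	 * Example: with mtd->ecc_strength = 8, the default threshold is
+	 * DIV_ROUND_UP(8 * 3, 4) = 6, so reads report -EUCLEAN once 6
+	 * bitflips are corrected within a single ECC step.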
+ */
+ if (!mtd->bitflip_threshold)
+ mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);
+
+ /* Find the fastest data interface for this chip */
+ ret = nand_choose_interface_config(chip);
+ if (ret)
+ goto err_nanddev_cleanup;
+
+ /* Enter fastest possible mode on all dies. */
+ for (i = 0; i < nanddev_ntargets(&chip->base); i++) {
+ ret = nand_setup_interface(chip, i);
+ if (ret)
+ goto err_free_interface_config;
+ }
+
+	/* Check if we should skip the bad block table scan */
+ if (chip->options & NAND_SKIP_BBTSCAN)
+ return 0;
+
+ /* Build bad block table */
+ ret = nand_create_bbt(chip);
+ if (ret)
+ goto err_free_interface_config;
+
+ return 0;
+
+err_free_interface_config:
+ kfree(chip->best_interface_config);
+
+err_nanddev_cleanup:
+ nanddev_cleanup(&chip->base);
+
+err_nand_manuf_cleanup:
+ nand_manufacturer_cleanup(chip);
+
+err_free_buf:
+ kfree(chip->data_buf);
+ kfree(ecc->code_buf);
+ kfree(ecc->calc_buf);
+
+ return ret;
+}
+
+static int nand_attach(struct nand_chip *chip)
+{
+ if (chip->controller->ops && chip->controller->ops->attach_chip)
+ return chip->controller->ops->attach_chip(chip);
+
+ return 0;
+}
+
+static void nand_detach(struct nand_chip *chip)
+{
+ if (chip->controller->ops && chip->controller->ops->detach_chip)
+ chip->controller->ops->detach_chip(chip);
+}
+
+/**
+ * nand_scan_with_ids - [NAND Interface] Scan for the NAND device
+ * @chip: NAND chip object
+ * @maxchips: number of chips to scan for.
+ * @ids: optional flash IDs table
+ *
+ * This fills out all the uninitialized function pointers with the defaults.
+ * The flash ID is read and the mtd/chip structures are filled with the
+ * appropriate values.
+ */
+int nand_scan_with_ids(struct nand_chip *chip, unsigned int maxchips,
+ struct nand_flash_dev *ids)
+{
+ int ret;
+
+ if (!maxchips)
+ return -EINVAL;
+
+ ret = nand_scan_ident(chip, maxchips, ids);
+ if (ret)
+ return ret;
+
+ ret = nand_attach(chip);
+ if (ret)
+ goto cleanup_ident;
+
+ ret = nand_scan_tail(chip);
+ if (ret)
+ goto detach_chip;
+
+ return 0;
+
+detach_chip:
+ nand_detach(chip);
+cleanup_ident:
+ nand_scan_ident_cleanup(chip);
+
+ return ret;
+}
+EXPORT_SYMBOL(nand_scan_with_ids);
+
+/**
+ * nand_cleanup - [NAND Interface] Free resources held by the NAND device
+ * @chip: NAND chip object
+ */
+void nand_cleanup(struct nand_chip *chip)
+{
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
+ chip->ecc.algo == NAND_ECC_ALGO_BCH)
+ nand_bch_free((struct nand_bch_control *)chip->ecc.priv);
+
+ nanddev_cleanup(&chip->base);
+
+ /* Free bad block table memory */
+ kfree(chip->bbt);
+ kfree(chip->data_buf);
+ kfree(chip->ecc.code_buf);
+ kfree(chip->ecc.calc_buf);
+
+ /* Free bad block descriptor memory */
+ if (chip->badblock_pattern && chip->badblock_pattern->options
+ & NAND_BBT_DYNAMICSTRUCT)
+ kfree(chip->badblock_pattern);
+
+ /* Free the data interface */
+ kfree(chip->best_interface_config);
+
+ /* Free manufacturer priv data. */
+ nand_manufacturer_cleanup(chip);
+
+ /* Free controller specific allocations after chip identification */
+ nand_detach(chip);
+
+ /* Free identification phase allocations */
+ nand_scan_ident_cleanup(chip);
+}
+EXPORT_SYMBOL_GPL(nand_cleanup);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
+MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
+MODULE_DESCRIPTION("Generic NAND flash driver code");
diff --git a/drivers/mtd/nand/raw/nand_bbt.c b/drivers/mtd/nand/raw/nand_bbt.c
new file mode 100644
index 000000000..344a24fd2
--- /dev/null
+++ b/drivers/mtd/nand/raw/nand_bbt.c
@@ -0,0 +1,1455 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Overview:
+ * Bad block table support for the NAND driver
+ *
+ * Copyright © 2004 Thomas Gleixner (tglx@linutronix.de)
+ *
+ * Description:
+ *
+ * When nand_scan_bbt is called, it tries to find the bad block table
+ * depending on the options in the BBT descriptor(s). If no flash based BBT
+ * (NAND_BBT_USE_FLASH) is specified then the device is scanned for factory
+ * marked good / bad blocks. This information is used to create a memory BBT.
+ * Once a new bad block is discovered, the "factory" information is updated
+ * on the device.
+ * If a flash based BBT is specified then the function first tries to find the
+ * BBT on flash. If a BBT is found then the contents are read and the memory
+ * based BBT is created. If a mirrored BBT is selected then the mirror is
+ * searched too and the versions are compared. If the mirror has a greater
+ * version number, then the mirror BBT is used to build the memory based BBT.
+ * If the tables are not versioned, then we "or" the bad block information.
+ * If one of the BBTs is out of date or does not exist, it is (re)created.
+ * If no BBT exists at all then the device is scanned for factory marked
+ * good / bad blocks and the bad block tables are created.
+ *
+ * For manufacturer-created BBTs, like the one found on M-SYS DOC devices,
+ * the BBT is searched and read but never created.
+ *
+ * The auto generated bad block table is located in the last good blocks
+ * of the device. The table is mirrored, so it can be updated if necessary.
+ * The table is marked in the OOB area with an ident pattern and a version
+ * number which indicates which of both tables is more up to date. If the NAND
+ * controller needs the complete OOB area for the ECC information then the
+ * option NAND_BBT_NO_OOB should be used (along with NAND_BBT_USE_FLASH, of
+ * course): it moves the ident pattern and the version byte into the data area
+ * and the OOB area will remain untouched.
+ *
+ * The table uses 2 bits per block
+ * 11b: block is good
+ * 00b: block is factory marked bad
+ * 01b, 10b: block is marked bad due to wear
+ *
+ * The memory bad block table uses the following scheme:
+ * 00b: block is good
+ * 01b: block is marked bad due to wear
+ * 10b: block is reserved (to protect the bbt area)
+ * 11b: block is factory marked bad
+ *
+ * Multichip devices like DOC store the bad block info per floor.
+ *
+ * Following assumptions are made:
+ * - bbts start at a page boundary; if autolocated, on a block boundary
+ * - the space necessary for a bbt in FLASH does not exceed a block boundary
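+ *
+ * Example: a 256 MiB device with 128 KiB eraseblocks has 2048 blocks;
+ * at 2 bits per block the table occupies (2048 * 2) / 8 = 512 bytes,
+ * well within a single eraseblock as assumed above.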
+ */
+
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/bbm.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/vmalloc.h>
+#include <linux/export.h>
+#include <linux/string.h>
+
+#include "internals.h"
+
+#define BBT_BLOCK_GOOD 0x00
+#define BBT_BLOCK_WORN 0x01
+#define BBT_BLOCK_RESERVED 0x02
+#define BBT_BLOCK_FACTORY_BAD 0x03
+
+#define BBT_ENTRY_MASK 0x03
+#define BBT_ENTRY_SHIFT 2
+
+static inline uint8_t bbt_get_entry(struct nand_chip *chip, int block)
+{
+ uint8_t entry = chip->bbt[block >> BBT_ENTRY_SHIFT];
+ entry >>= (block & BBT_ENTRY_MASK) * 2;
+ return entry & BBT_ENTRY_MASK;
+}
+
+static inline void bbt_mark_entry(struct nand_chip *chip, int block,
+ uint8_t mark)
+{
+ uint8_t msk = (mark & BBT_ENTRY_MASK) << ((block & BBT_ENTRY_MASK) * 2);
+ chip->bbt[block >> BBT_ENTRY_SHIFT] |= msk;
+}
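+
+/*
+ * Example: block 5 lives in bbt[1] (5 >> BBT_ENTRY_SHIFT) at bit offset
+ * (5 & BBT_ENTRY_MASK) * 2 = 2; marking it BBT_BLOCK_WORN ORs 0x04 into
+ * that byte, and bbt_get_entry() shifts the field back down to 0x01.
+ */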
+
+static int check_pattern_no_oob(uint8_t *buf, struct nand_bbt_descr *td)
+{
+ if (memcmp(buf, td->pattern, td->len))
+ return -1;
+ return 0;
+}
+
+/**
+ * check_pattern - [GENERIC] check if a pattern is in the buffer
+ * @buf: the buffer to search
+ * @len: the length of buffer to search
+ * @paglen: the pagelength
+ * @td: search pattern descriptor
+ *
+ * Check for a pattern at the given place. Used to search bad block tables and
+ * good / bad block identifiers.
+ */
+static int check_pattern(uint8_t *buf, int len, int paglen, struct nand_bbt_descr *td)
+{
+ if (td->options & NAND_BBT_NO_OOB)
+ return check_pattern_no_oob(buf, td);
+
+ /* Compare the pattern */
+ if (memcmp(buf + paglen + td->offs, td->pattern, td->len))
+ return -1;
+
+ return 0;
+}
+
+/**
+ * check_short_pattern - [GENERIC] check if a pattern is in the buffer
+ * @buf: the buffer to search
+ * @td: search pattern descriptor
+ *
+ * Check for a pattern at the given place. Used to search bad block tables and
+ * good / bad block identifiers. Same as check_pattern, but no optional empty
+ * check.
+ */
+static int check_short_pattern(uint8_t *buf, struct nand_bbt_descr *td)
+{
+ /* Compare the pattern */
+ if (memcmp(buf + td->offs, td->pattern, td->len))
+ return -1;
+ return 0;
+}
+
+/**
+ * add_marker_len - compute the length of the marker in data area
+ * @td: BBT descriptor used for computation
+ *
+ * The length will be 0 if the marker is located in OOB area.
+ */
+static u32 add_marker_len(struct nand_bbt_descr *td)
+{
+ u32 len;
+
+ if (!(td->options & NAND_BBT_NO_OOB))
+ return 0;
+
+ len = td->len;
+ if (td->options & NAND_BBT_VERSION)
+ len++;
+ return len;
+}
+
+/**
+ * read_bbt - [GENERIC] Read the bad block table starting from page
+ * @this: NAND chip object
+ * @buf: temporary buffer
+ * @page: the starting page
+ * @num: the number of bbt descriptors to read
+ * @td: the bbt description table
+ * @offs: block number offset in the table
+ *
+ * Read the bad block table starting from page.
+ */
+static int read_bbt(struct nand_chip *this, uint8_t *buf, int page, int num,
+ struct nand_bbt_descr *td, int offs)
+{
+ struct mtd_info *mtd = nand_to_mtd(this);
+ int res, ret = 0, i, j, act = 0;
+ size_t retlen, len, totlen;
+ loff_t from;
+ int bits = td->options & NAND_BBT_NRBITS_MSK;
+ uint8_t msk = (uint8_t)((1 << bits) - 1);
+ u32 marker_len;
+ int reserved_block_code = td->reserved_block_code;
+
+ totlen = (num * bits) >> 3;
+ marker_len = add_marker_len(td);
+ from = ((loff_t)page) << this->page_shift;
+
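+	/*
+	 * Example: with NAND_BBT_2BIT, bits = 2 and msk = 0x03, so a
+	 * 2048-block table occupies totlen = (2048 * 2) >> 3 = 512 bytes
+	 * on flash.
+	 */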
+ while (totlen) {
+ len = min(totlen, (size_t)(1 << this->bbt_erase_shift));
+ if (marker_len) {
+ /*
+			 * If the BBT marker is not in the OOB area, it
+			 * only occupies the first page, so skip over it.
+ */
+ len -= marker_len;
+ from += marker_len;
+ marker_len = 0;
+ }
+ res = mtd_read(mtd, from, len, &retlen, buf);
+ if (res < 0) {
+ if (mtd_is_eccerr(res)) {
+ pr_info("nand_bbt: ECC error in BBT at 0x%012llx\n",
+					from & ~(loff_t)(mtd->writesize - 1));
+ return res;
+ } else if (mtd_is_bitflip(res)) {
+ pr_info("nand_bbt: corrected error in BBT at 0x%012llx\n",
+					from & ~(loff_t)(mtd->writesize - 1));
+ ret = res;
+ } else {
+ pr_info("nand_bbt: error reading BBT\n");
+ return res;
+ }
+ }
+
+ /* Analyse data */
+ for (i = 0; i < len; i++) {
+ uint8_t dat = buf[i];
+ for (j = 0; j < 8; j += bits, act++) {
+ uint8_t tmp = (dat >> j) & msk;
+ if (tmp == msk)
+ continue;
+ if (reserved_block_code && (tmp == reserved_block_code)) {
+ pr_info("nand_read_bbt: reserved block at 0x%012llx\n",
+ (loff_t)(offs + act) <<
+ this->bbt_erase_shift);
+ bbt_mark_entry(this, offs + act,
+ BBT_BLOCK_RESERVED);
+ mtd->ecc_stats.bbtblocks++;
+ continue;
+ }
+ /*
+ * Leave it for now, if it's matured we can
+ * move this message to pr_debug.
+ */
+ pr_info("nand_read_bbt: bad block at 0x%012llx\n",
+ (loff_t)(offs + act) <<
+ this->bbt_erase_shift);
+ /* Factory marked bad or worn out? */
+ if (tmp == 0)
+ bbt_mark_entry(this, offs + act,
+ BBT_BLOCK_FACTORY_BAD);
+ else
+ bbt_mark_entry(this, offs + act,
+ BBT_BLOCK_WORN);
+ mtd->ecc_stats.badblocks++;
+ }
+ }
+ totlen -= len;
+ from += len;
+ }
+ return ret;
+}
+
+/**
+ * read_abs_bbt - [GENERIC] Read the bad block table starting at a given page
+ * @this: NAND chip object
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ * @chip: read the table for a specific chip, -1 read all chips; applies only if
+ * NAND_BBT_PERCHIP option is set
+ *
+ * Read the bad block table for all chips starting at a given page. We assume
+ * that the bbt bits are in consecutive order.
+ */
+static int read_abs_bbt(struct nand_chip *this, uint8_t *buf,
+ struct nand_bbt_descr *td, int chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(this);
+ u64 targetsize = nanddev_target_size(&this->base);
+ int res = 0, i;
+
+ if (td->options & NAND_BBT_PERCHIP) {
+ int offs = 0;
+ for (i = 0; i < nanddev_ntargets(&this->base); i++) {
+ if (chip == -1 || chip == i)
+ res = read_bbt(this, buf, td->pages[i],
+ targetsize >> this->bbt_erase_shift,
+ td, offs);
+ if (res)
+ return res;
+ offs += targetsize >> this->bbt_erase_shift;
+ }
+ } else {
+ res = read_bbt(this, buf, td->pages[0],
+ mtd->size >> this->bbt_erase_shift, td, 0);
+ if (res)
+ return res;
+ }
+ return 0;
+}
+
+/* BBT marker is in the first page, no OOB */
+static int scan_read_data(struct nand_chip *this, uint8_t *buf, loff_t offs,
+ struct nand_bbt_descr *td)
+{
+ struct mtd_info *mtd = nand_to_mtd(this);
+ size_t retlen;
+ size_t len;
+
+ len = td->len;
+ if (td->options & NAND_BBT_VERSION)
+ len++;
+
+ return mtd_read(mtd, offs, len, &retlen, buf);
+}
+
+/**
+ * scan_read_oob - [GENERIC] Scan data+OOB region to buffer
+ * @this: NAND chip object
+ * @buf: temporary buffer
+ * @offs: offset at which to scan
+ * @len: length of data region to read
+ *
+ * Scan read data from data+OOB. May traverse multiple pages, interleaving
+ * page,OOB,page,OOB,... in buf. Completes transfer and returns the "strongest"
+ * ECC condition (error or bitflip). May quit on the first (non-ECC) error.
+ */
+static int scan_read_oob(struct nand_chip *this, uint8_t *buf, loff_t offs,
+ size_t len)
+{
+ struct mtd_info *mtd = nand_to_mtd(this);
+ struct mtd_oob_ops ops;
+ int res, ret = 0;
+
+ ops.mode = MTD_OPS_PLACE_OOB;
+ ops.ooboffs = 0;
+ ops.ooblen = mtd->oobsize;
+
+ while (len > 0) {
+ ops.datbuf = buf;
+ ops.len = min(len, (size_t)mtd->writesize);
+ ops.oobbuf = buf + ops.len;
+
+ res = mtd_read_oob(mtd, offs, &ops);
+ if (res) {
+ if (!mtd_is_bitflip_or_eccerr(res))
+ return res;
+ else if (mtd_is_eccerr(res) || !ret)
+ ret = res;
+ }
+
+ buf += mtd->oobsize + mtd->writesize;
+ len -= mtd->writesize;
+ offs += mtd->writesize;
+ }
+ return ret;
+}
+
+static int scan_read(struct nand_chip *this, uint8_t *buf, loff_t offs,
+ size_t len, struct nand_bbt_descr *td)
+{
+ if (td->options & NAND_BBT_NO_OOB)
+ return scan_read_data(this, buf, offs, td);
+ else
+ return scan_read_oob(this, buf, offs, len);
+}
+
+/* Scan write data with oob to flash */
+static int scan_write_bbt(struct nand_chip *this, loff_t offs, size_t len,
+ uint8_t *buf, uint8_t *oob)
+{
+ struct mtd_info *mtd = nand_to_mtd(this);
+ struct mtd_oob_ops ops;
+
+ ops.mode = MTD_OPS_PLACE_OOB;
+ ops.ooboffs = 0;
+ ops.ooblen = mtd->oobsize;
+ ops.datbuf = buf;
+ ops.oobbuf = oob;
+ ops.len = len;
+
+ return mtd_write_oob(mtd, offs, &ops);
+}
+
+static u32 bbt_get_ver_offs(struct nand_chip *this, struct nand_bbt_descr *td)
+{
+ struct mtd_info *mtd = nand_to_mtd(this);
+ u32 ver_offs = td->veroffs;
+
+ if (!(td->options & NAND_BBT_NO_OOB))
+ ver_offs += mtd->writesize;
+ return ver_offs;
+}
+
+/**
+ * read_abs_bbts - [GENERIC] Read the bad block table(s) for all chips starting at a given page
+ * @this: NAND chip object
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ * @md: descriptor for the bad block table mirror
+ *
+ * Read the bad block table(s) for all chips starting at a given page. We
+ * assume that the bbt bits are in consecutive order.
+ */
+static void read_abs_bbts(struct nand_chip *this, uint8_t *buf,
+ struct nand_bbt_descr *td, struct nand_bbt_descr *md)
+{
+ struct mtd_info *mtd = nand_to_mtd(this);
+
+ /* Read the primary version, if available */
+ if (td->options & NAND_BBT_VERSION) {
+ scan_read(this, buf, (loff_t)td->pages[0] << this->page_shift,
+ mtd->writesize, td);
+ td->version[0] = buf[bbt_get_ver_offs(this, td)];
+ pr_info("Bad block table at page %d, version 0x%02X\n",
+ td->pages[0], td->version[0]);
+ }
+
+ /* Read the mirror version, if available */
+ if (md && (md->options & NAND_BBT_VERSION)) {
+ scan_read(this, buf, (loff_t)md->pages[0] << this->page_shift,
+ mtd->writesize, md);
+ md->version[0] = buf[bbt_get_ver_offs(this, md)];
+ pr_info("Bad block table at page %d, version 0x%02X\n",
+ md->pages[0], md->version[0]);
+ }
+}
+
+/* Scan a given block partially */
+static int scan_block_fast(struct nand_chip *this, struct nand_bbt_descr *bd,
+ loff_t offs, uint8_t *buf)
+{
+ struct mtd_info *mtd = nand_to_mtd(this);
+
+ struct mtd_oob_ops ops;
+ int ret, page_offset;
+
+ ops.ooblen = mtd->oobsize;
+ ops.oobbuf = buf;
+ ops.ooboffs = 0;
+ ops.datbuf = NULL;
+ ops.mode = MTD_OPS_PLACE_OOB;
+
+ page_offset = nand_bbm_get_next_page(this, 0);
+
+ while (page_offset >= 0) {
+ /*
+ * Read the full oob until read_oob is fixed to handle single
+ * byte reads for 16 bit buswidth.
+ */
+ ret = mtd_read_oob(mtd, offs + (page_offset * mtd->writesize),
+ &ops);
+ /* Ignore ECC errors when checking for BBM */
+ if (ret && !mtd_is_bitflip_or_eccerr(ret))
+ return ret;
+
+ if (check_short_pattern(buf, bd))
+ return 1;
+
+ page_offset = nand_bbm_get_next_page(this, page_offset + 1);
+ }
+
+ return 0;
+}
+
+/**
+ * create_bbt - [GENERIC] Create a bad block table by scanning the device
+ * @this: NAND chip object
+ * @buf: temporary buffer
+ * @bd: descriptor for the good/bad block search pattern
+ * @chip: create the table for a specific chip, -1 read all chips; applies only
+ * if NAND_BBT_PERCHIP option is set
+ *
+ * Create a bad block table by scanning the device for the given good/bad block
+ * identify pattern.
+ */
+static int create_bbt(struct nand_chip *this, uint8_t *buf,
+ struct nand_bbt_descr *bd, int chip)
+{
+ u64 targetsize = nanddev_target_size(&this->base);
+ struct mtd_info *mtd = nand_to_mtd(this);
+ int i, numblocks, startblock;
+ loff_t from;
+
+ pr_info("Scanning device for bad blocks\n");
+
+ if (chip == -1) {
+ numblocks = mtd->size >> this->bbt_erase_shift;
+ startblock = 0;
+ from = 0;
+ } else {
+ if (chip >= nanddev_ntargets(&this->base)) {
+ pr_warn("create_bbt(): chipnr (%d) > available chips (%d)\n",
+ chip + 1, nanddev_ntargets(&this->base));
+ return -EINVAL;
+ }
+ numblocks = targetsize >> this->bbt_erase_shift;
+ startblock = chip * numblocks;
+ numblocks += startblock;
+ from = (loff_t)startblock << this->bbt_erase_shift;
+ }
+
+ for (i = startblock; i < numblocks; i++) {
+ int ret;
+
+ BUG_ON(bd->options & NAND_BBT_NO_OOB);
+
+ ret = scan_block_fast(this, bd, from, buf);
+ if (ret < 0)
+ return ret;
+
+ if (ret) {
+ bbt_mark_entry(this, i, BBT_BLOCK_FACTORY_BAD);
+ pr_warn("Bad eraseblock %d at 0x%012llx\n",
+ i, (unsigned long long)from);
+ mtd->ecc_stats.badblocks++;
+ }
+
+ from += (1 << this->bbt_erase_shift);
+ }
+ return 0;
+}
+
+/**
+ * search_bbt - [GENERIC] scan the device for a specific bad block table
+ * @this: NAND chip object
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ *
+ * Read the bad block table by searching for a given ident pattern. Search is
+ * preformed either from the beginning up or from the end of the device
+ * downwards. The search starts always at the start of a block. If the option
+ * NAND_BBT_PERCHIP is given, each chip is searched for a bbt, which contains
+ * the bad block information of this chip. This is necessary to provide support
+ * for certain DOC devices.
+ *
+ * The bbt ident pattern resides in the oob area of the first page in a block.
+ */
+static int search_bbt(struct nand_chip *this, uint8_t *buf,
+ struct nand_bbt_descr *td)
+{
+ u64 targetsize = nanddev_target_size(&this->base);
+ struct mtd_info *mtd = nand_to_mtd(this);
+ int i, chips;
+ int startblock, block, dir;
+ int scanlen = mtd->writesize + mtd->oobsize;
+ int bbtblocks;
+ int blocktopage = this->bbt_erase_shift - this->page_shift;
+
+ /* Search direction top -> down? */
+ if (td->options & NAND_BBT_LASTBLOCK) {
+ startblock = (mtd->size >> this->bbt_erase_shift) - 1;
+ dir = -1;
+ } else {
+ startblock = 0;
+ dir = 1;
+ }
+
+ /* Do we have a bbt per chip? */
+ if (td->options & NAND_BBT_PERCHIP) {
+ chips = nanddev_ntargets(&this->base);
+ bbtblocks = targetsize >> this->bbt_erase_shift;
+ startblock &= bbtblocks - 1;
+ } else {
+ chips = 1;
+ bbtblocks = mtd->size >> this->bbt_erase_shift;
+ }
+
+ for (i = 0; i < chips; i++) {
+ /* Reset version information */
+ td->version[i] = 0;
+ td->pages[i] = -1;
+ /* Scan the maximum number of blocks */
+ for (block = 0; block < td->maxblocks; block++) {
+
+ int actblock = startblock + dir * block;
+ loff_t offs = (loff_t)actblock << this->bbt_erase_shift;
+
+ /* Read first page */
+ scan_read(this, buf, offs, mtd->writesize, td);
+ if (!check_pattern(buf, scanlen, mtd->writesize, td)) {
+ td->pages[i] = actblock << blocktopage;
+ if (td->options & NAND_BBT_VERSION) {
+ offs = bbt_get_ver_offs(this, td);
+ td->version[i] = buf[offs];
+ }
+ break;
+ }
+ }
+ startblock += targetsize >> this->bbt_erase_shift;
+ }
+	/* Check if we found a bbt for each requested chip */
+ for (i = 0; i < chips; i++) {
+ if (td->pages[i] == -1)
+ pr_warn("Bad block table not found for chip %d\n", i);
+ else
+ pr_info("Bad block table found at page %d, version 0x%02X\n",
+ td->pages[i], td->version[i]);
+ }
+ return 0;
+}
+
+/**
+ * search_read_bbts - [GENERIC] scan the device for bad block table(s)
+ * @this: NAND chip object
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ * @md: descriptor for the bad block table mirror
+ *
+ * Search and read the bad block table(s).
+ */
+static void search_read_bbts(struct nand_chip *this, uint8_t *buf,
+ struct nand_bbt_descr *td,
+ struct nand_bbt_descr *md)
+{
+ /* Search the primary table */
+ search_bbt(this, buf, td);
+
+ /* Search the mirror table */
+ if (md)
+ search_bbt(this, buf, md);
+}
+
+/**
+ * get_bbt_block - Get the first valid eraseblock suitable to store a BBT
+ * @this: the NAND device
+ * @td: the BBT description
+ * @md: the mirror BBT descriptor
+ * @chip: the CHIP selector
+ *
+ * This function returns a positive block number pointing to a valid eraseblock
+ * suitable to store a BBT (i.e. in the range reserved for BBT), or -ENOSPC if
+ * all blocks are already used or marked bad. If td->pages[chip] was already
+ * pointing to a valid block we re-use it, otherwise we search for the next
+ * valid one.
+ */
+static int get_bbt_block(struct nand_chip *this, struct nand_bbt_descr *td,
+ struct nand_bbt_descr *md, int chip)
+{
+ u64 targetsize = nanddev_target_size(&this->base);
+ int startblock, dir, page, numblocks, i;
+
+ /*
+ * There was already a version of the table, reuse the page. This
+ * applies for absolute placement too, as we have the page number in
+ * td->pages.
+ */
+ if (td->pages[chip] != -1)
+ return td->pages[chip] >>
+ (this->bbt_erase_shift - this->page_shift);
+
+ numblocks = (int)(targetsize >> this->bbt_erase_shift);
+ if (!(td->options & NAND_BBT_PERCHIP))
+ numblocks *= nanddev_ntargets(&this->base);
+
+ /*
+ * Automatic placement of the bad block table. Search direction
+ * top -> down?
+ */
+ if (td->options & NAND_BBT_LASTBLOCK) {
+ startblock = numblocks * (chip + 1) - 1;
+ dir = -1;
+ } else {
+ startblock = chip * numblocks;
+ dir = 1;
+ }
+
+ for (i = 0; i < td->maxblocks; i++) {
+ int block = startblock + dir * i;
+
+		/* Check if the block is bad */
+ switch (bbt_get_entry(this, block)) {
+ case BBT_BLOCK_WORN:
+ case BBT_BLOCK_FACTORY_BAD:
+ continue;
+ }
+
+ page = block << (this->bbt_erase_shift - this->page_shift);
+
+		/* Check if the block is used by the mirror table */
+ if (!md || md->pages[chip] != page)
+ return block;
+ }
+
+ return -ENOSPC;
+}
+
+/**
+ * mark_bbt_block_bad - Mark one of the block reserved for BBT bad
+ * @this: the NAND device
+ * @td: the BBT description
+ * @chip: the CHIP selector
+ * @block: the BBT block to mark
+ *
+ * Blocks reserved for BBT can become bad. This function is a helper to mark
+ * such blocks as bad. It takes care of updating the in-memory BBT, marking the
+ * block as bad using a bad block marker and invalidating the associated
+ * td->pages[] entry.
+ */
+static void mark_bbt_block_bad(struct nand_chip *this,
+ struct nand_bbt_descr *td,
+ int chip, int block)
+{
+ loff_t to;
+ int res;
+
+ bbt_mark_entry(this, block, BBT_BLOCK_WORN);
+
+ to = (loff_t)block << this->bbt_erase_shift;
+ res = nand_markbad_bbm(this, to);
+ if (res)
+ pr_warn("nand_bbt: error %d while marking block %d bad\n",
+ res, block);
+
+ td->pages[chip] = -1;
+}
+
+/**
+ * write_bbt - [GENERIC] (Re)write the bad block table
+ * @this: NAND chip object
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ * @md: descriptor for the bad block table mirror
+ * @chipsel: selector for a specific chip, -1 for all
+ *
+ * (Re)write the bad block table.
+ */
+static int write_bbt(struct nand_chip *this, uint8_t *buf,
+ struct nand_bbt_descr *td, struct nand_bbt_descr *md,
+ int chipsel)
+{
+ u64 targetsize = nanddev_target_size(&this->base);
+ struct mtd_info *mtd = nand_to_mtd(this);
+ struct erase_info einfo;
+ int i, res, chip = 0;
+ int bits, page, offs, numblocks, sft, sftmsk;
+ int nrchips, pageoffs, ooboffs;
+ uint8_t msk[4];
+ uint8_t rcode = td->reserved_block_code;
+ size_t retlen, len = 0;
+ loff_t to;
+ struct mtd_oob_ops ops;
+
+ ops.ooblen = mtd->oobsize;
+ ops.ooboffs = 0;
+ ops.datbuf = NULL;
+ ops.mode = MTD_OPS_PLACE_OOB;
+
+ if (!rcode)
+ rcode = 0xff;
+ /* Write bad block table per chip rather than per device? */
+ if (td->options & NAND_BBT_PERCHIP) {
+ numblocks = (int)(targetsize >> this->bbt_erase_shift);
+ /* Full device write or specific chip? */
+ if (chipsel == -1) {
+ nrchips = nanddev_ntargets(&this->base);
+ } else {
+ nrchips = chipsel + 1;
+ chip = chipsel;
+ }
+ } else {
+ numblocks = (int)(mtd->size >> this->bbt_erase_shift);
+ nrchips = 1;
+ }
+
+ /* Loop through the chips */
+ while (chip < nrchips) {
+ int block;
+
+ block = get_bbt_block(this, td, md, chip);
+ if (block < 0) {
+ pr_err("No space left to write bad block table\n");
+ res = block;
+ goto outerr;
+ }
+
+ /*
+ * get_bbt_block() returns a block number, shift the value to
+ * get a page number.
+ */
+ page = block << (this->bbt_erase_shift - this->page_shift);
+
+ /* Set up shift count and masks for the flash table */
+ bits = td->options & NAND_BBT_NRBITS_MSK;
+ msk[2] = ~rcode;
+ switch (bits) {
+ case 1: sft = 3; sftmsk = 0x07; msk[0] = 0x00; msk[1] = 0x01;
+ msk[3] = 0x01;
+ break;
+ case 2: sft = 2; sftmsk = 0x06; msk[0] = 0x00; msk[1] = 0x01;
+ msk[3] = 0x03;
+ break;
+ case 4: sft = 1; sftmsk = 0x04; msk[0] = 0x00; msk[1] = 0x0C;
+ msk[3] = 0x0f;
+ break;
+ case 8: sft = 0; sftmsk = 0x00; msk[0] = 0x00; msk[1] = 0x0F;
+ msk[3] = 0xff;
+ break;
+ default: return -EINVAL;
+ }
+
+ to = ((loff_t)page) << this->page_shift;
+
+ /* Must we save the block contents? */
+ if (td->options & NAND_BBT_SAVECONTENT) {
+ /* Make it block aligned */
+ to &= ~(((loff_t)1 << this->bbt_erase_shift) - 1);
+ len = 1 << this->bbt_erase_shift;
+ res = mtd_read(mtd, to, len, &retlen, buf);
+ if (res < 0) {
+ if (retlen != len) {
+ pr_info("nand_bbt: error reading block for writing the bad block table\n");
+ return res;
+ }
+ pr_warn("nand_bbt: ECC error while reading block for writing bad block table\n");
+ }
+ /* Read oob data */
+ ops.ooblen = (len >> this->page_shift) * mtd->oobsize;
+ ops.oobbuf = &buf[len];
+ res = mtd_read_oob(mtd, to + mtd->writesize, &ops);
+ if (res < 0 || ops.oobretlen != ops.ooblen)
+ goto outerr;
+
+ /* Calc the byte offset in the buffer */
+ pageoffs = page - (int)(to >> this->page_shift);
+ offs = pageoffs << this->page_shift;
+ /* Preset the bbt area with 0xff */
+ memset(&buf[offs], 0xff, (size_t)(numblocks >> sft));
+ ooboffs = len + (pageoffs * mtd->oobsize);
+
+ } else if (td->options & NAND_BBT_NO_OOB) {
+ ooboffs = 0;
+ offs = td->len;
+ /* The version byte */
+ if (td->options & NAND_BBT_VERSION)
+ offs++;
+ /* Calc length */
+ len = (size_t)(numblocks >> sft);
+ len += offs;
+ /* Make it page aligned! */
+ len = ALIGN(len, mtd->writesize);
+ /* Preset the buffer with 0xff */
+ memset(buf, 0xff, len);
+			/* Pattern is located at the beginning of the first page */
+ memcpy(buf, td->pattern, td->len);
+ } else {
+ /* Calc length */
+ len = (size_t)(numblocks >> sft);
+ /* Make it page aligned! */
+ len = ALIGN(len, mtd->writesize);
+ /* Preset the buffer with 0xff */
+ memset(buf, 0xff, len +
+			       (len >> this->page_shift) * mtd->oobsize);
+ offs = 0;
+ ooboffs = len;
+ /* Pattern is located in oob area of first page */
+ memcpy(&buf[ooboffs + td->offs], td->pattern, td->len);
+ }
+
+ if (td->options & NAND_BBT_VERSION)
+ buf[ooboffs + td->veroffs] = td->version[chip];
+
+ /* Walk through the memory table */
+ for (i = 0; i < numblocks; i++) {
+ uint8_t dat;
+ int sftcnt = (i << (3 - sft)) & sftmsk;
+ dat = bbt_get_entry(this, chip * numblocks + i);
+ /* Do not store the reserved bbt blocks! */
+ buf[offs + (i >> sft)] &= ~(msk[dat] << sftcnt);
+ }
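+		/*
+		 * Example: with the 2-bit table the buffer starts as all
+		 * 0xff (11b = good); a worn block clears msk[1] = 0x01
+		 * from its field (leaving 10b) and a factory-bad block
+		 * clears msk[3] = 0x03 (leaving 00b), matching the
+		 * on-flash encoding described in the file header.
+		 */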
+
+ memset(&einfo, 0, sizeof(einfo));
+ einfo.addr = to;
+ einfo.len = 1 << this->bbt_erase_shift;
+ res = nand_erase_nand(this, &einfo, 1);
+ if (res < 0) {
+ pr_warn("nand_bbt: error while erasing BBT block %d\n",
+ res);
+ mark_bbt_block_bad(this, td, chip, block);
+ continue;
+ }
+
+ res = scan_write_bbt(this, to, len, buf,
+ td->options & NAND_BBT_NO_OOB ?
+ NULL : &buf[len]);
+ if (res < 0) {
+ pr_warn("nand_bbt: error while writing BBT block %d\n",
+ res);
+ mark_bbt_block_bad(this, td, chip, block);
+ continue;
+ }
+
+ pr_info("Bad block table written to 0x%012llx, version 0x%02X\n",
+ (unsigned long long)to, td->version[chip]);
+
+ /* Mark it as used */
+ td->pages[chip++] = page;
+ }
+ return 0;
+
+ outerr:
+ pr_warn("nand_bbt: error while writing bad block table %d\n", res);
+ return res;
+}
+
+/**
+ * nand_memory_bbt - [GENERIC] create a memory based bad block table
+ * @this: NAND chip object
+ * @bd: descriptor for the good/bad block search pattern
+ *
+ * The function creates a memory based bbt by scanning the device for
+ * manufacturer / software marked good / bad blocks.
+ */
+static inline int nand_memory_bbt(struct nand_chip *this,
+ struct nand_bbt_descr *bd)
+{
+ u8 *pagebuf = nand_get_data_buf(this);
+
+ return create_bbt(this, pagebuf, bd, -1);
+}
+
+/**
+ * check_create - [GENERIC] create and write bbt(s) if necessary
+ * @this: the NAND device
+ * @buf: temporary buffer
+ * @bd: descriptor for the good/bad block search pattern
+ *
+ * The function checks the results of the previous call to read_bbt and creates
+ * / updates the bbt(s) if necessary. Creation is necessary if no bbt was found
+ * for the chip/device. Update is necessary if one of the tables is missing or
+ * the version number of one table is less than the other.
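+ *
+ * writeops is a bitmask: bit 0 requests a write of the main table and
+ * bit 1 a write of the mirror table, so 0x03 rewrites both.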
+ */
+static int check_create(struct nand_chip *this, uint8_t *buf,
+ struct nand_bbt_descr *bd)
+{
+ int i, chips, writeops, create, chipsel, res, res2;
+ struct nand_bbt_descr *td = this->bbt_td;
+ struct nand_bbt_descr *md = this->bbt_md;
+ struct nand_bbt_descr *rd, *rd2;
+
+ /* Do we have a bbt per chip? */
+ if (td->options & NAND_BBT_PERCHIP)
+ chips = nanddev_ntargets(&this->base);
+ else
+ chips = 1;
+
+ for (i = 0; i < chips; i++) {
+ writeops = 0;
+ create = 0;
+ rd = NULL;
+ rd2 = NULL;
+ res = res2 = 0;
+ /* Per chip or per device? */
+ chipsel = (td->options & NAND_BBT_PERCHIP) ? i : -1;
+ /* Mirrored table available? */
+ if (md) {
+ if (td->pages[i] == -1 && md->pages[i] == -1) {
+ create = 1;
+ writeops = 0x03;
+ } else if (td->pages[i] == -1) {
+ rd = md;
+ writeops = 0x01;
+ } else if (md->pages[i] == -1) {
+ rd = td;
+ writeops = 0x02;
+ } else if (td->version[i] == md->version[i]) {
+ rd = td;
+ if (!(td->options & NAND_BBT_VERSION))
+ rd2 = md;
+ } else if (((int8_t)(td->version[i] - md->version[i])) > 0) {
+ rd = td;
+ writeops = 0x02;
+ } else {
+ rd = md;
+ writeops = 0x01;
+ }
+ } else {
+ if (td->pages[i] == -1) {
+ create = 1;
+ writeops = 0x01;
+ } else {
+ rd = td;
+ }
+ }
+
+ if (create) {
+ /* Create the bad block table by scanning the device? */
+ if (!(td->options & NAND_BBT_CREATE))
+ continue;
+
+ /* Create the table in memory by scanning the chip(s) */
+ if (!(this->bbt_options & NAND_BBT_CREATE_EMPTY))
+ create_bbt(this, buf, bd, chipsel);
+
+ td->version[i] = 1;
+ if (md)
+ md->version[i] = 1;
+ }
+
+ /* Read back first? */
+ if (rd) {
+ res = read_abs_bbt(this, buf, rd, chipsel);
+ if (mtd_is_eccerr(res)) {
+ /* Mark table as invalid */
+ rd->pages[i] = -1;
+ rd->version[i] = 0;
+ i--;
+ continue;
+ }
+ }
+ /* If they weren't versioned, read both */
+ if (rd2) {
+ res2 = read_abs_bbt(this, buf, rd2, chipsel);
+ if (mtd_is_eccerr(res2)) {
+ /* Mark table as invalid */
+ rd2->pages[i] = -1;
+ rd2->version[i] = 0;
+ i--;
+ continue;
+ }
+ }
+
+ /* Scrub the flash table(s)? */
+ if (mtd_is_bitflip(res) || mtd_is_bitflip(res2))
+ writeops = 0x03;
+
+ /* Update version numbers before writing */
+ if (md) {
+ td->version[i] = max(td->version[i], md->version[i]);
+ md->version[i] = td->version[i];
+ }
+
+ /* Write the bad block table to the device? */
+ if ((writeops & 0x01) && (td->options & NAND_BBT_WRITE)) {
+ res = write_bbt(this, buf, td, md, chipsel);
+ if (res < 0)
+ return res;
+ }
+
+ /* Write the mirror bad block table to the device? */
+ if ((writeops & 0x02) && md && (md->options & NAND_BBT_WRITE)) {
+ res = write_bbt(this, buf, md, td, chipsel);
+ if (res < 0)
+ return res;
+ }
+ }
+ return 0;
+}
+
+/**
+ * nand_update_bbt - update bad block table(s)
+ * @this: the NAND device
+ * @offs: the offset of the newly marked block
+ *
+ * The function updates the bad block table(s).
+ */
+static int nand_update_bbt(struct nand_chip *this, loff_t offs)
+{
+ struct mtd_info *mtd = nand_to_mtd(this);
+ int len, res = 0;
+ int chip, chipsel;
+ uint8_t *buf;
+ struct nand_bbt_descr *td = this->bbt_td;
+ struct nand_bbt_descr *md = this->bbt_md;
+
+ if (!this->bbt || !td)
+ return -EINVAL;
+
+ /* Allocate a temporary buffer for one eraseblock incl. oob */
+ len = (1 << this->bbt_erase_shift);
+ len += (len >> this->page_shift) * mtd->oobsize;
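+	/*
+	 * e.g. a 128 KiB eraseblock of 2 KiB pages with 64 bytes of OOB
+	 * per page needs 128 KiB + 64 * 64 = 132 KiB.
+	 */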
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /* Do we have a bbt per chip? */
+ if (td->options & NAND_BBT_PERCHIP) {
+ chip = (int)(offs >> this->chip_shift);
+ chipsel = chip;
+ } else {
+ chip = 0;
+ chipsel = -1;
+ }
+
+ td->version[chip]++;
+ if (md)
+ md->version[chip]++;
+
+ /* Write the bad block table to the device? */
+ if (td->options & NAND_BBT_WRITE) {
+ res = write_bbt(this, buf, td, md, chipsel);
+ if (res < 0)
+ goto out;
+ }
+ /* Write the mirror bad block table to the device? */
+ if (md && (md->options & NAND_BBT_WRITE)) {
+ res = write_bbt(this, buf, md, td, chipsel);
+ }
+
+ out:
+ kfree(buf);
+ return res;
+}
+
+/**
+ * mark_bbt_region - [GENERIC] mark the bad block table regions
+ * @this: the NAND device
+ * @td: bad block table descriptor
+ *
+ * The bad block table regions are marked as reserved to prevent accidental
+ * erasures / writes. The regions are identified by the mark 0x02
+ * (BBT_BLOCK_RESERVED).
+ */
+static void mark_bbt_region(struct nand_chip *this, struct nand_bbt_descr *td)
+{
+ u64 targetsize = nanddev_target_size(&this->base);
+ struct mtd_info *mtd = nand_to_mtd(this);
+ int i, j, chips, block, nrblocks, update;
+ uint8_t oldval;
+
+ /* Do we have a bbt per chip? */
+ if (td->options & NAND_BBT_PERCHIP) {
+ chips = nanddev_ntargets(&this->base);
+ nrblocks = (int)(targetsize >> this->bbt_erase_shift);
+ } else {
+ chips = 1;
+ nrblocks = (int)(mtd->size >> this->bbt_erase_shift);
+ }
+
+ for (i = 0; i < chips; i++) {
+ if ((td->options & NAND_BBT_ABSPAGE) ||
+ !(td->options & NAND_BBT_WRITE)) {
+ if (td->pages[i] == -1)
+ continue;
+ block = td->pages[i] >> (this->bbt_erase_shift - this->page_shift);
+ oldval = bbt_get_entry(this, block);
+ bbt_mark_entry(this, block, BBT_BLOCK_RESERVED);
+ if ((oldval != BBT_BLOCK_RESERVED) &&
+ td->reserved_block_code)
+ nand_update_bbt(this, (loff_t)block <<
+ this->bbt_erase_shift);
+ continue;
+ }
+ update = 0;
+ if (td->options & NAND_BBT_LASTBLOCK)
+ block = ((i + 1) * nrblocks) - td->maxblocks;
+ else
+ block = i * nrblocks;
+ for (j = 0; j < td->maxblocks; j++) {
+ oldval = bbt_get_entry(this, block);
+ bbt_mark_entry(this, block, BBT_BLOCK_RESERVED);
+ if (oldval != BBT_BLOCK_RESERVED)
+ update = 1;
+ block++;
+ }
+ /*
+ * If we want reserved blocks to be recorded to flash, and some
+ * new ones have been marked, then we need to update the stored
+ * bbts. This should only happen once.
+ */
+ if (update && td->reserved_block_code)
+ nand_update_bbt(this, (loff_t)(block - 1) <<
+ this->bbt_erase_shift);
+ }
+}
+
+/**
+ * verify_bbt_descr - verify the bad block description
+ * @this: the NAND device
+ * @bd: the table to verify
+ *
+ * This function performs a few sanity checks on the bad block description
+ * table.
+ */
+static void verify_bbt_descr(struct nand_chip *this, struct nand_bbt_descr *bd)
+{
+ u64 targetsize = nanddev_target_size(&this->base);
+ struct mtd_info *mtd = nand_to_mtd(this);
+ u32 pattern_len;
+ u32 bits;
+ u32 table_size;
+
+ if (!bd)
+ return;
+
+ pattern_len = bd->len;
+ bits = bd->options & NAND_BBT_NRBITS_MSK;
+
+ BUG_ON((this->bbt_options & NAND_BBT_NO_OOB) &&
+ !(this->bbt_options & NAND_BBT_USE_FLASH));
+ BUG_ON(!bits);
+
+ if (bd->options & NAND_BBT_VERSION)
+ pattern_len++;
+
+ if (bd->options & NAND_BBT_NO_OOB) {
+ BUG_ON(!(this->bbt_options & NAND_BBT_USE_FLASH));
+ BUG_ON(!(this->bbt_options & NAND_BBT_NO_OOB));
+ BUG_ON(bd->offs);
+ if (bd->options & NAND_BBT_VERSION)
+ BUG_ON(bd->veroffs != bd->len);
+ BUG_ON(bd->options & NAND_BBT_SAVECONTENT);
+ }
+
+ if (bd->options & NAND_BBT_PERCHIP)
+ table_size = targetsize >> this->bbt_erase_shift;
+ else
+ table_size = mtd->size >> this->bbt_erase_shift;
+ table_size >>= 3;
+ table_size *= bits;
+ if (bd->options & NAND_BBT_NO_OOB)
+ table_size += pattern_len;
+ BUG_ON(table_size > (1 << this->bbt_erase_shift));
+}
+
+/**
+ * nand_scan_bbt - [NAND Interface] scan, find, read and maybe create bad block table(s)
+ * @this: the NAND device
+ * @bd: descriptor for the good/bad block search pattern
+ *
+ * The function checks if a bad block table is already available. If not, it
+ * scans the device for manufacturer-marked good / bad blocks and writes
+ * the bad block table(s) to the selected place.
+ *
+ * The bad block table memory is allocated here. It must be freed by calling
+ * the nand_free_bbt function.
+ */
+static int nand_scan_bbt(struct nand_chip *this, struct nand_bbt_descr *bd)
+{
+ struct mtd_info *mtd = nand_to_mtd(this);
+ int len, res;
+ uint8_t *buf;
+ struct nand_bbt_descr *td = this->bbt_td;
+ struct nand_bbt_descr *md = this->bbt_md;
+
+ len = (mtd->size >> (this->bbt_erase_shift + 2)) ? : 1;
+ /*
+	 * Allocate memory (2 bits per block) and clear the memory bad block
+ * table.
+ */
+ this->bbt = kzalloc(len, GFP_KERNEL);
+ if (!this->bbt)
+ return -ENOMEM;
+
+ /*
+ * If no primary table descriptor is given, scan the device to build a
+ * memory based bad block table.
+ */
+ if (!td) {
+		res = nand_memory_bbt(this, bd);
+		if (res) {
+ pr_err("nand_bbt: can't scan flash and build the RAM-based BBT\n");
+ goto err_free_bbt;
+ }
+ return 0;
+ }
+ verify_bbt_descr(this, td);
+ verify_bbt_descr(this, md);
+
+ /* Allocate a temporary buffer for one eraseblock incl. oob */
+ len = (1 << this->bbt_erase_shift);
+ len += (len >> this->page_shift) * mtd->oobsize;
+ buf = vmalloc(len);
+ if (!buf) {
+ res = -ENOMEM;
+ goto err_free_bbt;
+ }
+
+ /* Is the bbt at a given page? */
+ if (td->options & NAND_BBT_ABSPAGE) {
+ read_abs_bbts(this, buf, td, md);
+ } else {
+ /* Search the bad block table using a pattern in oob */
+ search_read_bbts(this, buf, td, md);
+ }
+
+ res = check_create(this, buf, bd);
+ if (res)
+ goto err_free_buf;
+
+ /* Prevent the bbt regions from erasing / writing */
+ mark_bbt_region(this, td);
+ if (md)
+ mark_bbt_region(this, md);
+
+ vfree(buf);
+ return 0;
+
+err_free_buf:
+ vfree(buf);
+err_free_bbt:
+ kfree(this->bbt);
+ this->bbt = NULL;
+ return res;
+}
+
+/*
+ * Define some generic bad / good block scan patterns which are used
+ * while scanning a device for factory marked good / bad blocks.
+ */
+static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
+
+/* Generic flash bbt descriptors */
+static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
+static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' };
+
+static struct nand_bbt_descr bbt_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 8,
+ .len = 4,
+ .veroffs = 12,
+ .maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
+ .pattern = bbt_pattern
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 8,
+ .len = 4,
+ .veroffs = 12,
+ .maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
+ .pattern = mirror_pattern
+};
+
+static struct nand_bbt_descr bbt_main_no_oob_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP
+ | NAND_BBT_NO_OOB,
+ .len = 4,
+ .veroffs = 4,
+ .maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
+ .pattern = bbt_pattern
+};
+
+static struct nand_bbt_descr bbt_mirror_no_oob_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP
+ | NAND_BBT_NO_OOB,
+ .len = 4,
+ .veroffs = 4,
+ .maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
+ .pattern = mirror_pattern
+};
+
+#define BADBLOCK_SCAN_MASK (~NAND_BBT_NO_OOB)
+/**
+ * nand_create_badblock_pattern - [INTERN] Creates a BBT descriptor structure
+ * @this: NAND chip to create descriptor for
+ *
+ * This function allocates and initializes a nand_bbt_descr for BBM detection
+ * based on the properties of @this. The new descriptor is stored in
+ * this->badblock_pattern. Thus, this->badblock_pattern should be NULL when
+ * passed to this function.
+ */
+static int nand_create_badblock_pattern(struct nand_chip *this)
+{
+ struct nand_bbt_descr *bd;
+
+ if (this->badblock_pattern) {
+ pr_warn("Bad block pattern already allocated; not replacing\n");
+ return -EINVAL;
+ }
+ bd = kzalloc(sizeof(*bd), GFP_KERNEL);
+ if (!bd)
+ return -ENOMEM;
+ bd->options = this->bbt_options & BADBLOCK_SCAN_MASK;
+ bd->offs = this->badblockpos;
+ bd->len = (this->options & NAND_BUSWIDTH_16) ? 2 : 1;
+ bd->pattern = scan_ff_pattern;
+ bd->options |= NAND_BBT_DYNAMICSTRUCT;
+ this->badblock_pattern = bd;
+ return 0;
+}
+
+/**
+ * nand_create_bbt - [NAND Interface] Select a default bad block table for the device
+ * @this: NAND chip object
+ *
+ * This function selects the default bad block table support for the device and
+ * calls the nand_scan_bbt function.
+ */
+int nand_create_bbt(struct nand_chip *this)
+{
+ int ret;
+
+ /* Is a flash based bad block table requested? */
+ if (this->bbt_options & NAND_BBT_USE_FLASH) {
+ /* Use the default pattern descriptors */
+ if (!this->bbt_td) {
+ if (this->bbt_options & NAND_BBT_NO_OOB) {
+ this->bbt_td = &bbt_main_no_oob_descr;
+ this->bbt_md = &bbt_mirror_no_oob_descr;
+ } else {
+ this->bbt_td = &bbt_main_descr;
+ this->bbt_md = &bbt_mirror_descr;
+ }
+ }
+ } else {
+ this->bbt_td = NULL;
+ this->bbt_md = NULL;
+ }
+
+ if (!this->badblock_pattern) {
+ ret = nand_create_badblock_pattern(this);
+ if (ret)
+ return ret;
+ }
+
+ return nand_scan_bbt(this, this->badblock_pattern);
+}
+EXPORT_SYMBOL(nand_create_bbt);
+
+/**
+ * nand_isreserved_bbt - [NAND Interface] Check if a block is reserved
+ * @this: NAND chip object
+ * @offs: offset in the device
+ */
+int nand_isreserved_bbt(struct nand_chip *this, loff_t offs)
+{
+ int block;
+
+ block = (int)(offs >> this->bbt_erase_shift);
+ return bbt_get_entry(this, block) == BBT_BLOCK_RESERVED;
+}
+
+/**
+ * nand_isbad_bbt - [NAND Interface] Check if a block is bad
+ * @this: NAND chip object
+ * @offs: offset in the device
+ * @allowbbt: allow access to bad block table region
+ */
+int nand_isbad_bbt(struct nand_chip *this, loff_t offs, int allowbbt)
+{
+ int block, res;
+
+ block = (int)(offs >> this->bbt_erase_shift);
+ res = bbt_get_entry(this, block);
+
+ pr_debug("nand_isbad_bbt(): bbt info for offs 0x%08x: (block %d) 0x%02x\n",
+ (unsigned int)offs, block, res);
+
+ switch (res) {
+ case BBT_BLOCK_GOOD:
+ return 0;
+ case BBT_BLOCK_WORN:
+ return 1;
+ case BBT_BLOCK_RESERVED:
+ return allowbbt ? 0 : 1;
+ }
+ return 1;
+}
+
+/**
+ * nand_markbad_bbt - [NAND Interface] Mark a block bad in the BBT
+ * @this: NAND chip object
+ * @offs: offset of the bad block
+ */
+int nand_markbad_bbt(struct nand_chip *this, loff_t offs)
+{
+ int block, ret = 0;
+
+ block = (int)(offs >> this->bbt_erase_shift);
+
+ /* Mark bad block in memory */
+ bbt_mark_entry(this, block, BBT_BLOCK_WORN);
+
+ /* Update flash-based bad block table */
+ if (this->bbt_options & NAND_BBT_USE_FLASH)
+ ret = nand_update_bbt(this, offs);
+
+ return ret;
+}
diff --git a/drivers/mtd/nand/raw/nand_bch.c b/drivers/mtd/nand/raw/nand_bch.c
new file mode 100644
index 000000000..9d19ac14c
--- /dev/null
+++ b/drivers/mtd/nand/raw/nand_bch.c
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * This file provides ECC correction for more than 1 bit per block of data,
+ * using binary BCH codes. It relies on the generic BCH library lib/bch.c.
+ *
+ * Copyright © 2011 Ivan Djelic <ivan.djelic@parrot.com>
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/nand_bch.h>
+#include <linux/bch.h>
+
+/**
+ * struct nand_bch_control - private NAND BCH control structure
+ * @bch: BCH control structure
+ * @errloc: error location array
+ * @eccmask: XOR ecc mask, allows erased pages to be decoded as valid
+ */
+struct nand_bch_control {
+ struct bch_control *bch;
+ unsigned int *errloc;
+ unsigned char *eccmask;
+};
+
+/**
+ * nand_bch_calculate_ecc - [NAND Interface] Calculate ECC for data block
+ * @chip: NAND chip object
+ * @buf: input buffer with raw data
+ * @code: output buffer with ECC
+ */
+int nand_bch_calculate_ecc(struct nand_chip *chip, const unsigned char *buf,
+ unsigned char *code)
+{
+ struct nand_bch_control *nbc = chip->ecc.priv;
+ unsigned int i;
+
+ memset(code, 0, chip->ecc.bytes);
+ bch_encode(nbc->bch, buf, chip->ecc.size, code);
+
+ /* apply mask so that an erased page is a valid codeword */
+ for (i = 0; i < chip->ecc.bytes; i++)
+ code[i] ^= nbc->eccmask[i];
+
+ return 0;
+}
+EXPORT_SYMBOL(nand_bch_calculate_ecc);
+
+/**
+ * nand_bch_correct_data - [NAND Interface] Detect and correct bit error(s)
+ * @chip: NAND chip object
+ * @buf: raw data read from the chip
+ * @read_ecc: ECC from the chip
+ * @calc_ecc: the ECC calculated from raw data
+ *
+ * Detect and correct bit errors for a data byte block
+ */
+int nand_bch_correct_data(struct nand_chip *chip, unsigned char *buf,
+ unsigned char *read_ecc, unsigned char *calc_ecc)
+{
+ struct nand_bch_control *nbc = chip->ecc.priv;
+ unsigned int *errloc = nbc->errloc;
+ int i, count;
+
+ count = bch_decode(nbc->bch, NULL, chip->ecc.size, read_ecc, calc_ecc,
+ NULL, errloc);
+ if (count > 0) {
+ for (i = 0; i < count; i++) {
+ if (errloc[i] < (chip->ecc.size*8))
+ /* error is located in data, correct it */
+ buf[errloc[i] >> 3] ^= (1 << (errloc[i] & 7));
+ /* else error in ecc, no action needed */
+
+ pr_debug("%s: corrected bitflip %u\n", __func__,
+ errloc[i]);
+ }
+ } else if (count < 0) {
+ pr_err("ecc unrecoverable error\n");
+ count = -EBADMSG;
+ }
+ return count;
+}
+EXPORT_SYMBOL(nand_bch_correct_data);
+
+/**
+ * nand_bch_init - [NAND Interface] Initialize NAND BCH error correction
+ * @mtd: MTD block structure
+ *
+ * Returns:
+ * a pointer to a new NAND BCH control structure, or NULL upon failure
+ *
+ * Initialize NAND BCH error correction. Parameters @eccsize and @eccbytes
+ * are used to compute BCH parameters m (Galois field order) and t (error
+ * correction capability). @eccbytes should be equal to the number of bytes
+ * required to store m*t bits, where m is such that 2^m-1 > @eccsize*8.
+ *
+ * Example: to configure 4 bit correction per 512 bytes, you should pass
+ * @eccsize = 512 (thus, m=13 is the smallest integer such that 2^m-1 > 512*8)
+ * @eccbytes = 7 (7 bytes are required to store m*t = 13*4 = 52 bits)
+ */
+struct nand_bch_control *nand_bch_init(struct mtd_info *mtd)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ unsigned int m, t, eccsteps, i;
+ struct nand_bch_control *nbc = NULL;
+ unsigned char *erased_page;
+ unsigned int eccsize = nand->ecc.size;
+ unsigned int eccbytes = nand->ecc.bytes;
+ unsigned int eccstrength = nand->ecc.strength;
+
+ if (!eccbytes && eccstrength) {
+ eccbytes = DIV_ROUND_UP(eccstrength * fls(8 * eccsize), 8);
+ nand->ecc.bytes = eccbytes;
+ }
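+ /*
+ * Worked example (illustrative numbers): for eccstrength = 8 and
+ * eccsize = 512, fls(8 * 512) = fls(4096) = 13, so eccbytes =
+ * DIV_ROUND_UP(8 * 13, 8) = 13 bytes of ECC per 512-byte step.
+ */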
+
+ if (!eccsize || !eccbytes) {
+ pr_warn("ecc parameters not supplied\n");
+ goto fail;
+ }
+
+ m = fls(1+8*eccsize);
+ t = (eccbytes*8)/m;
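+ /*
+ * E.g. with the values from the kerneldoc example above: eccsize = 512
+ * gives m = fls(4097) = 13, and eccbytes = 7 gives t = 56 / 13 = 4,
+ * i.e. 4-bit correction over 512-byte codewords.
+ */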
+
+ nbc = kzalloc(sizeof(*nbc), GFP_KERNEL);
+ if (!nbc)
+ goto fail;
+
+ nbc->bch = bch_init(m, t, 0, false);
+ if (!nbc->bch)
+ goto fail;
+
+ /* verify that eccbytes has the expected value */
+ if (nbc->bch->ecc_bytes != eccbytes) {
+ pr_warn("invalid eccbytes %u, should be %u\n",
+ eccbytes, nbc->bch->ecc_bytes);
+ goto fail;
+ }
+
+ eccsteps = mtd->writesize/eccsize;
+
+ /* Check that we have an oob layout description. */
+ if (!mtd->ooblayout) {
+ pr_warn("missing oob scheme");
+ goto fail;
+ }
+
+ /* sanity checks */
+ if (8*(eccsize+eccbytes) >= (1 << m)) {
+ pr_warn("eccsize %u is too large\n", eccsize);
+ goto fail;
+ }
+
+ /*
+ * ecc->steps and ecc->total might be used by mtd->ooblayout->ecc(),
+ * which is called by mtd_ooblayout_count_eccbytes().
+ * Make sure they are properly initialized before calling
+ * mtd_ooblayout_count_eccbytes().
+ * FIXME: we should probably rework the sequencing in nand_scan_tail()
+ * to avoid setting those fields twice.
+ */
+ nand->ecc.steps = eccsteps;
+ nand->ecc.total = eccsteps * eccbytes;
+ nand->base.ecc.ctx.total = nand->ecc.total;
+ if (mtd_ooblayout_count_eccbytes(mtd) != (eccsteps*eccbytes)) {
+ pr_warn("invalid ecc layout\n");
+ goto fail;
+ }
+
+ nbc->eccmask = kzalloc(eccbytes, GFP_KERNEL);
+ nbc->errloc = kmalloc_array(t, sizeof(*nbc->errloc), GFP_KERNEL);
+ if (!nbc->eccmask || !nbc->errloc)
+ goto fail;
+ /*
+ * compute and store the inverted ecc of an erased ecc block
+ */
+ erased_page = kmalloc(eccsize, GFP_KERNEL);
+ if (!erased_page)
+ goto fail;
+
+ memset(erased_page, 0xff, eccsize);
+ bch_encode(nbc->bch, erased_page, eccsize, nbc->eccmask);
+ kfree(erased_page);
+
+ for (i = 0; i < eccbytes; i++)
+ nbc->eccmask[i] ^= 0xff;
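+ /*
+ * Why this works: nand_bch_calculate_ecc() XORs the computed ECC with
+ * this mask, so for an all-0xff (erased) page the calculated ECC is
+ * ECC(0xff..) ^ ~ECC(0xff..) = 0xff.., matching the all-0xff ECC bytes
+ * read from an erased OOB area, and the page decodes cleanly.
+ */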
+
+ if (!eccstrength)
+ nand->ecc.strength = (eccbytes * 8) / fls(8 * eccsize);
+
+ return nbc;
+fail:
+ nand_bch_free(nbc);
+ return NULL;
+}
+EXPORT_SYMBOL(nand_bch_init);
+
+/**
+ * nand_bch_free - [NAND Interface] Release NAND BCH ECC resources
+ * @nbc: NAND BCH control structure
+ */
+void nand_bch_free(struct nand_bch_control *nbc)
+{
+ if (nbc) {
+ bch_free(nbc->bch);
+ kfree(nbc->errloc);
+ kfree(nbc->eccmask);
+ kfree(nbc);
+ }
+}
+EXPORT_SYMBOL(nand_bch_free);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ivan Djelic <ivan.djelic@parrot.com>");
+MODULE_DESCRIPTION("NAND software BCH ECC support");
diff --git a/drivers/mtd/nand/raw/nand_ecc.c b/drivers/mtd/nand/raw/nand_ecc.c
new file mode 100644
index 000000000..b6a46b1b7
--- /dev/null
+++ b/drivers/mtd/nand/raw/nand_ecc.c
@@ -0,0 +1,484 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * This file contains an ECC algorithm that detects and corrects 1 bit
+ * errors in a 256 byte block of data.
+ *
+ * Copyright © 2008 Koninklijke Philips Electronics NV.
+ * Author: Frans Meulenbroeks
+ *
+ * Completely replaces the previous ECC implementation which was written by:
+ * Steven J. Hill (sjhill@realitydiluted.com)
+ * Thomas Gleixner (tglx@linutronix.de)
+ *
+ * Information on how this algorithm works and how it was developed
+ * can be found in Documentation/driver-api/mtd/nand_ecc.rst
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <asm/byteorder.h>
+
+/*
+ * invparity is a 256 byte table that contains the odd parity
+ * for each byte. So if the number of bits in a byte is even,
+ * the array element is 1, and when the number of bits is odd
+ * the array eleemnt is 0.
+ */
+static const char invparity[256] = {
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1
+};
+
+/*
+ * bitsperbyte contains the number of bits set in each byte value;
+ * this is only used for testing and repairing parity
+ * (a precalculated value slightly improves performance)
+ */
+static const char bitsperbyte[256] = {
+ 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
+ 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+ 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+ 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+ 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+ 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8,
+};
+
+/*
+ * addressbits is a lookup table to filter out the bits from the xor-ed
+ * ECC data that identify the faulty location.
+ * This is only used for repairing parity; see the comments in
+ * nand_correct_data() for more details.
+ */
+static const char addressbits[256] = {
+ 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
+ 0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
+ 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
+ 0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
+ 0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
+ 0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
+ 0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
+ 0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
+ 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
+ 0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
+ 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
+ 0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
+ 0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
+ 0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
+ 0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
+ 0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
+ 0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
+ 0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
+ 0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
+ 0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
+ 0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
+ 0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f,
+ 0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
+ 0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f,
+ 0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
+ 0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
+ 0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
+ 0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
+ 0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
+ 0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f,
+ 0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
+ 0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f
+};
+
+/**
+ * __nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte
+ * block
+ * @buf: input buffer with raw data
+ * @eccsize: data bytes per ECC step (256 or 512)
+ * @code: output buffer with ECC
+ * @sm_order: Smart Media byte ordering
+ */
+void __nand_calculate_ecc(const unsigned char *buf, unsigned int eccsize,
+ unsigned char *code, bool sm_order)
+{
+ int i;
+ const uint32_t *bp = (uint32_t *)buf;
+ /* 256 or 512 bytes/ecc */
+ const uint32_t eccsize_mult = eccsize >> 8;
+ uint32_t cur; /* current value in buffer */
+ /* rp0..rp15..rp17 are the various accumulated parities (per byte) */
+ uint32_t rp0, rp1, rp2, rp3, rp4, rp5, rp6, rp7;
+ uint32_t rp8, rp9, rp10, rp11, rp12, rp13, rp14, rp15, rp16;
+ uint32_t rp17;
+ uint32_t par; /* the cumulative parity for all data */
+ uint32_t tmppar; /* the cumulative parity for this iteration;
+ for rp12, rp14 and rp16 at the end of the
+ loop */
+
+ par = 0;
+ rp4 = 0;
+ rp6 = 0;
+ rp8 = 0;
+ rp10 = 0;
+ rp12 = 0;
+ rp14 = 0;
+ rp16 = 0;
+
+ /*
+ * The loop is unrolled a number of times; this avoids if statements
+ * deciding which rp value to update. Also we process the data by
+ * longwords.
+ * Note: passing unaligned data might give a performance penalty.
+ * It is assumed that the buffers are aligned.
+ * tmppar is the cumulative sum of this iteration; it is needed for
+ * calculating rp12, rp14, rp16 and par, and is also used as a
+ * performance improvement for rp6, rp8 and rp10.
+ */
+ for (i = 0; i < eccsize_mult << 2; i++) {
+ cur = *bp++;
+ tmppar = cur;
+ rp4 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp6 ^= tmppar;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp4 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp8 ^= tmppar;
+
+ cur = *bp++;
+ tmppar ^= cur;
+ rp4 ^= cur;
+ rp6 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp6 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp4 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp10 ^= tmppar;
+
+ cur = *bp++;
+ tmppar ^= cur;
+ rp4 ^= cur;
+ rp6 ^= cur;
+ rp8 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp6 ^= cur;
+ rp8 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp4 ^= cur;
+ rp8 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp8 ^= cur;
+
+ cur = *bp++;
+ tmppar ^= cur;
+ rp4 ^= cur;
+ rp6 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp6 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp4 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+
+ par ^= tmppar;
+ if ((i & 0x1) == 0)
+ rp12 ^= tmppar;
+ if ((i & 0x2) == 0)
+ rp14 ^= tmppar;
+ if (eccsize_mult == 2 && (i & 0x4) == 0)
+ rp16 ^= tmppar;
+ }
+
+ /*
+ * Handle the fact that we use longword operations: we'll bring
+ * rp4..rp14/rp16 back to single byte entities by shifting and
+ * xoring; first fold the upper and lower 16 bits, then the upper
+ * and lower 8 bits.
+ */
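+ /*
+ * E.g. (illustrative value): rp4 = 0x12345678 reduces to the XOR of
+ * its four bytes, 0x12 ^ 0x34 ^ 0x56 ^ 0x78 = 0x08, after the two
+ * folds and the final mask.
+ */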
+ rp4 ^= (rp4 >> 16);
+ rp4 ^= (rp4 >> 8);
+ rp4 &= 0xff;
+ rp6 ^= (rp6 >> 16);
+ rp6 ^= (rp6 >> 8);
+ rp6 &= 0xff;
+ rp8 ^= (rp8 >> 16);
+ rp8 ^= (rp8 >> 8);
+ rp8 &= 0xff;
+ rp10 ^= (rp10 >> 16);
+ rp10 ^= (rp10 >> 8);
+ rp10 &= 0xff;
+ rp12 ^= (rp12 >> 16);
+ rp12 ^= (rp12 >> 8);
+ rp12 &= 0xff;
+ rp14 ^= (rp14 >> 16);
+ rp14 ^= (rp14 >> 8);
+ rp14 &= 0xff;
+ if (eccsize_mult == 2) {
+ rp16 ^= (rp16 >> 16);
+ rp16 ^= (rp16 >> 8);
+ rp16 &= 0xff;
+ }
+
+ /*
+ * we also need to calculate the row parity for rp0..rp3
+ * This is present in par, because par is now
+ * rp3 rp3 rp2 rp2 in little endian and
+ * rp2 rp2 rp3 rp3 in big endian
+ * as well as
+ * rp1 rp0 rp1 rp0 in little endian and
+ * rp0 rp1 rp0 rp1 in big endian
+ * First calculate rp2 and rp3
+ */
+#ifdef __BIG_ENDIAN
+ rp2 = (par >> 16);
+ rp2 ^= (rp2 >> 8);
+ rp2 &= 0xff;
+ rp3 = par & 0xffff;
+ rp3 ^= (rp3 >> 8);
+ rp3 &= 0xff;
+#else
+ rp3 = (par >> 16);
+ rp3 ^= (rp3 >> 8);
+ rp3 &= 0xff;
+ rp2 = par & 0xffff;
+ rp2 ^= (rp2 >> 8);
+ rp2 &= 0xff;
+#endif
+
+ /* reduce par to 16 bits then calculate rp1 and rp0 */
+ par ^= (par >> 16);
+#ifdef __BIG_ENDIAN
+ rp0 = (par >> 8) & 0xff;
+ rp1 = (par & 0xff);
+#else
+ rp1 = (par >> 8) & 0xff;
+ rp0 = (par & 0xff);
+#endif
+
+ /* finally reduce par to 8 bits */
+ par ^= (par >> 8);
+ par &= 0xff;
+
+ /*
+ * and calculate rp5..rp15..rp17
+ * note that par = rp4 ^ rp5 and due to the commutative property
+ * of the ^ operator we can say:
+ * rp5 = (par ^ rp4);
+ * The & 0xff seems superfluous, but benchmarking learned that
+ * leaving it out gives slightly worse results. No idea why, probably
+ * it has to do with the way the pipeline in pentium is organized.
+ */
+ rp5 = (par ^ rp4) & 0xff;
+ rp7 = (par ^ rp6) & 0xff;
+ rp9 = (par ^ rp8) & 0xff;
+ rp11 = (par ^ rp10) & 0xff;
+ rp13 = (par ^ rp12) & 0xff;
+ rp15 = (par ^ rp14) & 0xff;
+ if (eccsize_mult == 2)
+ rp17 = (par ^ rp16) & 0xff;
+
+ /*
+ * Finally calculate the ECC bits.
+ * Again it might seem that there are performance optimisations
+ * possible, but benchmarks showed that on the system this was
+ * developed on, the code below is the fastest.
+ */
+ if (sm_order) {
+ code[0] = (invparity[rp7] << 7) | (invparity[rp6] << 6) |
+ (invparity[rp5] << 5) | (invparity[rp4] << 4) |
+ (invparity[rp3] << 3) | (invparity[rp2] << 2) |
+ (invparity[rp1] << 1) | (invparity[rp0]);
+ code[1] = (invparity[rp15] << 7) | (invparity[rp14] << 6) |
+ (invparity[rp13] << 5) | (invparity[rp12] << 4) |
+ (invparity[rp11] << 3) | (invparity[rp10] << 2) |
+ (invparity[rp9] << 1) | (invparity[rp8]);
+ } else {
+ code[1] = (invparity[rp7] << 7) | (invparity[rp6] << 6) |
+ (invparity[rp5] << 5) | (invparity[rp4] << 4) |
+ (invparity[rp3] << 3) | (invparity[rp2] << 2) |
+ (invparity[rp1] << 1) | (invparity[rp0]);
+ code[0] = (invparity[rp15] << 7) | (invparity[rp14] << 6) |
+ (invparity[rp13] << 5) | (invparity[rp12] << 4) |
+ (invparity[rp11] << 3) | (invparity[rp10] << 2) |
+ (invparity[rp9] << 1) | (invparity[rp8]);
+ }
+
+ if (eccsize_mult == 1)
+ code[2] =
+ (invparity[par & 0xf0] << 7) |
+ (invparity[par & 0x0f] << 6) |
+ (invparity[par & 0xcc] << 5) |
+ (invparity[par & 0x33] << 4) |
+ (invparity[par & 0xaa] << 3) |
+ (invparity[par & 0x55] << 2) |
+ 3;
+ else
+ code[2] =
+ (invparity[par & 0xf0] << 7) |
+ (invparity[par & 0x0f] << 6) |
+ (invparity[par & 0xcc] << 5) |
+ (invparity[par & 0x33] << 4) |
+ (invparity[par & 0xaa] << 3) |
+ (invparity[par & 0x55] << 2) |
+ (invparity[rp17] << 1) |
+ (invparity[rp16] << 0);
+}
+EXPORT_SYMBOL(__nand_calculate_ecc);
+
+/**
+ * nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte
+ * block
+ * @chip: NAND chip object
+ * @buf: input buffer with raw data
+ * @code: output buffer with ECC
+ */
+int nand_calculate_ecc(struct nand_chip *chip, const unsigned char *buf,
+ unsigned char *code)
+{
+ bool sm_order = chip->ecc.options & NAND_ECC_SOFT_HAMMING_SM_ORDER;
+
+ __nand_calculate_ecc(buf, chip->ecc.size, code, sm_order);
+
+ return 0;
+}
+EXPORT_SYMBOL(nand_calculate_ecc);
+
+/**
+ * __nand_correct_data - [NAND Interface] Detect and correct bit error(s)
+ * @buf: raw data read from the chip
+ * @read_ecc: ECC from the chip
+ * @calc_ecc: the ECC calculated from raw data
+ * @eccsize: data bytes per ECC step (256 or 512)
+ * @sm_order: Smart Media byte order
+ *
+ * Detect and correct a 1 bit error for an eccsize-byte block
+ */
+int __nand_correct_data(unsigned char *buf,
+ unsigned char *read_ecc, unsigned char *calc_ecc,
+ unsigned int eccsize, bool sm_order)
+{
+ unsigned char b0, b1, b2, bit_addr;
+ unsigned int byte_addr;
+ /* 256 or 512 bytes/ecc */
+ const uint32_t eccsize_mult = eccsize >> 8;
+
+ /*
+ * b0 to b2 indicate which bit is faulty (if any); we might need the
+ * xor results more than once, so keep them in local vars.
+ */
+ if (sm_order) {
+ b0 = read_ecc[0] ^ calc_ecc[0];
+ b1 = read_ecc[1] ^ calc_ecc[1];
+ } else {
+ b0 = read_ecc[1] ^ calc_ecc[1];
+ b1 = read_ecc[0] ^ calc_ecc[0];
+ }
+
+ b2 = read_ecc[2] ^ calc_ecc[2];
+
+ /* check if there are any bitfaults */
+
+ /* repeated if statements are slightly more efficient than switch ... */
+ /* ordered by likelihood */
+
+ if ((b0 | b1 | b2) == 0)
+ return 0; /* no error */
+
+ if ((((b0 ^ (b0 >> 1)) & 0x55) == 0x55) &&
+ (((b1 ^ (b1 >> 1)) & 0x55) == 0x55) &&
+ ((eccsize_mult == 1 && ((b2 ^ (b2 >> 1)) & 0x54) == 0x54) ||
+ (eccsize_mult == 2 && ((b2 ^ (b2 >> 1)) & 0x55) == 0x55))) {
+ /* single bit error */
+ /*
+ * rp17/rp15/13/11/9/7/5/3/1 indicate which byte is the faulty
+ * byte, cp 5/3/1 indicate the faulty bit.
+ * A lookup table (called addressbits) is used to filter
+ * the bits from the byte they are in.
+ * A marginal optimisation is possible by having three different
+ * lookup tables: one as we have now (for b0), one for b2 (that
+ * would avoid the >> 1), and one for b1 (with all values << 4).
+ * However, it was felt that introducing two more tables hardly
+ * justifies the gain.
+ *
+ * The b2 shift is there to get rid of the lowest two bits.
+ * We could also do addressbits[b2] >> 1 but for the
+ * performance it does not make any difference
+ */
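+ /*
+ * Worked example (illustrative): a bitflip at byte 0, bit 0 of a
+ * 256-byte block yields b0 = b1 = 0x55 and b2 = 0x54, so byte_addr =
+ * (addressbits[0x55] << 4) + addressbits[0x55] = 0 and bit_addr =
+ * addressbits[0x54 >> 2] = 0.
+ */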
+ if (eccsize_mult == 1)
+ byte_addr = (addressbits[b1] << 4) + addressbits[b0];
+ else
+ byte_addr = (addressbits[b2 & 0x3] << 8) +
+ (addressbits[b1] << 4) + addressbits[b0];
+ bit_addr = addressbits[b2 >> 2];
+ /* flip the bit */
+ buf[byte_addr] ^= (1 << bit_addr);
+ return 1;
+
+ }
+ /* count nr of bits; use table lookup, faster than calculating it */
+ if ((bitsperbyte[b0] + bitsperbyte[b1] + bitsperbyte[b2]) == 1)
+ return 1; /* error in ECC data; no action needed */
+
+ pr_err("%s: uncorrectable ECC error\n", __func__);
+ return -EBADMSG;
+}
+EXPORT_SYMBOL(__nand_correct_data);
+
+/**
+ * nand_correct_data - [NAND Interface] Detect and correct bit error(s)
+ * @chip: NAND chip object
+ * @buf: raw data read from the chip
+ * @read_ecc: ECC from the chip
+ * @calc_ecc: the ECC calculated from raw data
+ *
+ * Detect and correct a 1 bit error for a 256/512-byte block
+ */
+int nand_correct_data(struct nand_chip *chip, unsigned char *buf,
+ unsigned char *read_ecc, unsigned char *calc_ecc)
+{
+ bool sm_order = chip->ecc.options & NAND_ECC_SOFT_HAMMING_SM_ORDER;
+
+ return __nand_correct_data(buf, read_ecc, calc_ecc, chip->ecc.size,
+ sm_order);
+}
+EXPORT_SYMBOL(nand_correct_data);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Frans Meulenbroeks <fransmeulenbroeks@gmail.com>");
+MODULE_DESCRIPTION("Generic NAND ECC support");
diff --git a/drivers/mtd/nand/raw/nand_esmt.c b/drivers/mtd/nand/raw/nand_esmt.c
new file mode 100644
index 000000000..4412c407a
--- /dev/null
+++ b/drivers/mtd/nand/raw/nand_esmt.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Toradex AG
+ *
+ * Author: Marcel Ziswiler <marcel.ziswiler@toradex.com>
+ */
+
+#include <linux/mtd/rawnand.h>
+#include "internals.h"
+
+static void esmt_nand_decode_id(struct nand_chip *chip)
+{
+ struct nand_device *base = &chip->base;
+ struct nand_ecc_props requirements = {};
+
+ nand_decode_ext_id(chip);
+
+ /* Extract ECC requirements from 5th id byte. */
+ if (chip->id.len >= 5 && nand_is_slc(chip)) {
+ requirements.step_size = 512;
+ switch (chip->id.data[4] & 0x3) {
+ case 0x0:
+ requirements.strength = 4;
+ break;
+ case 0x1:
+ requirements.strength = 2;
+ break;
+ case 0x2:
+ requirements.strength = 1;
+ break;
+ default:
+ WARN(1, "Could not get ECC info");
+ requirements.step_size = 0;
+ break;
+ }
+ }
+
+ nanddev_set_ecc_requirements(base, &requirements);
+}
+
+static int esmt_nand_init(struct nand_chip *chip)
+{
+ if (nand_is_slc(chip))
+ /*
+ * It is known that some ESMT SLC NANDs have been shipped
+ * with the factory bad block markers in the first or last page
+ * of the block, instead of the first or second page. To be on
+ * the safe side, let's check all three locations.
+ */
+ chip->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE |
+ NAND_BBM_LASTPAGE;
+
+ return 0;
+}
+
+const struct nand_manufacturer_ops esmt_nand_manuf_ops = {
+ .detect = esmt_nand_decode_id,
+ .init = esmt_nand_init,
+};
diff --git a/drivers/mtd/nand/raw/nand_hynix.c b/drivers/mtd/nand/raw/nand_hynix.c
new file mode 100644
index 000000000..a9f50c9af
--- /dev/null
+++ b/drivers/mtd/nand/raw/nand_hynix.c
@@ -0,0 +1,721 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2017 Free Electrons
+ * Copyright (C) 2017 NextThing Co
+ *
+ * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
+ */
+
+#include <linux/sizes.h>
+#include <linux/slab.h>
+
+#include "internals.h"
+
+#define NAND_HYNIX_CMD_SET_PARAMS 0x36
+#define NAND_HYNIX_CMD_APPLY_PARAMS 0x16
+
+#define NAND_HYNIX_1XNM_RR_REPEAT 8
+
+/**
+ * struct hynix_read_retry - read-retry data
+ * @nregs: number of registers to set when applying a new read-retry mode
+ * @regs: register offsets (NAND chip dependent)
+ * @values: array of values to set in registers. The array size is equal to
+ * (nregs * nmodes)
+ */
+struct hynix_read_retry {
+ int nregs;
+ const u8 *regs;
+ u8 values[];
+};
+
+/**
+ * struct hynix_nand - private Hynix NAND struct
+ * @read_retry: read-retry information
+ */
+struct hynix_nand {
+ const struct hynix_read_retry *read_retry;
+};
+
+/**
+ * struct hynix_read_retry_otp - structure describing the read-retry OTP
+ * area
+ * @nregs: number of Hynix private registers to set before reading the
+ * OTP area
+ * @regs: registers that should be configured
+ * @values: values that should be set in regs
+ * @page: the address to pass to the READ_PAGE command. Depends on the NAND
+ * chip
+ * @size: size of the read-retry OTP section
+ */
+struct hynix_read_retry_otp {
+ int nregs;
+ const u8 *regs;
+ const u8 *values;
+ int page;
+ int size;
+};
+
+static bool hynix_nand_has_valid_jedecid(struct nand_chip *chip)
+{
+ u8 jedecid[5] = { };
+ int ret;
+
+ ret = nand_readid_op(chip, 0x40, jedecid, sizeof(jedecid));
+ if (ret)
+ return false;
+
+ return !strncmp("JEDEC", jedecid, sizeof(jedecid));
+}
+
+static int hynix_nand_cmd_op(struct nand_chip *chip, u8 cmd)
+{
+ if (nand_has_exec_op(chip)) {
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(cmd, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->legacy.cmdfunc(chip, cmd, -1, -1);
+
+ return 0;
+}
+
+static int hynix_nand_reg_write_op(struct nand_chip *chip, u8 addr, u8 val)
+{
+ u16 column = ((u16)addr << 8) | addr;
+
+ if (nand_has_exec_op(chip)) {
+ struct nand_op_instr instrs[] = {
+ NAND_OP_ADDR(1, &addr, 0),
+ NAND_OP_8BIT_DATA_OUT(1, &val, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->legacy.cmdfunc(chip, NAND_CMD_NONE, column, -1);
+ chip->legacy.write_byte(chip, val);
+
+ return 0;
+}
+
+static int hynix_nand_setup_read_retry(struct nand_chip *chip, int retry_mode)
+{
+ struct hynix_nand *hynix = nand_get_manufacturer_data(chip);
+ const u8 *values;
+ int i, ret;
+
+ values = hynix->read_retry->values +
+ (retry_mode * hynix->read_retry->nregs);
+
+ /* Enter 'Set Hynix Parameters' mode */
+ ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_SET_PARAMS);
+ if (ret)
+ return ret;
+
+ /*
+ * Configure the NAND in the requested read-retry mode.
+ * This is done by setting pre-defined values in internal NAND
+ * registers.
+ *
+ * The set of registers is NAND specific, and the values are either
+ * predefined or extracted from an OTP area on the NAND (values are
+ * probably tweaked at production in this case).
+ */
+ for (i = 0; i < hynix->read_retry->nregs; i++) {
+ ret = hynix_nand_reg_write_op(chip, hynix->read_retry->regs[i],
+ values[i]);
+ if (ret)
+ return ret;
+ }
+
+ /* Apply the new settings. */
+ return hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_APPLY_PARAMS);
+}
+
+/**
+ * hynix_get_majority - get the value that occurs most often in a given
+ * set of values
+ * @in: the array of values to test
+ * @repeat: the size of the in array
+ * @out: pointer used to store the output value
+ *
+ * This function implements the 'majority check' logic that is supposed to
+ * overcome the unreliability of MLC NANDs when reading the OTP area storing
+ * the read-retry parameters.
+ *
+ * It's based on a pretty simple assumption: if we repeat the same value
+ * several times and then take the one that occurs most often, we should
+ * find the correct value.
+ * Let's hope this dummy algorithm prevents us from losing the read-retry
+ * parameters.
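+ *
+ * Illustrative example: with in = { 0x52, 0x52, 0x13, 0x52, 0x52, 0x00,
+ * 0x52, 0x52 } and repeat = 8, 0x52 occurs six times, which is more than
+ * repeat / 2, so *out is set to 0x52 and 0 is returned.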
+ */
+static int hynix_get_majority(const u8 *in, int repeat, u8 *out)
+{
+ int i, j, half = repeat / 2;
+
+ /*
+ * We only test the first half of the in array because we must ensure
+ * that the value is at least occurring repeat / 2 times.
+ *
+ * This loop is suboptimal since we may count the occurrences of the
+ * same value several times, but we are doing that on small sets, which
+ * makes it acceptable.
+ */
+ for (i = 0; i < half; i++) {
+ int cnt = 0;
+ u8 val = in[i];
+
+ /* Count all values that are matching the one at index i. */
+ for (j = i + 1; j < repeat; j++) {
+ if (in[j] == val)
+ cnt++;
+ }
+
+ /* We found a value occurring more than repeat / 2 times. */
+ if (cnt > half) {
+ *out = val;
+ return 0;
+ }
+ }
+
+ return -EIO;
+}
+
+static int hynix_read_rr_otp(struct nand_chip *chip,
+ const struct hynix_read_retry_otp *info,
+ void *buf)
+{
+ int i, ret;
+
+ ret = nand_reset_op(chip);
+ if (ret)
+ return ret;
+
+ ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_SET_PARAMS);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < info->nregs; i++) {
+ ret = hynix_nand_reg_write_op(chip, info->regs[i],
+ info->values[i]);
+ if (ret)
+ return ret;
+ }
+
+ ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_APPLY_PARAMS);
+ if (ret)
+ return ret;
+
+ /* Sequence to enter OTP mode? */
+ ret = hynix_nand_cmd_op(chip, 0x17);
+ if (ret)
+ return ret;
+
+ ret = hynix_nand_cmd_op(chip, 0x4);
+ if (ret)
+ return ret;
+
+ ret = hynix_nand_cmd_op(chip, 0x19);
+ if (ret)
+ return ret;
+
+ /* Now read the page */
+ ret = nand_read_page_op(chip, info->page, 0, buf, info->size);
+ if (ret)
+ return ret;
+
+ /* Put everything back to normal */
+ ret = nand_reset_op(chip);
+ if (ret)
+ return ret;
+
+ ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_SET_PARAMS);
+ if (ret)
+ return ret;
+
+ ret = hynix_nand_reg_write_op(chip, 0x38, 0);
+ if (ret)
+ return ret;
+
+ ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_APPLY_PARAMS);
+ if (ret)
+ return ret;
+
+ return nand_read_page_op(chip, 0, 0, NULL, 0);
+}
+
+#define NAND_HYNIX_1XNM_RR_COUNT_OFFS 0
+#define NAND_HYNIX_1XNM_RR_REG_COUNT_OFFS 8
+#define NAND_HYNIX_1XNM_RR_SET_OFFS(x, setsize, inv) \
+ (16 + ((((x) * 2) + ((inv) ? 1 : 0)) * (setsize)))
+
+static int hynix_mlc_1xnm_rr_value(const u8 *buf, int nmodes, int nregs,
+ int mode, int reg, bool inv, u8 *val)
+{
+ u8 tmp[NAND_HYNIX_1XNM_RR_REPEAT];
+ int val_offs = (mode * nregs) + reg;
+ int set_size = nmodes * nregs;
+ int i, ret;
+
+ for (i = 0; i < NAND_HYNIX_1XNM_RR_REPEAT; i++) {
+ int set_offs = NAND_HYNIX_1XNM_RR_SET_OFFS(i, set_size, inv);
+
+ tmp[i] = buf[val_offs + set_offs];
+ }
+
+ ret = hynix_get_majority(tmp, NAND_HYNIX_1XNM_RR_REPEAT, val);
+ if (ret)
+ return ret;
+
+ if (inv)
+ *val = ~*val;
+
+ return 0;
+}
+
+static u8 hynix_1xnm_mlc_read_retry_regs[] = {
+ 0xcc, 0xbf, 0xaa, 0xab, 0xcd, 0xad, 0xae, 0xaf
+};
+
+static int hynix_mlc_1xnm_rr_init(struct nand_chip *chip,
+ const struct hynix_read_retry_otp *info)
+{
+ struct hynix_nand *hynix = nand_get_manufacturer_data(chip);
+ struct hynix_read_retry *rr = NULL;
+ int ret, i, j;
+ u8 nregs, nmodes;
+ u8 *buf;
+
+ buf = kmalloc(info->size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = hynix_read_rr_otp(chip, info, buf);
+ if (ret)
+ goto out;
+
+ ret = hynix_get_majority(buf, NAND_HYNIX_1XNM_RR_REPEAT,
+ &nmodes);
+ if (ret)
+ goto out;
+
+ ret = hynix_get_majority(buf + NAND_HYNIX_1XNM_RR_REPEAT,
+ NAND_HYNIX_1XNM_RR_REPEAT,
+ &nregs);
+ if (ret)
+ goto out;
+
+ rr = kzalloc(sizeof(*rr) + (nregs * nmodes), GFP_KERNEL);
+ if (!rr) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < nmodes; i++) {
+ for (j = 0; j < nregs; j++) {
+ u8 *val = rr->values + (i * nregs) + j;
+
+ ret = hynix_mlc_1xnm_rr_value(buf, nmodes, nregs, i, j,
+ false, val);
+ if (!ret)
+ continue;
+
+ ret = hynix_mlc_1xnm_rr_value(buf, nmodes, nregs, i, j,
+ true, val);
+ if (ret)
+ goto out;
+ }
+ }
+
+ rr->nregs = nregs;
+ rr->regs = hynix_1xnm_mlc_read_retry_regs;
+ hynix->read_retry = rr;
+ chip->ops.setup_read_retry = hynix_nand_setup_read_retry;
+ chip->read_retries = nmodes;
+
+out:
+ kfree(buf);
+
+ if (ret)
+ kfree(rr);
+
+ return ret;
+}
+
+static const u8 hynix_mlc_1xnm_rr_otp_regs[] = { 0x38 };
+static const u8 hynix_mlc_1xnm_rr_otp_values[] = { 0x52 };
+
+static const struct hynix_read_retry_otp hynix_mlc_1xnm_rr_otps[] = {
+ {
+ .nregs = ARRAY_SIZE(hynix_mlc_1xnm_rr_otp_regs),
+ .regs = hynix_mlc_1xnm_rr_otp_regs,
+ .values = hynix_mlc_1xnm_rr_otp_values,
+ .page = 0x21f,
+ .size = 784
+ },
+ {
+ .nregs = ARRAY_SIZE(hynix_mlc_1xnm_rr_otp_regs),
+ .regs = hynix_mlc_1xnm_rr_otp_regs,
+ .values = hynix_mlc_1xnm_rr_otp_values,
+ .page = 0x200,
+ .size = 528,
+ },
+};
+
+static int hynix_nand_rr_init(struct nand_chip *chip)
+{
+ int i, ret = 0;
+ bool valid_jedecid;
+
+ valid_jedecid = hynix_nand_has_valid_jedecid(chip);
+
+ /*
+ * We only support read-retry for 1xnm NANDs, and those NANDs all
+ * expose a valid JEDEC ID.
+ */
+ if (valid_jedecid) {
+ u8 nand_tech = chip->id.data[5] >> 4;
+
+ /* 1xnm technology */
+ if (nand_tech == 4) {
+ for (i = 0; i < ARRAY_SIZE(hynix_mlc_1xnm_rr_otps);
+ i++) {
+ /*
+ * FIXME: Hynix recommends copying the
+ * read-retry OTP area into a normal page.
+ */
+ ret = hynix_mlc_1xnm_rr_init(chip,
+ hynix_mlc_1xnm_rr_otps + i);
+ if (!ret)
+ break;
+ }
+ }
+ }
+
+ if (ret)
+ pr_warn("failed to initialize read-retry infrastructure");
+
+ return 0;
+}
+
+static void hynix_nand_extract_oobsize(struct nand_chip *chip,
+ bool valid_jedecid)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg;
+ u8 oobsize;
+
+ memorg = nanddev_get_memorg(&chip->base);
+
+ oobsize = ((chip->id.data[3] >> 2) & 0x3) |
+ ((chip->id.data[3] >> 4) & 0x4);
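+ /*
+ * oobsize is a 3-bit field scattered across ID[3]: bits 2-3 form the
+ * two LSBs and bit 6 the MSB. E.g. (hypothetical ID byte) ID[3] = 0x48
+ * gives oobsize = 2 | 4 = 6, i.e. 640 bytes on non-JEDEC parts.
+ */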
+
+ if (valid_jedecid) {
+ switch (oobsize) {
+ case 0:
+ memorg->oobsize = 2048;
+ break;
+ case 1:
+ memorg->oobsize = 1664;
+ break;
+ case 2:
+ memorg->oobsize = 1024;
+ break;
+ case 3:
+ memorg->oobsize = 640;
+ break;
+ default:
+ /*
+ * We should never reach this case, but if that
+ * happens, this probably means Hynix decided to use
+ * a different extended ID format, and we should find
+ * a way to support it.
+ */
+ WARN(1, "Invalid OOB size");
+ break;
+ }
+ } else {
+ switch (oobsize) {
+ case 0:
+ memorg->oobsize = 128;
+ break;
+ case 1:
+ memorg->oobsize = 224;
+ break;
+ case 2:
+ memorg->oobsize = 448;
+ break;
+ case 3:
+ memorg->oobsize = 64;
+ break;
+ case 4:
+ memorg->oobsize = 32;
+ break;
+ case 5:
+ memorg->oobsize = 16;
+ break;
+ case 6:
+ memorg->oobsize = 640;
+ break;
+ default:
+ /*
+ * We should never reach this case, but if that
+ * happens, this probably means Hynix decided to use
+ * a different extended ID format, and we should find
+ * a way to support it.
+ */
+ WARN(1, "Invalid OOB size");
+ break;
+ }
+
+ /*
+ * The datasheet of H27UCG8T2BTR mentions that the "Redundant
+ * Area Size" is encoded "per 8KB" (page size). This chip uses
+ * a page size of 16KiB. The datasheet mentions an OOB size of
+ * 1,280 bytes, but the OOB size encoded in the ID bytes (using
+ * the existing logic above) is 640 bytes.
+ * Update the OOB size for this chip by taking the value
+ * determined above and scaling it to the actual page size (so
+ * the actual OOB size for this chip is: 640 * 16k / 8k).
+ */
+ if (chip->id.data[1] == 0xde)
+ memorg->oobsize *= memorg->pagesize / SZ_8K;
+ }
+
+ mtd->oobsize = memorg->oobsize;
+}
+
+static void hynix_nand_extract_ecc_requirements(struct nand_chip *chip,
+ bool valid_jedecid)
+{
+ struct nand_device *base = &chip->base;
+ struct nand_ecc_props requirements = {};
+ u8 ecc_level = (chip->id.data[4] >> 4) & 0x7;
+
+ if (valid_jedecid) {
+ /* Reference: H27UCG8T2E datasheet */
+ requirements.step_size = 1024;
+
+ switch (ecc_level) {
+ case 0:
+ requirements.step_size = 0;
+ requirements.strength = 0;
+ break;
+ case 1:
+ requirements.strength = 4;
+ break;
+ case 2:
+ requirements.strength = 24;
+ break;
+ case 3:
+ requirements.strength = 32;
+ break;
+ case 4:
+ requirements.strength = 40;
+ break;
+ case 5:
+ requirements.strength = 50;
+ break;
+ case 6:
+ requirements.strength = 60;
+ break;
+ default:
+ /*
+ * We should never reach this case, but if that
+ * happens, this probably means Hynix decided to use
+ * a different extended ID format, and we should find
+ * a way to support it.
+ */
+ WARN(1, "Invalid ECC requirements");
+ }
+ } else {
+ /*
+ * The ECC requirements field meaning depends on the
+ * NAND technology.
+ */
+ u8 nand_tech = chip->id.data[5] & 0x7;
+
+ if (nand_tech < 3) {
+ /* > 26nm, reference: H27UBG8T2A datasheet */
+ if (ecc_level < 5) {
+ requirements.step_size = 512;
+ requirements.strength = 1 << ecc_level;
+ } else if (ecc_level < 7) {
+ if (ecc_level == 5)
+ requirements.step_size = 2048;
+ else
+ requirements.step_size = 1024;
+ requirements.strength = 24;
+ } else {
+ /*
+ * We should never reach this case, but if that
+ * happens, this probably means Hynix decided
+ * to use a different extended ID format, and
+ * we should find a way to support it.
+ */
+ WARN(1, "Invalid ECC requirements");
+ }
+ } else {
+ /* <= 26nm, reference: H27UBG8T2B datasheet */
+ if (!ecc_level) {
+ requirements.step_size = 0;
+ requirements.strength = 0;
+ } else if (ecc_level < 5) {
+ requirements.step_size = 512;
+ requirements.strength = 1 << (ecc_level - 1);
+ } else {
+ requirements.step_size = 1024;
+ requirements.strength = 24 +
+ (8 * (ecc_level - 5));
+ }
+ }
+ }
+
+ nanddev_set_ecc_requirements(base, &requirements);
+}
+
+static void hynix_nand_extract_scrambling_requirements(struct nand_chip *chip,
+ bool valid_jedecid)
+{
+ u8 nand_tech;
+
+ /* We need scrambling on all TLC NANDs */
+ if (nanddev_bits_per_cell(&chip->base) > 2)
+ chip->options |= NAND_NEED_SCRAMBLING;
+
+ /* And on MLC NANDs with sub-3xnm process */
+ if (valid_jedecid) {
+ nand_tech = chip->id.data[5] >> 4;
+
+ /* < 3xnm */
+ if (nand_tech > 0)
+ chip->options |= NAND_NEED_SCRAMBLING;
+ } else {
+ nand_tech = chip->id.data[5] & 0x7;
+
+ /* < 32nm */
+ if (nand_tech > 2)
+ chip->options |= NAND_NEED_SCRAMBLING;
+ }
+}
+
+static void hynix_nand_decode_id(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg;
+ bool valid_jedecid;
+ u8 tmp;
+
+ memorg = nanddev_get_memorg(&chip->base);
+
+ /*
+ * Exclude all SLC NANDs from this advanced detection scheme.
+ * According to the ranges defined in several datasheets, it might
+ * appear that even SLC NANDs could fall in this extended ID scheme.
+ * If that is the case, rework the test to let SLC NANDs go through the
+ * detection process.
+ */
+ if (chip->id.len < 6 || nand_is_slc(chip)) {
+ nand_decode_ext_id(chip);
+ return;
+ }
+
+ /* Extract pagesize */
+ memorg->pagesize = 2048 << (chip->id.data[3] & 0x03);
+ mtd->writesize = memorg->pagesize;
+
+ tmp = (chip->id.data[3] >> 4) & 0x3;
+ /*
+ * When bit7 is set that means we start counting at 1MiB, otherwise
+ * we start counting at 128KiB and shift this value by the content of
+ * ID[3][4:5].
+ * The only exception is when ID[3][4:5] == 3 and ID[3][7] == 0, in
+ * this case the erasesize is set to 768KiB.
+ */
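+ /*
+ * E.g. (hypothetical ID bytes): ID[3] = 0x91 has bit7 set and
+ * ID[3][4:5] = 1, giving a 2MiB eraseblock; ID[3] = 0x31 has bit7
+ * cleared and ID[3][4:5] = 3, the 768KiB special case.
+ */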
+ if (chip->id.data[3] & 0x80) {
+ memorg->pages_per_eraseblock = (SZ_1M << tmp) /
+ memorg->pagesize;
+ mtd->erasesize = SZ_1M << tmp;
+ } else if (tmp == 3) {
+ memorg->pages_per_eraseblock = (SZ_512K + SZ_256K) /
+ memorg->pagesize;
+ mtd->erasesize = SZ_512K + SZ_256K;
+ } else {
+ memorg->pages_per_eraseblock = (SZ_128K << tmp) /
+ memorg->pagesize;
+ mtd->erasesize = SZ_128K << tmp;
+ }
+
+ /*
+ * Modern Toggle DDR NANDs have a valid JEDEC ID even though they are
+ * not exposing a valid JEDEC parameter table.
+ * These NANDs use a different NAND ID scheme.
+ */
+ valid_jedecid = hynix_nand_has_valid_jedecid(chip);
+
+ hynix_nand_extract_oobsize(chip, valid_jedecid);
+ hynix_nand_extract_ecc_requirements(chip, valid_jedecid);
+ hynix_nand_extract_scrambling_requirements(chip, valid_jedecid);
+}
+
+static void hynix_nand_cleanup(struct nand_chip *chip)
+{
+ struct hynix_nand *hynix = nand_get_manufacturer_data(chip);
+
+ if (!hynix)
+ return;
+
+ kfree(hynix->read_retry);
+ kfree(hynix);
+ nand_set_manufacturer_data(chip, NULL);
+}
+
+static int
+h27ucg8t2atrbc_choose_interface_config(struct nand_chip *chip,
+ struct nand_interface_config *iface)
+{
+ onfi_fill_interface_config(chip, iface, NAND_SDR_IFACE, 4);
+
+ return nand_choose_best_sdr_timings(chip, iface, NULL);
+}
+
+static int hynix_nand_init(struct nand_chip *chip)
+{
+ struct hynix_nand *hynix;
+ int ret;
+
+ if (!nand_is_slc(chip))
+ chip->options |= NAND_BBM_LASTPAGE;
+ else
+ chip->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE;
+
+ hynix = kzalloc(sizeof(*hynix), GFP_KERNEL);
+ if (!hynix)
+ return -ENOMEM;
+
+ nand_set_manufacturer_data(chip, hynix);
+
+ if (!strncmp("H27UCG8T2ATR-BC", chip->parameters.model,
+ sizeof("H27UCG8T2ATR-BC") - 1))
+ chip->ops.choose_interface_config =
+ h27ucg8t2atrbc_choose_interface_config;
+
+ ret = hynix_nand_rr_init(chip);
+ if (ret)
+ hynix_nand_cleanup(chip);
+
+ return ret;
+}
+
+const struct nand_manufacturer_ops hynix_nand_manuf_ops = {
+ .detect = hynix_nand_decode_id,
+ .init = hynix_nand_init,
+ .cleanup = hynix_nand_cleanup,
+};
diff --git a/drivers/mtd/nand/raw/nand_ids.c b/drivers/mtd/nand/raw/nand_ids.c
new file mode 100644
index 000000000..b9945791a
--- /dev/null
+++ b/drivers/mtd/nand/raw/nand_ids.c
@@ -0,0 +1,207 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2002 Thomas Gleixner (tglx@linutronix.de)
+ */
+
+#include <linux/sizes.h>
+
+#include "internals.h"
+
+#define LP_OPTIONS 0
+#define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16)
+
+#define SP_OPTIONS NAND_NEED_READRDY
+#define SP_OPTIONS16 (SP_OPTIONS | NAND_BUSWIDTH_16)
+
+/*
+ * The chip ID list:
+ * name, device ID, page size, chip size in MiB, eraseblock size, options
+ *
+ * If page size and eraseblock size are 0, the sizes are taken from the
+ * extended chip ID.
+ */
+struct nand_flash_dev nand_flash_ids[] = {
+ /*
+ * Some incompatible NAND chips share device IDs and so must be
+ * listed by full ID. We list them first so that we can easily identify
+ * the most specific match.
+ */
+ {"TC58NVG0S3E 1G 3.3V 8-bit",
+ { .id = {0x98, 0xd1, 0x90, 0x15, 0x76, 0x14, 0x01, 0x00} },
+ SZ_2K, SZ_128, SZ_128K, 0, 8, 64, NAND_ECC_INFO(1, SZ_512), },
+ {"TC58NVG2S0F 4G 3.3V 8-bit",
+ { .id = {0x98, 0xdc, 0x90, 0x26, 0x76, 0x15, 0x01, 0x08} },
+ SZ_4K, SZ_512, SZ_256K, 0, 8, 224, NAND_ECC_INFO(4, SZ_512) },
+ {"TC58NVG2S0H 4G 3.3V 8-bit",
+ { .id = {0x98, 0xdc, 0x90, 0x26, 0x76, 0x16, 0x08, 0x00} },
+ SZ_4K, SZ_512, SZ_256K, 0, 8, 256, NAND_ECC_INFO(8, SZ_512) },
+ {"TC58NVG3S0F 8G 3.3V 8-bit",
+ { .id = {0x98, 0xd3, 0x90, 0x26, 0x76, 0x15, 0x02, 0x08} },
+ SZ_4K, SZ_1K, SZ_256K, 0, 8, 232, NAND_ECC_INFO(4, SZ_512) },
+ {"TC58NVG5D2 32G 3.3V 8-bit",
+ { .id = {0x98, 0xd7, 0x94, 0x32, 0x76, 0x56, 0x09, 0x00} },
+ SZ_8K, SZ_4K, SZ_1M, 0, 8, 640, NAND_ECC_INFO(40, SZ_1K) },
+ {"TC58NVG6D2 64G 3.3V 8-bit",
+ { .id = {0x98, 0xde, 0x94, 0x82, 0x76, 0x56, 0x04, 0x20} },
+ SZ_8K, SZ_8K, SZ_2M, 0, 8, 640, NAND_ECC_INFO(40, SZ_1K) },
+ {"SDTNRGAMA 64G 3.3V 8-bit",
+ { .id = {0x45, 0xde, 0x94, 0x93, 0x76, 0x50} },
+ SZ_16K, SZ_8K, SZ_4M, 0, 6, 1280, NAND_ECC_INFO(40, SZ_1K) },
+ {"H27UCG8T2ATR-BC 64G 3.3V 8-bit",
+ { .id = {0xad, 0xde, 0x94, 0xda, 0x74, 0xc4} },
+ SZ_8K, SZ_8K, SZ_2M, NAND_NEED_SCRAMBLING, 6, 640,
+ NAND_ECC_INFO(40, SZ_1K) },
+ {"TH58NVG2S3HBAI4 4G 3.3V 8-bit",
+ { .id = {0x98, 0xdc, 0x91, 0x15, 0x76} },
+ SZ_2K, SZ_512, SZ_128K, 0, 5, 128, NAND_ECC_INFO(8, SZ_512) },
+
+ LEGACY_ID_NAND("NAND 4MiB 5V 8-bit", 0x6B, 4, SZ_8K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE3, 4, SZ_8K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE5, 4, SZ_8K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 8MiB 3,3V 8-bit", 0xD6, 8, SZ_8K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 8MiB 3,3V 8-bit", 0xE6, 8, SZ_8K, SP_OPTIONS),
+
+ LEGACY_ID_NAND("NAND 16MiB 1,8V 8-bit", 0x33, 16, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 16MiB 3,3V 8-bit", 0x73, 16, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 16MiB 1,8V 16-bit", 0x43, 16, SZ_16K, SP_OPTIONS16),
+ LEGACY_ID_NAND("NAND 16MiB 3,3V 16-bit", 0x53, 16, SZ_16K, SP_OPTIONS16),
+
+ LEGACY_ID_NAND("NAND 32MiB 1,8V 8-bit", 0x35, 32, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 32MiB 3,3V 8-bit", 0x75, 32, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 32MiB 1,8V 16-bit", 0x45, 32, SZ_16K, SP_OPTIONS16),
+ LEGACY_ID_NAND("NAND 32MiB 3,3V 16-bit", 0x55, 32, SZ_16K, SP_OPTIONS16),
+
+ LEGACY_ID_NAND("NAND 64MiB 1,8V 8-bit", 0x36, 64, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 64MiB 3,3V 8-bit", 0x76, 64, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 64MiB 1,8V 16-bit", 0x46, 64, SZ_16K, SP_OPTIONS16),
+ LEGACY_ID_NAND("NAND 64MiB 3,3V 16-bit", 0x56, 64, SZ_16K, SP_OPTIONS16),
+
+ LEGACY_ID_NAND("NAND 128MiB 1,8V 8-bit", 0x78, 128, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 128MiB 1,8V 8-bit", 0x39, 128, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 128MiB 3,3V 8-bit", 0x79, 128, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 128MiB 1,8V 16-bit", 0x72, 128, SZ_16K, SP_OPTIONS16),
+ LEGACY_ID_NAND("NAND 128MiB 1,8V 16-bit", 0x49, 128, SZ_16K, SP_OPTIONS16),
+ LEGACY_ID_NAND("NAND 128MiB 3,3V 16-bit", 0x74, 128, SZ_16K, SP_OPTIONS16),
+ LEGACY_ID_NAND("NAND 128MiB 3,3V 16-bit", 0x59, 128, SZ_16K, SP_OPTIONS16),
+
+ LEGACY_ID_NAND("NAND 256MiB 3,3V 8-bit", 0x71, 256, SZ_16K, SP_OPTIONS),
+
+ /*
+ * These are the new chips with large page size. Their page size and
+ * eraseblock size are determined from the extended ID bytes.
+ */
+
+ /* 512 Megabit */
+ EXTENDED_ID_NAND("NAND 64MiB 1,8V 8-bit", 0xA2, 64, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 64MiB 1,8V 8-bit", 0xA0, 64, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 64MiB 3,3V 8-bit", 0xF2, 64, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 64MiB 3,3V 8-bit", 0xD0, 64, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 64MiB 3,3V 8-bit", 0xF0, 64, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 64MiB 1,8V 16-bit", 0xB2, 64, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 64MiB 1,8V 16-bit", 0xB0, 64, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 64MiB 3,3V 16-bit", 0xC2, 64, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 64MiB 3,3V 16-bit", 0xC0, 64, LP_OPTIONS16),
+
+ /* 1 Gigabit */
+ EXTENDED_ID_NAND("NAND 128MiB 1,8V 8-bit", 0xA1, 128, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 128MiB 3,3V 8-bit", 0xF1, 128, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 128MiB 3,3V 8-bit", 0xD1, 128, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 128MiB 1,8V 16-bit", 0xB1, 128, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 128MiB 3,3V 16-bit", 0xC1, 128, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 128MiB 1,8V 16-bit", 0xAD, 128, LP_OPTIONS16),
+
+ /* 2 Gigabit */
+ EXTENDED_ID_NAND("NAND 256MiB 1,8V 8-bit", 0xAA, 256, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 256MiB 3,3V 8-bit", 0xDA, 256, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 256MiB 1,8V 16-bit", 0xBA, 256, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 256MiB 3,3V 16-bit", 0xCA, 256, LP_OPTIONS16),
+
+ /* 4 Gigabit */
+ EXTENDED_ID_NAND("NAND 512MiB 1,8V 8-bit", 0xAC, 512, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 512MiB 3,3V 8-bit", 0xDC, 512, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 512MiB 1,8V 16-bit", 0xBC, 512, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 512MiB 3,3V 16-bit", 0xCC, 512, LP_OPTIONS16),
+
+ /* 8 Gigabit */
+ EXTENDED_ID_NAND("NAND 1GiB 1,8V 8-bit", 0xA3, 1024, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 1GiB 3,3V 8-bit", 0xD3, 1024, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 1GiB 1,8V 16-bit", 0xB3, 1024, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 1GiB 3,3V 16-bit", 0xC3, 1024, LP_OPTIONS16),
+
+ /* 16 Gigabit */
+ EXTENDED_ID_NAND("NAND 2GiB 1,8V 8-bit", 0xA5, 2048, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 2GiB 3,3V 8-bit", 0xD5, 2048, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 2GiB 1,8V 16-bit", 0xB5, 2048, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 2GiB 3,3V 16-bit", 0xC5, 2048, LP_OPTIONS16),
+
+ /* 32 Gigabit */
+ EXTENDED_ID_NAND("NAND 4GiB 1,8V 8-bit", 0xA7, 4096, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 4GiB 3,3V 8-bit", 0xD7, 4096, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 4GiB 1,8V 16-bit", 0xB7, 4096, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 4GiB 3,3V 16-bit", 0xC7, 4096, LP_OPTIONS16),
+
+ /* 64 Gigabit */
+ EXTENDED_ID_NAND("NAND 8GiB 1,8V 8-bit", 0xAE, 8192, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 8GiB 3,3V 8-bit", 0xDE, 8192, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 8GiB 1,8V 16-bit", 0xBE, 8192, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 8GiB 3,3V 16-bit", 0xCE, 8192, LP_OPTIONS16),
+
+ /* 128 Gigabit */
+ EXTENDED_ID_NAND("NAND 16GiB 1,8V 8-bit", 0x1A, 16384, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 16GiB 3,3V 8-bit", 0x3A, 16384, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 16GiB 1,8V 16-bit", 0x2A, 16384, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 16GiB 3,3V 16-bit", 0x4A, 16384, LP_OPTIONS16),
+
+ /* 256 Gigabit */
+ EXTENDED_ID_NAND("NAND 32GiB 1,8V 8-bit", 0x1C, 32768, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 32GiB 3,3V 8-bit", 0x3C, 32768, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 32GiB 1,8V 16-bit", 0x2C, 32768, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 32GiB 3,3V 16-bit", 0x4C, 32768, LP_OPTIONS16),
+
+ /* 512 Gigabit */
+ EXTENDED_ID_NAND("NAND 64GiB 1,8V 8-bit", 0x1E, 65536, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 64GiB 3,3V 8-bit", 0x3E, 65536, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 64GiB 1,8V 16-bit", 0x2E, 65536, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 64GiB 3,3V 16-bit", 0x4E, 65536, LP_OPTIONS16),
+
+ {NULL}
+};
+
+/* Manufacturer IDs */
+static const struct nand_manufacturer_desc nand_manufacturer_descs[] = {
+ {NAND_MFR_AMD, "AMD/Spansion", &amd_nand_manuf_ops},
+ {NAND_MFR_ATO, "ATO"},
+ {NAND_MFR_EON, "Eon"},
+ {NAND_MFR_ESMT, "ESMT", &esmt_nand_manuf_ops},
+ {NAND_MFR_FUJITSU, "Fujitsu"},
+ {NAND_MFR_HYNIX, "Hynix", &hynix_nand_manuf_ops},
+ {NAND_MFR_INTEL, "Intel"},
+ {NAND_MFR_MACRONIX, "Macronix", &macronix_nand_manuf_ops},
+ {NAND_MFR_MICRON, "Micron", &micron_nand_manuf_ops},
+ {NAND_MFR_NATIONAL, "National"},
+ {NAND_MFR_RENESAS, "Renesas"},
+ {NAND_MFR_SAMSUNG, "Samsung", &samsung_nand_manuf_ops},
+ {NAND_MFR_SANDISK, "SanDisk"},
+ {NAND_MFR_STMICRO, "ST Micro"},
+ {NAND_MFR_TOSHIBA, "Toshiba", &toshiba_nand_manuf_ops},
+ {NAND_MFR_WINBOND, "Winbond"},
+};
+
+/**
+ * nand_get_manufacturer_desc - Get manufacturer information from the
+ * manufacturer ID
+ * @id: manufacturer ID
+ *
+ * Returns a nand_manufacturer_desc object if the manufacturer is defined
+ * in the NAND manufacturers database, NULL otherwise.
+ */
+const struct nand_manufacturer_desc *nand_get_manufacturer_desc(u8 id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(nand_manufacturer_descs); i++)
+ if (nand_manufacturer_descs[i].id == id)
+ return &nand_manufacturer_descs[i];
+
+ return NULL;
+}
diff --git a/drivers/mtd/nand/raw/nand_jedec.c b/drivers/mtd/nand/raw/nand_jedec.c
new file mode 100644
index 000000000..85b6d9372
--- /dev/null
+++ b/drivers/mtd/nand/raw/nand_jedec.c
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
+ * 2002-2006 Thomas Gleixner (tglx@linutronix.de)
+ *
+ * Credits:
+ * David Woodhouse for adding multichip support
+ *
+ * Aleph One Ltd. and Toby Churchill Ltd. for supporting the
+ * rework for 2K page size chips
+ *
+ * This file contains all JEDEC helpers.
+ */
+
+#include <linux/slab.h>
+
+#include "internals.h"
+
+#define JEDEC_PARAM_PAGES 3
+
+/*
+ * Check if the NAND chip is JEDEC-compliant; returns 1 if it is, 0 otherwise.
+ */
+int nand_jedec_detect(struct nand_chip *chip)
+{
+ struct nand_device *base = &chip->base;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg;
+ struct nand_jedec_params *p;
+ struct jedec_ecc_info *ecc;
+ bool use_datain = false;
+ int jedec_version = 0;
+ char id[5];
+ int i, val, ret;
+ u16 crc;
+
+ memorg = nanddev_get_memorg(&chip->base);
+
+ /* Try JEDEC for unknown chip or LP */
+ ret = nand_readid_op(chip, 0x40, id, sizeof(id));
+ if (ret || strncmp(id, "JEDEC", sizeof(id)))
+ return 0;
+
+ /* JEDEC chip: allocate a buffer to hold its parameter page */
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ if (!nand_has_exec_op(chip) ||
+ !nand_read_data_op(chip, p, sizeof(*p), true, true))
+ use_datain = true;
+
+ for (i = 0; i < JEDEC_PARAM_PAGES; i++) {
+ if (!i)
+ ret = nand_read_param_page_op(chip, 0x40, p,
+ sizeof(*p));
+ else if (use_datain)
+ ret = nand_read_data_op(chip, p, sizeof(*p), true,
+ false);
+ else
+ ret = nand_change_read_column_op(chip, sizeof(*p) * i,
+ p, sizeof(*p), true);
+ if (ret) {
+ ret = 0;
+ goto free_jedec_param_page;
+ }
+
+ crc = onfi_crc16(ONFI_CRC_BASE, (u8 *)p, 510);
+ if (crc == le16_to_cpu(p->crc))
+ break;
+ }
+
+ if (i == JEDEC_PARAM_PAGES) {
+ pr_err("Could not find valid JEDEC parameter page; aborting\n");
+ goto free_jedec_param_page;
+ }
+
+ /* Check version */
+ val = le16_to_cpu(p->revision);
+ if (val & (1 << 2))
+ jedec_version = 10;
+ else if (val & (1 << 1))
+ jedec_version = 1; /* vendor specific version */
+
+ if (!jedec_version) {
+ pr_info("unsupported JEDEC version: %d\n", val);
+ goto free_jedec_param_page;
+ }
+
+ sanitize_string(p->manufacturer, sizeof(p->manufacturer));
+ sanitize_string(p->model, sizeof(p->model));
+ chip->parameters.model = kstrdup(p->model, GFP_KERNEL);
+ if (!chip->parameters.model) {
+ ret = -ENOMEM;
+ goto free_jedec_param_page;
+ }
+
+ memorg->pagesize = le32_to_cpu(p->byte_per_page);
+ mtd->writesize = memorg->pagesize;
+
+	/* See the comment in nand_onfi_detect(). */
+ memorg->pages_per_eraseblock =
+ 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
+ mtd->erasesize = memorg->pages_per_eraseblock * memorg->pagesize;
+
+ memorg->oobsize = le16_to_cpu(p->spare_bytes_per_page);
+ mtd->oobsize = memorg->oobsize;
+
+ memorg->luns_per_target = p->lun_count;
+ memorg->planes_per_lun = 1 << p->multi_plane_addr;
+
+	/* See the comment in nand_onfi_detect(). */
+ memorg->eraseblocks_per_lun =
+ 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
+ memorg->bits_per_cell = p->bits_per_cell;
+
+ if (le16_to_cpu(p->features) & JEDEC_FEATURE_16_BIT_BUS)
+ chip->options |= NAND_BUSWIDTH_16;
+
+ /* ECC info */
+ ecc = &p->ecc_info[0];
+
+ if (ecc->codeword_size >= 9) {
+ struct nand_ecc_props requirements = {
+ .strength = ecc->ecc_bits,
+ .step_size = 1 << ecc->codeword_size,
+ };
+
+ nanddev_set_ecc_requirements(base, &requirements);
+ } else {
+ pr_warn("Invalid codeword size\n");
+ }
+
+ ret = 1;
+
+free_jedec_param_page:
+ kfree(p);
+ return ret;
+}
diff --git a/drivers/mtd/nand/raw/nand_legacy.c b/drivers/mtd/nand/raw/nand_legacy.c
new file mode 100644
index 000000000..2bcc03714
--- /dev/null
+++ b/drivers/mtd/nand/raw/nand_legacy.c
@@ -0,0 +1,643 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
+ * 2002-2006 Thomas Gleixner (tglx@linutronix.de)
+ *
+ * Credits:
+ * David Woodhouse for adding multichip support
+ *
+ * Aleph One Ltd. and Toby Churchill Ltd. for supporting the
+ * rework for 2K page size chips
+ *
+ * This file contains all legacy helpers/code that should be removed
+ * at some point.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/nmi.h>
+
+#include "internals.h"
+
+/**
+ * nand_read_byte - [DEFAULT] read one byte from the chip
+ * @chip: NAND chip object
+ *
+ * Default read function for 8bit buswidth
+ */
+static uint8_t nand_read_byte(struct nand_chip *chip)
+{
+ return readb(chip->legacy.IO_ADDR_R);
+}
+
+/**
+ * nand_read_byte16 - [DEFAULT] read one byte, endianness-aware, from the chip
+ * @chip: NAND chip object
+ *
+ * Default read function for 16bit buswidth with endianness conversion.
+ */
+static uint8_t nand_read_byte16(struct nand_chip *chip)
+{
+ return (uint8_t) cpu_to_le16(readw(chip->legacy.IO_ADDR_R));
+}
+
+/**
+ * nand_select_chip - [DEFAULT] control CE line
+ * @chip: NAND chip object
+ * @chipnr: chipnumber to select, -1 for deselect
+ *
+ * Default select function for 1 chip devices.
+ */
+static void nand_select_chip(struct nand_chip *chip, int chipnr)
+{
+ switch (chipnr) {
+ case -1:
+ chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE,
+ 0 | NAND_CTRL_CHANGE);
+ break;
+ case 0:
+ break;
+
+ default:
+ BUG();
+ }
+}
+
+/**
+ * nand_write_byte - [DEFAULT] write single byte to chip
+ * @chip: NAND chip object
+ * @byte: value to write
+ *
+ * Default function to write a byte to I/O[7:0]
+ */
+static void nand_write_byte(struct nand_chip *chip, uint8_t byte)
+{
+ chip->legacy.write_buf(chip, &byte, 1);
+}
+
+/**
+ * nand_write_byte16 - [DEFAULT] write single byte to a chip with width 16
+ * @chip: NAND chip object
+ * @byte: value to write
+ *
+ * Default function to write a byte to I/O[7:0] on a 16-bit wide chip.
+ */
+static void nand_write_byte16(struct nand_chip *chip, uint8_t byte)
+{
+ uint16_t word = byte;
+
+ /*
+ * It's not entirely clear what should happen to I/O[15:8] when writing
+	 * a byte. The ONFI spec (Revision 3.1; 2012-09-19, Section 2.16) reads:
+ *
+ * When the host supports a 16-bit bus width, only data is
+ * transferred at the 16-bit width. All address and command line
+ * transfers shall use only the lower 8-bits of the data bus. During
+ * command transfers, the host may place any value on the upper
+ * 8-bits of the data bus. During address transfers, the host shall
+ * set the upper 8-bits of the data bus to 00h.
+ *
+ * One user of the write_byte callback is nand_set_features. The
+ * four parameters are specified to be written to I/O[7:0], but this is
+ * neither an address nor a command transfer. Let's assume a 0 on the
+ * upper I/O lines is OK.
+ */
+ chip->legacy.write_buf(chip, (uint8_t *)&word, 2);
+}
+
+/**
+ * nand_write_buf - [DEFAULT] write buffer to chip
+ * @chip: NAND chip object
+ * @buf: data buffer
+ * @len: number of bytes to write
+ *
+ * Default write function for 8bit buswidth.
+ */
+static void nand_write_buf(struct nand_chip *chip, const uint8_t *buf, int len)
+{
+ iowrite8_rep(chip->legacy.IO_ADDR_W, buf, len);
+}
+
+/**
+ * nand_read_buf - [DEFAULT] read chip data into buffer
+ * @chip: NAND chip object
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ *
+ * Default read function for 8bit buswidth.
+ */
+static void nand_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
+{
+ ioread8_rep(chip->legacy.IO_ADDR_R, buf, len);
+}
+
+/**
+ * nand_write_buf16 - [DEFAULT] write buffer to chip
+ * @chip: NAND chip object
+ * @buf: data buffer
+ * @len: number of bytes to write
+ *
+ * Default write function for 16bit buswidth.
+ */
+static void nand_write_buf16(struct nand_chip *chip, const uint8_t *buf,
+ int len)
+{
+ u16 *p = (u16 *) buf;
+
+ iowrite16_rep(chip->legacy.IO_ADDR_W, p, len >> 1);
+}
+
+/**
+ * nand_read_buf16 - [DEFAULT] read chip data into buffer
+ * @chip: NAND chip object
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ *
+ * Default read function for 16bit buswidth.
+ */
+static void nand_read_buf16(struct nand_chip *chip, uint8_t *buf, int len)
+{
+ u16 *p = (u16 *) buf;
+
+ ioread16_rep(chip->legacy.IO_ADDR_R, p, len >> 1);
+}
+
+/**
+ * panic_nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
+ * @chip: NAND chip object
+ * @timeo: Timeout
+ *
+ * Helper function for nand_wait_ready used when needing to wait in interrupt
+ * context.
+ */
+static void panic_nand_wait_ready(struct nand_chip *chip, unsigned long timeo)
+{
+ int i;
+
+ /* Wait for the device to get ready */
+ for (i = 0; i < timeo; i++) {
+ if (chip->legacy.dev_ready(chip))
+ break;
+ touch_softlockup_watchdog();
+ mdelay(1);
+ }
+}
+
+/**
+ * nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
+ * @chip: NAND chip object
+ *
+ * Wait for the ready pin after a command, and warn if a timeout occurs.
+ */
+void nand_wait_ready(struct nand_chip *chip)
+{
+ unsigned long timeo = 400;
+
+ if (in_interrupt() || oops_in_progress)
+ return panic_nand_wait_ready(chip, timeo);
+
+ /* Wait until command is processed or timeout occurs */
+ timeo = jiffies + msecs_to_jiffies(timeo);
+ do {
+ if (chip->legacy.dev_ready(chip))
+ return;
+ cond_resched();
+ } while (time_before(jiffies, timeo));
+
+ if (!chip->legacy.dev_ready(chip))
+ pr_warn_ratelimited("timeout while waiting for chip to become ready\n");
+}
+EXPORT_SYMBOL_GPL(nand_wait_ready);
+
+/**
+ * nand_wait_status_ready - [GENERIC] Wait for the ready status after commands.
+ * @chip: NAND chip object
+ * @timeo: Timeout in ms
+ *
+ * Wait for status ready (i.e. command done) or timeout.
+ */
+static void nand_wait_status_ready(struct nand_chip *chip, unsigned long timeo)
+{
+ int ret;
+
+ timeo = jiffies + msecs_to_jiffies(timeo);
+ do {
+ u8 status;
+
+ ret = nand_read_data_op(chip, &status, sizeof(status), true,
+ false);
+ if (ret)
+ return;
+
+ if (status & NAND_STATUS_READY)
+ break;
+ touch_softlockup_watchdog();
+ } while (time_before(jiffies, timeo));
+};
+
+/**
+ * nand_command - [DEFAULT] Send command to NAND device
+ * @chip: NAND chip object
+ * @command: the command to be sent
+ * @column: the column address for this command, -1 if none
+ * @page_addr: the page address for this command, -1 if none
+ *
+ * Send command to NAND device. This function is used for small page devices
+ * (512 Bytes per page).
+ */
+static void nand_command(struct nand_chip *chip, unsigned int command,
+ int column, int page_addr)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ctrl = NAND_CTRL_CLE | NAND_CTRL_CHANGE;
+
+ /* Write out the command to the device */
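+	/*
+	 * On small-page devices, SEQIN must be preceded by a read command
+	 * (READ0, READ1 or READOOB) selecting whether the column address
+	 * that follows points into the first 256 bytes, the second 256
+	 * bytes, or the OOB area of the page.
+	 */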
+ if (command == NAND_CMD_SEQIN) {
+ int readcmd;
+
+ if (column >= mtd->writesize) {
+ /* OOB area */
+ column -= mtd->writesize;
+ readcmd = NAND_CMD_READOOB;
+ } else if (column < 256) {
+ /* First 256 bytes --> READ0 */
+ readcmd = NAND_CMD_READ0;
+ } else {
+ column -= 256;
+ readcmd = NAND_CMD_READ1;
+ }
+ chip->legacy.cmd_ctrl(chip, readcmd, ctrl);
+ ctrl &= ~NAND_CTRL_CHANGE;
+ }
+ if (command != NAND_CMD_NONE)
+ chip->legacy.cmd_ctrl(chip, command, ctrl);
+
+ /* Address cycle, when necessary */
+ ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE;
+ /* Serially input address */
+ if (column != -1) {
+ /* Adjust columns for 16 bit buswidth */
+ if (chip->options & NAND_BUSWIDTH_16 &&
+ !nand_opcode_8bits(command))
+ column >>= 1;
+ chip->legacy.cmd_ctrl(chip, column, ctrl);
+ ctrl &= ~NAND_CTRL_CHANGE;
+ }
+ if (page_addr != -1) {
+ chip->legacy.cmd_ctrl(chip, page_addr, ctrl);
+ ctrl &= ~NAND_CTRL_CHANGE;
+ chip->legacy.cmd_ctrl(chip, page_addr >> 8, ctrl);
+ if (chip->options & NAND_ROW_ADDR_3)
+ chip->legacy.cmd_ctrl(chip, page_addr >> 16, ctrl);
+ }
+ chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE,
+ NAND_NCE | NAND_CTRL_CHANGE);
+
+ /*
+	 * Program and erase have their own busy handlers; status and
+	 * sequential-in need no delay.
+ */
+ switch (command) {
+
+ case NAND_CMD_NONE:
+ case NAND_CMD_PAGEPROG:
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ case NAND_CMD_SEQIN:
+ case NAND_CMD_STATUS:
+ case NAND_CMD_READID:
+ case NAND_CMD_SET_FEATURES:
+ return;
+
+ case NAND_CMD_RESET:
+ if (chip->legacy.dev_ready)
+ break;
+ udelay(chip->legacy.chip_delay);
+ chip->legacy.cmd_ctrl(chip, NAND_CMD_STATUS,
+ NAND_CTRL_CLE | NAND_CTRL_CHANGE);
+ chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE,
+ NAND_NCE | NAND_CTRL_CHANGE);
+		/* EZ-NAND can take up to 250ms as per ONFI v4.0 */
+ nand_wait_status_ready(chip, 250);
+ return;
+
+ /* This applies to read commands */
+ case NAND_CMD_READ0:
+ /*
+ * READ0 is sometimes used to exit GET STATUS mode. When this
+ * is the case no address cycles are requested, and we can use
+ * this information to detect that we should not wait for the
+ * device to be ready.
+ */
+ if (column == -1 && page_addr == -1)
+ return;
+ fallthrough;
+ default:
+ /*
+ * If we don't have access to the busy pin, we apply the given
+ * command delay
+ */
+ if (!chip->legacy.dev_ready) {
+ udelay(chip->legacy.chip_delay);
+ return;
+ }
+ }
+ /*
+ * Apply this short delay always to ensure that we do wait tWB in
+ * any case on any machine.
+ */
+ ndelay(100);
+
+ nand_wait_ready(chip);
+}
+
+static void nand_ccs_delay(struct nand_chip *chip)
+{
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(nand_get_interface_config(chip));
+
+ /*
+ * The controller already takes care of waiting for tCCS when the RNDIN
+ * or RNDOUT command is sent, return directly.
+ */
+ if (!(chip->options & NAND_WAIT_TCCS))
+ return;
+
+ /*
+ * Wait tCCS_min if it is correctly defined, otherwise wait 500ns
+ * (which should be safe for all NANDs).
+ */
+ if (nand_controller_can_setup_interface(chip))
+ ndelay(sdr->tCCS_min / 1000);
+ else
+ ndelay(500);
+}
+
+/**
+ * nand_command_lp - [DEFAULT] Send command to NAND large page device
+ * @chip: NAND chip object
+ * @command: the command to be sent
+ * @column: the column address for this command, -1 if none
+ * @page_addr: the page address for this command, -1 if none
+ *
+ * Send command to NAND device. This is the version for the new large page
+ * devices. We don't have the separate regions as we have in the small page
+ * devices. We must emulate NAND_CMD_READOOB to keep the code compatible.
+ */
+static void nand_command_lp(struct nand_chip *chip, unsigned int command,
+ int column, int page_addr)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ /* Emulate NAND_CMD_READOOB */
+ if (command == NAND_CMD_READOOB) {
+ column += mtd->writesize;
+ command = NAND_CMD_READ0;
+ }
+
+ /* Command latch cycle */
+ if (command != NAND_CMD_NONE)
+ chip->legacy.cmd_ctrl(chip, command,
+ NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
+
+ if (column != -1 || page_addr != -1) {
+ int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE;
+
+ /* Serially input address */
+ if (column != -1) {
+ /* Adjust columns for 16 bit buswidth */
+ if (chip->options & NAND_BUSWIDTH_16 &&
+ !nand_opcode_8bits(command))
+ column >>= 1;
+ chip->legacy.cmd_ctrl(chip, column, ctrl);
+ ctrl &= ~NAND_CTRL_CHANGE;
+
+ /* Only output a single addr cycle for 8bits opcodes. */
+ if (!nand_opcode_8bits(command))
+ chip->legacy.cmd_ctrl(chip, column >> 8, ctrl);
+ }
+ if (page_addr != -1) {
+ chip->legacy.cmd_ctrl(chip, page_addr, ctrl);
+ chip->legacy.cmd_ctrl(chip, page_addr >> 8,
+ NAND_NCE | NAND_ALE);
+ if (chip->options & NAND_ROW_ADDR_3)
+ chip->legacy.cmd_ctrl(chip, page_addr >> 16,
+ NAND_NCE | NAND_ALE);
+ }
+ }
+ chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE,
+ NAND_NCE | NAND_CTRL_CHANGE);
+
+ /*
+	 * Program and erase have their own busy handlers; sequential-in and
+	 * status need no delay.
+ */
+ switch (command) {
+
+ case NAND_CMD_NONE:
+ case NAND_CMD_CACHEDPROG:
+ case NAND_CMD_PAGEPROG:
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ case NAND_CMD_SEQIN:
+ case NAND_CMD_STATUS:
+ case NAND_CMD_READID:
+ case NAND_CMD_SET_FEATURES:
+ return;
+
+ case NAND_CMD_RNDIN:
+ nand_ccs_delay(chip);
+ return;
+
+ case NAND_CMD_RESET:
+ if (chip->legacy.dev_ready)
+ break;
+ udelay(chip->legacy.chip_delay);
+ chip->legacy.cmd_ctrl(chip, NAND_CMD_STATUS,
+ NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
+ chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE,
+ NAND_NCE | NAND_CTRL_CHANGE);
+		/* EZ-NAND can take up to 250ms as per ONFI v4.0 */
+ nand_wait_status_ready(chip, 250);
+ return;
+
+ case NAND_CMD_RNDOUT:
+ /* No ready / busy check necessary */
+ chip->legacy.cmd_ctrl(chip, NAND_CMD_RNDOUTSTART,
+ NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
+ chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE,
+ NAND_NCE | NAND_CTRL_CHANGE);
+
+ nand_ccs_delay(chip);
+ return;
+
+ case NAND_CMD_READ0:
+ /*
+ * READ0 is sometimes used to exit GET STATUS mode. When this
+ * is the case no address cycles are requested, and we can use
+ * this information to detect that READSTART should not be
+ * issued.
+ */
+ if (column == -1 && page_addr == -1)
+ return;
+
+ chip->legacy.cmd_ctrl(chip, NAND_CMD_READSTART,
+ NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
+ chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE,
+ NAND_NCE | NAND_CTRL_CHANGE);
+ fallthrough; /* This applies to read commands */
+ default:
+ /*
+ * If we don't have access to the busy pin, we apply the given
+ * command delay.
+ */
+ if (!chip->legacy.dev_ready) {
+ udelay(chip->legacy.chip_delay);
+ return;
+ }
+ }
+
+ /*
+ * Apply this short delay always to ensure that we do wait tWB in
+ * any case on any machine.
+ */
+ ndelay(100);
+
+ nand_wait_ready(chip);
+}
+
+/**
+ * nand_get_set_features_notsupp - set/get features stub returning -ENOTSUPP
+ * @chip: nand chip info structure
+ * @addr: feature address.
+ * @subfeature_param: the subfeature parameters, a four bytes array.
+ *
+ * Should be used by NAND controller drivers that do not support the SET/GET
+ * FEATURES operations.
+ */
+int nand_get_set_features_notsupp(struct nand_chip *chip, int addr,
+ u8 *subfeature_param)
+{
+ return -ENOTSUPP;
+}
+EXPORT_SYMBOL(nand_get_set_features_notsupp);
+
+/**
+ * nand_wait - [DEFAULT] wait until the command is done
+ * @chip: NAND chip structure
+ *
+ * Wait for command done. This applies to erase and program only.
+ */
+static int nand_wait(struct nand_chip *chip)
+{
+
+ unsigned long timeo = 400;
+ u8 status;
+ int ret;
+
+ /*
+ * Apply this short delay always to ensure that we do wait tWB in any
+ * case on any machine.
+ */
+ ndelay(100);
+
+ ret = nand_status_op(chip, NULL);
+ if (ret)
+ return ret;
+
+ if (in_interrupt() || oops_in_progress)
+ panic_nand_wait(chip, timeo);
+ else {
+ timeo = jiffies + msecs_to_jiffies(timeo);
+ do {
+ if (chip->legacy.dev_ready) {
+ if (chip->legacy.dev_ready(chip))
+ break;
+ } else {
+ ret = nand_read_data_op(chip, &status,
+ sizeof(status), true,
+ false);
+ if (ret)
+ return ret;
+
+ if (status & NAND_STATUS_READY)
+ break;
+ }
+ cond_resched();
+ } while (time_before(jiffies, timeo));
+ }
+
+ ret = nand_read_data_op(chip, &status, sizeof(status), true, false);
+ if (ret)
+ return ret;
+
+	/* This can happen in case of a timeout or a buggy dev_ready() */
+ WARN_ON(!(status & NAND_STATUS_READY));
+ return status;
+}
+
+void nand_legacy_set_defaults(struct nand_chip *chip)
+{
+ unsigned int busw = chip->options & NAND_BUSWIDTH_16;
+
+ if (nand_has_exec_op(chip))
+ return;
+
+ /* check for proper chip_delay setup, set 20us if not */
+ if (!chip->legacy.chip_delay)
+ chip->legacy.chip_delay = 20;
+
+	/* check if a user-supplied command function was given */
+ if (!chip->legacy.cmdfunc)
+ chip->legacy.cmdfunc = nand_command;
+
+	/* check if a user-supplied wait function was given */
+ if (chip->legacy.waitfunc == NULL)
+ chip->legacy.waitfunc = nand_wait;
+
+ if (!chip->legacy.select_chip)
+ chip->legacy.select_chip = nand_select_chip;
+
+ /* If called twice, pointers that depend on busw may need to be reset */
+ if (!chip->legacy.read_byte || chip->legacy.read_byte == nand_read_byte)
+ chip->legacy.read_byte = busw ? nand_read_byte16 : nand_read_byte;
+ if (!chip->legacy.write_buf || chip->legacy.write_buf == nand_write_buf)
+ chip->legacy.write_buf = busw ? nand_write_buf16 : nand_write_buf;
+ if (!chip->legacy.write_byte || chip->legacy.write_byte == nand_write_byte)
+ chip->legacy.write_byte = busw ? nand_write_byte16 : nand_write_byte;
+ if (!chip->legacy.read_buf || chip->legacy.read_buf == nand_read_buf)
+ chip->legacy.read_buf = busw ? nand_read_buf16 : nand_read_buf;
+}
+
+void nand_legacy_adjust_cmdfunc(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ /* Do not replace user supplied command function! */
+ if (mtd->writesize > 512 && chip->legacy.cmdfunc == nand_command)
+ chip->legacy.cmdfunc = nand_command_lp;
+}
+
+int nand_legacy_check_hooks(struct nand_chip *chip)
+{
+ /*
+ * ->legacy.cmdfunc() is legacy and will only be used if ->exec_op() is
+ * not populated.
+ */
+ if (nand_has_exec_op(chip))
+ return 0;
+
+ /*
+ * Default functions assigned for ->legacy.cmdfunc() and
+ * ->legacy.select_chip() both expect ->legacy.cmd_ctrl() to be
+ * populated.
+ */
+ if ((!chip->legacy.cmdfunc || !chip->legacy.select_chip) &&
+ !chip->legacy.cmd_ctrl) {
+ pr_err("->legacy.cmd_ctrl() should be provided\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
diff --git a/drivers/mtd/nand/raw/nand_macronix.c b/drivers/mtd/nand/raw/nand_macronix.c
new file mode 100644
index 000000000..1472f925f
--- /dev/null
+++ b/drivers/mtd/nand/raw/nand_macronix.c
@@ -0,0 +1,334 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2017 Free Electrons
+ * Copyright (C) 2017 NextThing Co
+ *
+ * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
+ */
+
+#include "linux/delay.h"
+#include "internals.h"
+
+#define MACRONIX_READ_RETRY_BIT BIT(0)
+#define MACRONIX_NUM_READ_RETRY_MODES 6
+
+#define ONFI_FEATURE_ADDR_MXIC_PROTECTION 0xA0
+#define MXIC_BLOCK_PROTECTION_ALL_LOCK 0x38
+#define MXIC_BLOCK_PROTECTION_ALL_UNLOCK 0x0
+
+#define ONFI_FEATURE_ADDR_MXIC_RANDOMIZER 0xB0
+#define MACRONIX_RANDOMIZER_BIT BIT(1)
+#define MACRONIX_RANDOMIZER_ENPGM BIT(0)
+#define MACRONIX_RANDOMIZER_RANDEN BIT(1)
+#define MACRONIX_RANDOMIZER_RANDOPT BIT(2)
+#define MACRONIX_RANDOMIZER_MODE_ENTER \
+ (MACRONIX_RANDOMIZER_ENPGM | \
+ MACRONIX_RANDOMIZER_RANDEN | \
+ MACRONIX_RANDOMIZER_RANDOPT)
+#define MACRONIX_RANDOMIZER_MODE_EXIT \
+ (MACRONIX_RANDOMIZER_RANDEN | \
+ MACRONIX_RANDOMIZER_RANDOPT)
+
+#define MXIC_CMD_POWER_DOWN 0xB9
+
+struct nand_onfi_vendor_macronix {
+ u8 reserved;
+ u8 reliability_func;
+} __packed;
+
+static int macronix_nand_setup_read_retry(struct nand_chip *chip, int mode)
+{
+ u8 feature[ONFI_SUBFEATURE_PARAM_LEN];
+
+ if (!chip->parameters.supports_set_get_features ||
+ !test_bit(ONFI_FEATURE_ADDR_READ_RETRY,
+ chip->parameters.set_feature_list))
+ return -ENOTSUPP;
+
+ feature[0] = mode;
+ return nand_set_features(chip, ONFI_FEATURE_ADDR_READ_RETRY, feature);
+}
+
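+/*
+ * Check whether the randomizer has already been enabled: a non-zero
+ * feature value means the OTP bits are already programmed. Otherwise,
+ * enter the randomizer mode, issue a dummy one-byte page program to
+ * commit the RANDEN/RANDOPT OTP bits, then exit the mode again.
+ */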
+static int macronix_nand_randomizer_check_enable(struct nand_chip *chip)
+{
+ u8 feature[ONFI_SUBFEATURE_PARAM_LEN];
+ int ret;
+
+ ret = nand_get_features(chip, ONFI_FEATURE_ADDR_MXIC_RANDOMIZER,
+ feature);
+ if (ret < 0)
+ return ret;
+
+ if (feature[0])
+ return feature[0];
+
+ feature[0] = MACRONIX_RANDOMIZER_MODE_ENTER;
+ ret = nand_set_features(chip, ONFI_FEATURE_ADDR_MXIC_RANDOMIZER,
+ feature);
+ if (ret < 0)
+ return ret;
+
+ /* RANDEN and RANDOPT OTP bits are programmed */
+ feature[0] = 0x0;
+ ret = nand_prog_page_op(chip, 0, 0, feature, 1);
+ if (ret < 0)
+ return ret;
+
+ ret = nand_get_features(chip, ONFI_FEATURE_ADDR_MXIC_RANDOMIZER,
+ feature);
+ if (ret < 0)
+ return ret;
+
+ feature[0] &= MACRONIX_RANDOMIZER_MODE_EXIT;
+ ret = nand_set_features(chip, ONFI_FEATURE_ADDR_MXIC_RANDOMIZER,
+ feature);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void macronix_nand_onfi_init(struct nand_chip *chip)
+{
+ struct nand_parameters *p = &chip->parameters;
+ struct nand_onfi_vendor_macronix *mxic;
+ struct device_node *dn = nand_get_flash_node(chip);
+ int rand_otp = 0;
+ int ret;
+
+ if (!p->onfi)
+ return;
+
+ if (of_find_property(dn, "mxic,enable-randomizer-otp", NULL))
+ rand_otp = 1;
+
+ mxic = (struct nand_onfi_vendor_macronix *)p->onfi->vendor;
+	/* Subpage write is prohibited in randomizer operation */
+ if (rand_otp && chip->options & NAND_NO_SUBPAGE_WRITE &&
+ mxic->reliability_func & MACRONIX_RANDOMIZER_BIT) {
+ if (p->supports_set_get_features) {
+ bitmap_set(p->set_feature_list,
+ ONFI_FEATURE_ADDR_MXIC_RANDOMIZER, 1);
+ bitmap_set(p->get_feature_list,
+ ONFI_FEATURE_ADDR_MXIC_RANDOMIZER, 1);
+ ret = macronix_nand_randomizer_check_enable(chip);
+ if (ret < 0) {
+ bitmap_clear(p->set_feature_list,
+ ONFI_FEATURE_ADDR_MXIC_RANDOMIZER,
+ 1);
+ bitmap_clear(p->get_feature_list,
+ ONFI_FEATURE_ADDR_MXIC_RANDOMIZER,
+ 1);
+ pr_info("Macronix NAND randomizer failed\n");
+ } else {
+ pr_info("Macronix NAND randomizer enabled\n");
+ }
+ }
+ }
+
+ if ((mxic->reliability_func & MACRONIX_READ_RETRY_BIT) == 0)
+ return;
+
+ chip->read_retries = MACRONIX_NUM_READ_RETRY_MODES;
+ chip->ops.setup_read_retry = macronix_nand_setup_read_retry;
+
+ if (p->supports_set_get_features) {
+ bitmap_set(p->set_feature_list,
+ ONFI_FEATURE_ADDR_READ_RETRY, 1);
+ bitmap_set(p->get_feature_list,
+ ONFI_FEATURE_ADDR_READ_RETRY, 1);
+ }
+}
+
+/*
+ * The Macronix AC series does not support using SET/GET_FEATURES to change
+ * the timings, despite what the parameter page declares. Unflag this
+ * feature to avoid unnecessary failures.
+ */
+static void macronix_nand_fix_broken_get_timings(struct nand_chip *chip)
+{
+ int i;
+ static const char * const broken_get_timings[] = {
+ "MX30LF1G18AC",
+ "MX30LF1G28AC",
+ "MX30LF2G18AC",
+ "MX30LF2G28AC",
+ "MX30LF4G18AC",
+ "MX30LF4G28AC",
+ "MX60LF8G18AC",
+ "MX30UF1G18AC",
+ "MX30UF1G16AC",
+ "MX30UF2G18AC",
+ "MX30UF2G16AC",
+ "MX30UF4G18AC",
+ "MX30UF4G16AC",
+ "MX30UF4G28AC",
+ };
+
+ if (!chip->parameters.supports_set_get_features)
+ return;
+
+ i = match_string(broken_get_timings, ARRAY_SIZE(broken_get_timings),
+ chip->parameters.model);
+ if (i < 0)
+ return;
+
+ bitmap_clear(chip->parameters.get_feature_list,
+ ONFI_FEATURE_ADDR_TIMING_MODE, 1);
+ bitmap_clear(chip->parameters.set_feature_list,
+ ONFI_FEATURE_ADDR_TIMING_MODE, 1);
+}
+
+/*
+ * Macronix NAND supports Block Protection through the Protection (PT) pin:
+ * active high at power-on, it protects the entire chip even when #WP is
+ * disabled. The locked/unlocked area can be partitioned according to the
+ * protection bits, i.e. upper 1/2 locked, upper 1/4 locked, and so on.
+ */
+static int mxic_nand_lock(struct nand_chip *chip, loff_t ofs, uint64_t len)
+{
+ u8 feature[ONFI_SUBFEATURE_PARAM_LEN];
+ int ret;
+
+ feature[0] = MXIC_BLOCK_PROTECTION_ALL_LOCK;
+ nand_select_target(chip, 0);
+ ret = nand_set_features(chip, ONFI_FEATURE_ADDR_MXIC_PROTECTION,
+ feature);
+ nand_deselect_target(chip);
+ if (ret)
+ pr_err("%s all blocks failed\n", __func__);
+
+ return ret;
+}
+
+static int mxic_nand_unlock(struct nand_chip *chip, loff_t ofs, uint64_t len)
+{
+ u8 feature[ONFI_SUBFEATURE_PARAM_LEN];
+ int ret;
+
+ feature[0] = MXIC_BLOCK_PROTECTION_ALL_UNLOCK;
+ nand_select_target(chip, 0);
+ ret = nand_set_features(chip, ONFI_FEATURE_ADDR_MXIC_PROTECTION,
+ feature);
+ nand_deselect_target(chip);
+ if (ret)
+ pr_err("%s all blocks failed\n", __func__);
+
+ return ret;
+}
+
+static void macronix_nand_block_protection_support(struct nand_chip *chip)
+{
+ u8 feature[ONFI_SUBFEATURE_PARAM_LEN];
+ int ret;
+
+ bitmap_set(chip->parameters.get_feature_list,
+ ONFI_FEATURE_ADDR_MXIC_PROTECTION, 1);
+
+ feature[0] = MXIC_BLOCK_PROTECTION_ALL_UNLOCK;
+ nand_select_target(chip, 0);
+ ret = nand_get_features(chip, ONFI_FEATURE_ADDR_MXIC_PROTECTION,
+ feature);
+ nand_deselect_target(chip);
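+	/*
+	 * If reading the feature failed, or the chip did not power up with
+	 * all blocks locked (i.e. the PT pin is likely not wired), block
+	 * protection cannot be used: drop the GET_FEATURES entry and bail out.
+	 */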
+ if (ret || feature[0] != MXIC_BLOCK_PROTECTION_ALL_LOCK) {
+ if (ret)
+ pr_err("Block protection check failed\n");
+
+ bitmap_clear(chip->parameters.get_feature_list,
+ ONFI_FEATURE_ADDR_MXIC_PROTECTION, 1);
+ return;
+ }
+
+ bitmap_set(chip->parameters.set_feature_list,
+ ONFI_FEATURE_ADDR_MXIC_PROTECTION, 1);
+
+ chip->ops.lock_area = mxic_nand_lock;
+ chip->ops.unlock_area = mxic_nand_unlock;
+}
+
+static int nand_power_down_op(struct nand_chip *chip)
+{
+ int ret;
+
+ if (nand_has_exec_op(chip)) {
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(MXIC_CMD_POWER_DOWN, 0),
+ };
+
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+
+ ret = nand_exec_op(chip, &op);
+ if (ret)
+ return ret;
+
+ } else {
+ chip->legacy.cmdfunc(chip, MXIC_CMD_POWER_DOWN, -1, -1);
+ }
+
+ return 0;
+}
+
+static int mxic_nand_suspend(struct nand_chip *chip)
+{
+ int ret;
+
+ nand_select_target(chip, 0);
+ ret = nand_power_down_op(chip);
+ if (ret < 0)
+ pr_err("Suspending MXIC NAND chip failed (%d)\n", ret);
+ nand_deselect_target(chip);
+
+ return ret;
+}
+
+static void mxic_nand_resume(struct nand_chip *chip)
+{
+ /*
+	 * Toggle the #CS pin to resume the NAND device, regardless of the
+	 * state of the other pins (CLE, #WE, #RE). A NAND controller can
+	 * ensure #CS is asserted/de-asserted by sending any byte over the
+	 * NAND bus, e.g. a power-down or reset command without R/B# status
+	 * checking.
+ */
+ nand_select_target(chip, 0);
+ nand_power_down_op(chip);
+	/* The minimum recovery time tRDP is 35us */
+ usleep_range(35, 100);
+ nand_deselect_target(chip);
+}
+
+static void macronix_nand_deep_power_down_support(struct nand_chip *chip)
+{
+ int i;
+ static const char * const deep_power_down_dev[] = {
+ "MX30UF1G28AD",
+ "MX30UF2G28AD",
+ "MX30UF4G28AD",
+ };
+
+ i = match_string(deep_power_down_dev, ARRAY_SIZE(deep_power_down_dev),
+ chip->parameters.model);
+ if (i < 0)
+ return;
+
+ chip->ops.suspend = mxic_nand_suspend;
+ chip->ops.resume = mxic_nand_resume;
+}
+
+static int macronix_nand_init(struct nand_chip *chip)
+{
+ if (nand_is_slc(chip))
+ chip->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE;
+
+ macronix_nand_fix_broken_get_timings(chip);
+ macronix_nand_onfi_init(chip);
+ macronix_nand_block_protection_support(chip);
+ macronix_nand_deep_power_down_support(chip);
+
+ return 0;
+}
+
+const struct nand_manufacturer_ops macronix_nand_manuf_ops = {
+ .init = macronix_nand_init,
+};
diff --git a/drivers/mtd/nand/raw/nand_micron.c b/drivers/mtd/nand/raw/nand_micron.c
new file mode 100644
index 000000000..c01928819
--- /dev/null
+++ b/drivers/mtd/nand/raw/nand_micron.c
@@ -0,0 +1,599 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2017 Free Electrons
+ * Copyright (C) 2017 NextThing Co
+ *
+ * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
+ */
+
+#include <linux/slab.h>
+
+#include "internals.h"
+
+/*
+ * Special Micron status bit 3 indicates that the block has been
+ * corrected by on-die ECC and should be rewritten.
+ */
+#define NAND_ECC_STATUS_WRITE_RECOMMENDED BIT(3)
+
+/*
+ * On chips with 8-bit ECC, an additional bit can be used to distinguish
+ * cases where errors were corrected without needing a rewrite:
+ *
+ * Bit 4 Bit 3 Bit 0 Description
+ * ----- ----- ----- -----------
+ * 0 0 0 No Errors
+ * 0 0 1 Multiple uncorrected errors
+ * 0 1 0 4 - 6 errors corrected, recommend rewrite
+ * 0 1 1 Reserved
+ * 1 0 0 1 - 3 errors corrected
+ * 1 0 1 Reserved
+ * 1 1 0 7 - 8 errors corrected, recommend rewrite
+ */
+#define NAND_ECC_STATUS_MASK (BIT(4) | BIT(3) | BIT(0))
+#define NAND_ECC_STATUS_UNCORRECTABLE BIT(0)
+#define NAND_ECC_STATUS_4_6_CORRECTED BIT(3)
+#define NAND_ECC_STATUS_1_3_CORRECTED BIT(4)
+#define NAND_ECC_STATUS_7_8_CORRECTED (BIT(4) | BIT(3))
+
+struct nand_onfi_vendor_micron {
+ u8 two_plane_read;
+ u8 read_cache;
+ u8 read_unique_id;
+ u8 dq_imped;
+ u8 dq_imped_num_settings;
+ u8 dq_imped_feat_addr;
+ u8 rb_pulldown_strength;
+ u8 rb_pulldown_strength_feat_addr;
+ u8 rb_pulldown_strength_num_settings;
+ u8 otp_mode;
+ u8 otp_page_start;
+ u8 otp_data_prot_addr;
+ u8 otp_num_pages;
+ u8 otp_feat_addr;
+ u8 read_retry_options;
+ u8 reserved[72];
+ u8 param_revision;
+} __packed;
+
+struct micron_on_die_ecc {
+ bool forced;
+ bool enabled;
+ void *rawbuf;
+};
+
+struct micron_nand {
+ struct micron_on_die_ecc ecc;
+};
+
+static int micron_nand_setup_read_retry(struct nand_chip *chip, int retry_mode)
+{
+ u8 feature[ONFI_SUBFEATURE_PARAM_LEN] = {retry_mode};
+
+ return nand_set_features(chip, ONFI_FEATURE_ADDR_READ_RETRY, feature);
+}
+
+/*
+ * Configure chip properties from Micron vendor-specific ONFI table
+ */
+static int micron_nand_onfi_init(struct nand_chip *chip)
+{
+ struct nand_parameters *p = &chip->parameters;
+
+ if (p->onfi) {
+ struct nand_onfi_vendor_micron *micron = (void *)p->onfi->vendor;
+
+ chip->read_retries = micron->read_retry_options;
+ chip->ops.setup_read_retry = micron_nand_setup_read_retry;
+ }
+
+ if (p->supports_set_get_features) {
+ set_bit(ONFI_FEATURE_ADDR_READ_RETRY, p->set_feature_list);
+ set_bit(ONFI_FEATURE_ON_DIE_ECC, p->set_feature_list);
+ set_bit(ONFI_FEATURE_ADDR_READ_RETRY, p->get_feature_list);
+ set_bit(ONFI_FEATURE_ON_DIE_ECC, p->get_feature_list);
+ }
+
+ return 0;
+}
+
+static int micron_nand_on_die_4_ooblayout_ecc(struct mtd_info *mtd,
+ int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section >= 4)
+ return -ERANGE;
+
+ oobregion->offset = (section * 16) + 8;
+ oobregion->length = 8;
+
+ return 0;
+}
+
+static int micron_nand_on_die_4_ooblayout_free(struct mtd_info *mtd,
+ int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section >= 4)
+ return -ERANGE;
+
+ oobregion->offset = (section * 16) + 2;
+ oobregion->length = 6;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops micron_nand_on_die_4_ooblayout_ops = {
+ .ecc = micron_nand_on_die_4_ooblayout_ecc,
+ .free = micron_nand_on_die_4_ooblayout_free,
+};
+
+static int micron_nand_on_die_8_ooblayout_ecc(struct mtd_info *mtd,
+ int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = mtd->oobsize - chip->ecc.total;
+ oobregion->length = chip->ecc.total;
+
+ return 0;
+}
+
+static int micron_nand_on_die_8_ooblayout_free(struct mtd_info *mtd,
+ int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = 2;
+ oobregion->length = mtd->oobsize - chip->ecc.total - 2;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops micron_nand_on_die_8_ooblayout_ops = {
+ .ecc = micron_nand_on_die_8_ooblayout_ecc,
+ .free = micron_nand_on_die_8_ooblayout_free,
+};
+
+static int micron_nand_on_die_ecc_setup(struct nand_chip *chip, bool enable)
+{
+ struct micron_nand *micron = nand_get_manufacturer_data(chip);
+ u8 feature[ONFI_SUBFEATURE_PARAM_LEN] = { 0, };
+ int ret;
+
+ if (micron->ecc.forced)
+ return 0;
+
+ if (micron->ecc.enabled == enable)
+ return 0;
+
+ if (enable)
+ feature[0] |= ONFI_FEATURE_ON_DIE_ECC_EN;
+
+ ret = nand_set_features(chip, ONFI_FEATURE_ON_DIE_ECC, feature);
+ if (!ret)
+ micron->ecc.enabled = enable;
+
+ return ret;
+}
+
+static int micron_nand_on_die_ecc_status_4(struct nand_chip *chip, u8 status,
+ void *buf, int page,
+ int oob_required)
+{
+ struct micron_nand *micron = nand_get_manufacturer_data(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned int step, max_bitflips = 0;
+ bool use_datain = false;
+ int ret;
+
+ if (!(status & NAND_ECC_STATUS_WRITE_RECOMMENDED)) {
+ if (status & NAND_STATUS_FAIL)
+ mtd->ecc_stats.failed++;
+
+ return 0;
+ }
+
+ /*
+ * The internal ECC doesn't tell us the number of bitflips that have
+	 * been corrected, only whether it recommends rewriting the block.
+ * If it's the case, we need to read the page in raw mode and compare
+ * its content to the corrected version to extract the actual number of
+ * bitflips.
+ * But before we do that, we must make sure we have all OOB bytes read
+ * in non-raw mode, even if the user did not request those bytes.
+ */
+ if (!oob_required) {
+ /*
+ * We first check which operation is supported by the controller
+		 * before running it. This trick makes it possible to support
+		 * all controllers, even the most constrained, with almost no
+		 * performance hit.
+ *
+ * TODO: could be enhanced to avoid repeating the same check
+ * over and over in the fast path.
+ */
+ if (!nand_has_exec_op(chip) ||
+ !nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false,
+ true))
+ use_datain = true;
+
+ if (use_datain)
+ ret = nand_read_data_op(chip, chip->oob_poi,
+ mtd->oobsize, false, false);
+ else
+ ret = nand_change_read_column_op(chip, mtd->writesize,
+ chip->oob_poi,
+ mtd->oobsize, false);
+ if (ret)
+ return ret;
+ }
+
+ micron_nand_on_die_ecc_setup(chip, false);
+
+ ret = nand_read_page_op(chip, page, 0, micron->ecc.rawbuf,
+ mtd->writesize + mtd->oobsize);
+ if (ret)
+ return ret;
+
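+	/*
+	 * Compare the raw page with the ECC-corrected one, step by step:
+	 * XOR the two copies and popcount the difference, for both the data
+	 * area and the protected OOB bytes (4 metadata bytes plus the ECC
+	 * bytes) of each step, to recover the actual number of bitflips.
+	 */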
+ for (step = 0; step < chip->ecc.steps; step++) {
+ unsigned int offs, i, nbitflips = 0;
+ u8 *rawbuf, *corrbuf;
+
+ offs = step * chip->ecc.size;
+ rawbuf = micron->ecc.rawbuf + offs;
+ corrbuf = buf + offs;
+
+ for (i = 0; i < chip->ecc.size; i++)
+ nbitflips += hweight8(corrbuf[i] ^ rawbuf[i]);
+
+ offs = (step * 16) + 4;
+ rawbuf = micron->ecc.rawbuf + mtd->writesize + offs;
+ corrbuf = chip->oob_poi + offs;
+
+ for (i = 0; i < chip->ecc.bytes + 4; i++)
+ nbitflips += hweight8(corrbuf[i] ^ rawbuf[i]);
+
+ if (WARN_ON(nbitflips > chip->ecc.strength))
+ return -EINVAL;
+
+ max_bitflips = max(nbitflips, max_bitflips);
+ mtd->ecc_stats.corrected += nbitflips;
+ }
+
+ return max_bitflips;
+}
+
+static int micron_nand_on_die_ecc_status_8(struct nand_chip *chip, u8 status)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ /*
+ * With 8/512 we have more information but still don't know precisely
+ * how many bit-flips were seen.
+ */
+ switch (status & NAND_ECC_STATUS_MASK) {
+ case NAND_ECC_STATUS_UNCORRECTABLE:
+ mtd->ecc_stats.failed++;
+ return 0;
+ case NAND_ECC_STATUS_1_3_CORRECTED:
+ mtd->ecc_stats.corrected += 3;
+ return 3;
+ case NAND_ECC_STATUS_4_6_CORRECTED:
+ mtd->ecc_stats.corrected += 6;
+ /* rewrite recommended */
+ return 6;
+ case NAND_ECC_STATUS_7_8_CORRECTED:
+ mtd->ecc_stats.corrected += 8;
+ /* rewrite recommended */
+ return 8;
+ default:
+ return 0;
+ }
+}
+
+static int
+micron_nand_read_page_on_die_ecc(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ bool use_datain = false;
+ u8 status;
+ int ret, max_bitflips = 0;
+
+ ret = micron_nand_on_die_ecc_setup(chip, true);
+ if (ret)
+ return ret;
+
+ ret = nand_read_page_op(chip, page, 0, NULL, 0);
+ if (ret)
+ goto out;
+
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ goto out;
+
+ /*
+ * We first check which operation is supported by the controller before
+ * running it. This trick makes it possible to support all controllers,
+	 * even the most constrained, with almost no performance hit.
+ *
+ * TODO: could be enhanced to avoid repeating the same check over and
+ * over in the fast path.
+ */
+ if (!nand_has_exec_op(chip) ||
+ !nand_read_data_op(chip, buf, mtd->writesize, false, true))
+ use_datain = true;
+
+ if (use_datain) {
+ ret = nand_exit_status_op(chip);
+ if (ret)
+ goto out;
+
+ ret = nand_read_data_op(chip, buf, mtd->writesize, false,
+ false);
+ if (!ret && oob_required)
+ ret = nand_read_data_op(chip, chip->oob_poi,
+ mtd->oobsize, false, false);
+ } else {
+ ret = nand_change_read_column_op(chip, 0, buf, mtd->writesize,
+ false);
+ if (!ret && oob_required)
+ ret = nand_change_read_column_op(chip, mtd->writesize,
+ chip->oob_poi,
+ mtd->oobsize, false);
+ }
+
+ if (chip->ecc.strength == 4)
+ max_bitflips = micron_nand_on_die_ecc_status_4(chip, status,
+ buf, page,
+ oob_required);
+ else
+ max_bitflips = micron_nand_on_die_ecc_status_8(chip, status);
+
+out:
+ micron_nand_on_die_ecc_setup(chip, false);
+
+ return ret ? ret : max_bitflips;
+}
+
+static int
+micron_nand_write_page_on_die_ecc(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
+{
+ int ret;
+
+ ret = micron_nand_on_die_ecc_setup(chip, true);
+ if (ret)
+ return ret;
+
+ ret = nand_write_page_raw(chip, buf, oob_required, page);
+ micron_nand_on_die_ecc_setup(chip, false);
+
+ return ret;
+}
+
+enum {
+ /* The NAND flash doesn't support on-die ECC */
+ MICRON_ON_DIE_UNSUPPORTED,
+
+ /*
+ * The NAND flash supports on-die ECC and it can be
+ * enabled/disabled by a set features command.
+ */
+ MICRON_ON_DIE_SUPPORTED,
+
+ /*
+ * The NAND flash supports on-die ECC, and it cannot be
+ * disabled.
+ */
+ MICRON_ON_DIE_MANDATORY,
+};
+
+#define MICRON_ID_INTERNAL_ECC_MASK GENMASK(1, 0)
+#define MICRON_ID_ECC_ENABLED BIT(7)
+
+/*
+ * Try to detect if the NAND supports on-die ECC. To do this, we enable
+ * the feature, and read back if it has been enabled as expected. We
+ * also check if it can be disabled, because some Micron NANDs do not
+ * allow disabling the on-die ECC and we don't support such NANDs for
+ * now.
+ *
+ * This function also has the side effect of disabling on-die ECC if
+ * it had been left enabled by the firmware/bootloader.
+ */
+static int micron_supports_on_die_ecc(struct nand_chip *chip)
+{
+ const struct nand_ecc_props *requirements =
+ nanddev_get_ecc_requirements(&chip->base);
+ u8 id[5];
+ int ret;
+
+ if (!chip->parameters.onfi)
+ return MICRON_ON_DIE_UNSUPPORTED;
+
+ if (nanddev_bits_per_cell(&chip->base) != 1)
+ return MICRON_ON_DIE_UNSUPPORTED;
+
+ /*
+ * We only support on-die ECC of 4/512 or 8/512
+ */
+ if (requirements->strength != 4 && requirements->strength != 8)
+ return MICRON_ON_DIE_UNSUPPORTED;
+
+ /* 0x2 means on-die ECC is available. */
+ if (chip->id.len != 5 ||
+ (chip->id.data[4] & MICRON_ID_INTERNAL_ECC_MASK) != 0x2)
+ return MICRON_ON_DIE_UNSUPPORTED;
+
+ /*
+ * It seems that there are devices which do not support ECC officially.
+	 * At least the MT29F2G08ABAGA / MT29F2G08ABBGA devices support
+	 * enabling the ECC feature but don't reflect that in the READ_ID table.
+ * So we have to guarantee that we disable the ECC feature directly
+ * after we did the READ_ID table command. Later we can evaluate the
+ * ECC_ENABLE support.
+ */
+ ret = micron_nand_on_die_ecc_setup(chip, true);
+ if (ret)
+ return MICRON_ON_DIE_UNSUPPORTED;
+
+ ret = nand_readid_op(chip, 0, id, sizeof(id));
+ if (ret)
+ return MICRON_ON_DIE_UNSUPPORTED;
+
+ ret = micron_nand_on_die_ecc_setup(chip, false);
+ if (ret)
+ return MICRON_ON_DIE_UNSUPPORTED;
+
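+	/*
+	 * The first READ_ID above was issued with on-die ECC enabled: if the
+	 * ECC_ENABLED bit did not stick, the feature is unsupported. The
+	 * second READ_ID, issued after disabling the feature, tells us
+	 * whether the ECC can actually be turned off: if the bit is still
+	 * set, on-die ECC is mandatory.
+	 */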
+ if (!(id[4] & MICRON_ID_ECC_ENABLED))
+ return MICRON_ON_DIE_UNSUPPORTED;
+
+ ret = nand_readid_op(chip, 0, id, sizeof(id));
+ if (ret)
+ return MICRON_ON_DIE_UNSUPPORTED;
+
+ if (id[4] & MICRON_ID_ECC_ENABLED)
+ return MICRON_ON_DIE_MANDATORY;
+
+ /*
+ * We only support on-die ECC of 4/512 or 8/512
+ */
+ if (requirements->strength != 4 && requirements->strength != 8)
+ return MICRON_ON_DIE_UNSUPPORTED;
+
+ return MICRON_ON_DIE_SUPPORTED;
+}
+
+static int micron_nand_init(struct nand_chip *chip)
+{
+ struct nand_device *base = &chip->base;
+ const struct nand_ecc_props *requirements =
+ nanddev_get_ecc_requirements(base);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct micron_nand *micron;
+ int ondie;
+ int ret;
+
+ micron = kzalloc(sizeof(*micron), GFP_KERNEL);
+ if (!micron)
+ return -ENOMEM;
+
+ nand_set_manufacturer_data(chip, micron);
+
+ ret = micron_nand_onfi_init(chip);
+ if (ret)
+ goto err_free_manuf_data;
+
+ chip->options |= NAND_BBM_FIRSTPAGE;
+
+ if (mtd->writesize == 2048)
+ chip->options |= NAND_BBM_SECONDPAGE;
+
+ ondie = micron_supports_on_die_ecc(chip);
+
+ if (ondie == MICRON_ON_DIE_MANDATORY &&
+ chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_DIE) {
+ pr_err("On-die ECC forcefully enabled, not supported\n");
+ ret = -EINVAL;
+ goto err_free_manuf_data;
+ }
+
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE) {
+ if (ondie == MICRON_ON_DIE_UNSUPPORTED) {
+ pr_err("On-die ECC selected but not supported\n");
+ ret = -EINVAL;
+ goto err_free_manuf_data;
+ }
+
+ if (ondie == MICRON_ON_DIE_MANDATORY) {
+ micron->ecc.forced = true;
+ micron->ecc.enabled = true;
+ }
+
+ /*
+ * In case of 4bit on-die ECC, we need a buffer to store a
+ * page dumped in raw mode so that we can compare its content
+ * to the same page after ECC correction happened and extract
+ * the real number of bitflips from this comparison.
+		 * That's not needed for 8-bit ECC, because the status exposes
+ * a better approximation of the number of bitflips in a page.
+ */
+ if (requirements->strength == 4) {
+ micron->ecc.rawbuf = kmalloc(mtd->writesize +
+ mtd->oobsize,
+ GFP_KERNEL);
+ if (!micron->ecc.rawbuf) {
+ ret = -ENOMEM;
+ goto err_free_manuf_data;
+ }
+ }
+
+ if (requirements->strength == 4)
+ mtd_set_ooblayout(mtd,
+ &micron_nand_on_die_4_ooblayout_ops);
+ else
+ mtd_set_ooblayout(mtd,
+ &micron_nand_on_die_8_ooblayout_ops);
+
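+		/*
+		 * Micron's on-die ECC reserves two ECC bytes per bit of
+		 * correction strength for each 512-byte step: 8 bytes for
+		 * 4/512, 16 bytes for 8/512.
+		 */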
+ chip->ecc.bytes = requirements->strength * 2;
+ chip->ecc.size = 512;
+ chip->ecc.strength = requirements->strength;
+ chip->ecc.algo = NAND_ECC_ALGO_BCH;
+ chip->ecc.read_page = micron_nand_read_page_on_die_ecc;
+ chip->ecc.write_page = micron_nand_write_page_on_die_ecc;
+
+ if (ondie == MICRON_ON_DIE_MANDATORY) {
+ chip->ecc.read_page_raw = nand_read_page_raw_notsupp;
+ chip->ecc.write_page_raw = nand_write_page_raw_notsupp;
+ } else {
+ if (!chip->ecc.read_page_raw)
+ chip->ecc.read_page_raw = nand_read_page_raw;
+ if (!chip->ecc.write_page_raw)
+ chip->ecc.write_page_raw = nand_write_page_raw;
+ }
+ }
+
+ return 0;
+
+err_free_manuf_data:
+ kfree(micron->ecc.rawbuf);
+ kfree(micron);
+
+ return ret;
+}
+
+static void micron_nand_cleanup(struct nand_chip *chip)
+{
+ struct micron_nand *micron = nand_get_manufacturer_data(chip);
+
+ kfree(micron->ecc.rawbuf);
+ kfree(micron);
+}
+
+static void micron_fixup_onfi_param_page(struct nand_chip *chip,
+ struct nand_onfi_params *p)
+{
+ /*
+ * MT29F1G08ABAFAWP-ITE:F and possibly others report 00 00 for the
+ * revision number field of the ONFI parameter page. Assume ONFI
+ * version 1.0 if the revision number is 00 00.
+ */
+ if (le16_to_cpu(p->revision) == 0)
+ p->revision = cpu_to_le16(ONFI_VERSION_1_0);
+}
+
+const struct nand_manufacturer_ops micron_nand_manuf_ops = {
+ .init = micron_nand_init,
+ .cleanup = micron_nand_cleanup,
+ .fixup_onfi_param_page = micron_fixup_onfi_param_page,
+};
diff --git a/drivers/mtd/nand/raw/nand_onfi.c b/drivers/mtd/nand/raw/nand_onfi.c
new file mode 100644
index 000000000..45649e037
--- /dev/null
+++ b/drivers/mtd/nand/raw/nand_onfi.c
@@ -0,0 +1,334 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
+ * 2002-2006 Thomas Gleixner (tglx@linutronix.de)
+ *
+ * Credits:
+ * David Woodhouse for adding multichip support
+ *
+ * Aleph One Ltd. and Toby Churchill Ltd. for supporting the
+ * rework for 2K page size chips
+ *
+ * This file contains all ONFI helpers.
+ */
+
+#include <linux/slab.h>
+
+#include "internals.h"
+
+#define ONFI_PARAM_PAGES 3
+
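+/*
+ * CRC-16 as used by the ONFI parameter page: polynomial 0x8005
+ * (x^16 + x^15 + x^2 + 1), processed MSB first, seeded by the caller
+ * (typically ONFI_CRC_BASE).
+ */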
+u16 onfi_crc16(u16 crc, u8 const *p, size_t len)
+{
+ int i;
+ while (len--) {
+ crc ^= *p++ << 8;
+ for (i = 0; i < 8; i++)
+ crc = (crc << 1) ^ ((crc & 0x8000) ? 0x8005 : 0);
+ }
+
+ return crc;
+}
+
+/* Parse the Extended Parameter Page. */
+static int nand_flash_detect_ext_param_page(struct nand_chip *chip,
+ struct nand_onfi_params *p)
+{
+ struct nand_device *base = &chip->base;
+ struct nand_ecc_props requirements;
+ struct onfi_ext_param_page *ep;
+ struct onfi_ext_section *s;
+ struct onfi_ext_ecc_info *ecc;
+ uint8_t *cursor;
+ int ret;
+ int len;
+ int i;
+
+ len = le16_to_cpu(p->ext_param_page_length) * 16;
+ ep = kmalloc(len, GFP_KERNEL);
+ if (!ep)
+ return -ENOMEM;
+
+ /*
+ * Use the Change Read Column command to skip the ONFI param pages and
+ * ensure we read at the right location.
+ */
+ ret = nand_change_read_column_op(chip,
+ sizeof(*p) * p->num_of_param_pages,
+ ep, len, true);
+ if (ret)
+ goto ext_out;
+
+ ret = -EINVAL;
+ if ((onfi_crc16(ONFI_CRC_BASE, ((uint8_t *)ep) + 2, len - 2)
+ != le16_to_cpu(ep->crc))) {
+ pr_debug("fail in the CRC.\n");
+ goto ext_out;
+ }
+
+ /*
+ * Check the signature.
+	 * This does not strictly follow the ONFI spec and may change in the future.
+ */
+ if (strncmp(ep->sig, "EPPS", 4)) {
+ pr_debug("The signature is invalid.\n");
+ goto ext_out;
+ }
+
+ /* find the ECC section. */
+ cursor = (uint8_t *)(ep + 1);
+ for (i = 0; i < ONFI_EXT_SECTION_MAX; i++) {
+ s = ep->sections + i;
+ if (s->type == ONFI_SECTION_TYPE_2)
+ break;
+ cursor += s->length * 16;
+ }
+ if (i == ONFI_EXT_SECTION_MAX) {
+ pr_debug("We can not find the ECC section.\n");
+ goto ext_out;
+ }
+
+ /* get the info we want. */
+ ecc = (struct onfi_ext_ecc_info *)cursor;
+
+ if (!ecc->codeword_size) {
+ pr_debug("Invalid codeword size\n");
+ goto ext_out;
+ }
+
+ requirements.strength = ecc->ecc_bits;
+ requirements.step_size = 1 << ecc->codeword_size;
+ nanddev_set_ecc_requirements(base, &requirements);
+
+ ret = 0;
+
+ext_out:
+ kfree(ep);
+ return ret;
+}
+
+/*
+ * Recover data with bit-wise majority
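+ * (each destination bit is set iff it is set in a strict majority of the
+ * source buffers; with the three copies of the ONFI parameter page this
+ * corrects any bitflip affecting a single copy)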
+ */
+static void nand_bit_wise_majority(const void **srcbufs,
+ unsigned int nsrcbufs,
+ void *dstbuf,
+ unsigned int bufsize)
+{
+ int i, j, k;
+
+ for (i = 0; i < bufsize; i++) {
+ u8 val = 0;
+
+ for (j = 0; j < 8; j++) {
+ unsigned int cnt = 0;
+
+ for (k = 0; k < nsrcbufs; k++) {
+ const u8 *srcbuf = srcbufs[k];
+
+ if (srcbuf[i] & BIT(j))
+ cnt++;
+ }
+
+ if (cnt > nsrcbufs / 2)
+ val |= BIT(j);
+ }
+
+ ((u8 *)dstbuf)[i] = val;
+ }
+}
+
+/*
+ * Check if the NAND chip is ONFI compliant, returns 1 if it is, 0 otherwise.
+ */
+int nand_onfi_detect(struct nand_chip *chip)
+{
+ struct nand_device *base = &chip->base;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg;
+ struct nand_onfi_params *p = NULL, *pbuf;
+ struct onfi_params *onfi;
+ bool use_datain = false;
+ int onfi_version = 0;
+ char id[4];
+ int i, ret, val;
+ u16 crc;
+
+ memorg = nanddev_get_memorg(&chip->base);
+
+ /* Try ONFI for unknown chip or LP */
+ ret = nand_readid_op(chip, 0x20, id, sizeof(id));
+ if (ret || strncmp(id, "ONFI", 4))
+ return 0;
+
+ /* ONFI chip: allocate a buffer to hold its parameter page */
+ pbuf = kzalloc((sizeof(*pbuf) * ONFI_PARAM_PAGES), GFP_KERNEL);
+ if (!pbuf)
+ return -ENOMEM;
+
+ if (!nand_has_exec_op(chip) ||
+ !nand_read_data_op(chip, &pbuf[0], sizeof(*pbuf), true, true))
+ use_datain = true;
+
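+	/*
+	 * The ONFI parameter page is replicated several times: read the
+	 * copies one by one until one passes the CRC check, and fall back
+	 * to bit-wise majority recovery below if none does.
+	 */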
+ for (i = 0; i < ONFI_PARAM_PAGES; i++) {
+ if (!i)
+ ret = nand_read_param_page_op(chip, 0, &pbuf[i],
+ sizeof(*pbuf));
+ else if (use_datain)
+ ret = nand_read_data_op(chip, &pbuf[i], sizeof(*pbuf),
+ true, false);
+ else
+ ret = nand_change_read_column_op(chip, sizeof(*pbuf) * i,
+ &pbuf[i], sizeof(*pbuf),
+ true);
+ if (ret) {
+ ret = 0;
+ goto free_onfi_param_page;
+ }
+
+ crc = onfi_crc16(ONFI_CRC_BASE, (u8 *)&pbuf[i], 254);
+ if (crc == le16_to_cpu(pbuf[i].crc)) {
+ p = &pbuf[i];
+ break;
+ }
+ }
+
+ if (i == ONFI_PARAM_PAGES) {
+ const void *srcbufs[ONFI_PARAM_PAGES];
+ unsigned int j;
+
+ for (j = 0; j < ONFI_PARAM_PAGES; j++)
+ srcbufs[j] = pbuf + j;
+
+ pr_warn("Could not find a valid ONFI parameter page, trying bit-wise majority to recover it\n");
+ nand_bit_wise_majority(srcbufs, ONFI_PARAM_PAGES, pbuf,
+ sizeof(*pbuf));
+
+ crc = onfi_crc16(ONFI_CRC_BASE, (u8 *)pbuf, 254);
+ if (crc != le16_to_cpu(pbuf->crc)) {
+ pr_err("ONFI parameter recovery failed, aborting\n");
+ goto free_onfi_param_page;
+ }
+ p = pbuf;
+ }
+
+ if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
+ chip->manufacturer.desc->ops->fixup_onfi_param_page)
+ chip->manufacturer.desc->ops->fixup_onfi_param_page(chip, p);
+
+ /* Check version */
+ val = le16_to_cpu(p->revision);
+ if (val & ONFI_VERSION_2_3)
+ onfi_version = 23;
+ else if (val & ONFI_VERSION_2_2)
+ onfi_version = 22;
+ else if (val & ONFI_VERSION_2_1)
+ onfi_version = 21;
+ else if (val & ONFI_VERSION_2_0)
+ onfi_version = 20;
+ else if (val & ONFI_VERSION_1_0)
+ onfi_version = 10;
+
+ if (!onfi_version) {
+ pr_info("unsupported ONFI version: %d\n", val);
+ goto free_onfi_param_page;
+ }
+
+ sanitize_string(p->manufacturer, sizeof(p->manufacturer));
+ sanitize_string(p->model, sizeof(p->model));
+ chip->parameters.model = kstrdup(p->model, GFP_KERNEL);
+ if (!chip->parameters.model) {
+ ret = -ENOMEM;
+ goto free_onfi_param_page;
+ }
+
+ memorg->pagesize = le32_to_cpu(p->byte_per_page);
+ mtd->writesize = memorg->pagesize;
+
+ /*
+ * pages_per_block and blocks_per_lun may not be a power-of-2 size
+ * (don't ask me who thought of this...). MTD assumes that these
+ * dimensions will be power-of-2, so just truncate the remaining area.
+ */
+ memorg->pages_per_eraseblock =
+ 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
+ mtd->erasesize = memorg->pages_per_eraseblock * memorg->pagesize;
+
+ memorg->oobsize = le16_to_cpu(p->spare_bytes_per_page);
+ mtd->oobsize = memorg->oobsize;
+
+ memorg->luns_per_target = p->lun_count;
+ memorg->planes_per_lun = 1 << p->interleaved_bits;
+
+ /* See erasesize comment */
+ memorg->eraseblocks_per_lun =
+ 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
+ memorg->max_bad_eraseblocks_per_lun = le32_to_cpu(p->blocks_per_lun);
+ memorg->bits_per_cell = p->bits_per_cell;
+
+ if (le16_to_cpu(p->features) & ONFI_FEATURE_16_BIT_BUS)
+ chip->options |= NAND_BUSWIDTH_16;
+
+ if (p->ecc_bits != 0xff) {
+ struct nand_ecc_props requirements = {
+ .strength = p->ecc_bits,
+ .step_size = 512,
+ };
+
+ nanddev_set_ecc_requirements(base, &requirements);
+ } else if (onfi_version >= 21 &&
+ (le16_to_cpu(p->features) & ONFI_FEATURE_EXT_PARAM_PAGE)) {
+
+ /*
+		 * nand_flash_detect_ext_param_page() uses the Change Read
+		 * Column command, which may not be supported by the current
+		 * chip->legacy.cmdfunc, so try to update chip->legacy.cmdfunc
+		 * now. We do not replace a user-supplied command function.
+ */
+ nand_legacy_adjust_cmdfunc(chip);
+
+ /* The Extended Parameter Page is supported since ONFI 2.1. */
+ if (nand_flash_detect_ext_param_page(chip, p))
+ pr_warn("Failed to detect ONFI extended param page\n");
+ } else {
+ pr_warn("Could not retrieve ONFI ECC requirements\n");
+ }
+
+ /* Save some parameters from the parameter page for future use */
+ if (le16_to_cpu(p->opt_cmd) & ONFI_OPT_CMD_SET_GET_FEATURES) {
+ chip->parameters.supports_set_get_features = true;
+ bitmap_set(chip->parameters.get_feature_list,
+ ONFI_FEATURE_ADDR_TIMING_MODE, 1);
+ bitmap_set(chip->parameters.set_feature_list,
+ ONFI_FEATURE_ADDR_TIMING_MODE, 1);
+ }
+
+ onfi = kzalloc(sizeof(*onfi), GFP_KERNEL);
+ if (!onfi) {
+ ret = -ENOMEM;
+ goto free_model;
+ }
+
+ onfi->version = onfi_version;
+ onfi->tPROG = le16_to_cpu(p->t_prog);
+ onfi->tBERS = le16_to_cpu(p->t_bers);
+ onfi->tR = le16_to_cpu(p->t_r);
+ onfi->tCCS = le16_to_cpu(p->t_ccs);
+ onfi->async_timing_mode = le16_to_cpu(p->async_timing_mode);
+ onfi->vendor_revision = le16_to_cpu(p->vendor_revision);
+ memcpy(onfi->vendor, p->vendor, sizeof(p->vendor));
+ chip->parameters.onfi = onfi;
+
+ /* Identification done, free the full ONFI parameter page and exit */
+ kfree(pbuf);
+
+ return 1;
+
+free_model:
+ kfree(chip->parameters.model);
+free_onfi_param_page:
+ kfree(pbuf);
+
+ return ret;
+}
diff --git a/drivers/mtd/nand/raw/nand_samsung.c b/drivers/mtd/nand/raw/nand_samsung.c
new file mode 100644
index 000000000..0be6b7563
--- /dev/null
+++ b/drivers/mtd/nand/raw/nand_samsung.c
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2017 Free Electrons
+ * Copyright (C) 2017 NextThing Co
+ *
+ * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
+ */
+
+#include "internals.h"
+
+static void samsung_nand_decode_id(struct nand_chip *chip)
+{
+ struct nand_device *base = &chip->base;
+ struct nand_ecc_props requirements = {};
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg;
+
+ memorg = nanddev_get_memorg(&chip->base);
+
+ /* New Samsung (6 byte ID): Samsung K9GAG08U0F (p.44) */
+ if (chip->id.len == 6 && !nand_is_slc(chip) &&
+ chip->id.data[5] != 0x00) {
+ u8 extid = chip->id.data[3];
+
+ /* Get pagesize */
+ memorg->pagesize = 2048 << (extid & 0x03);
+ mtd->writesize = memorg->pagesize;
+
+ extid >>= 2;
+
+ /* Get oobsize */
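+		/*
+		 * The OOB size selector is 3 bits spread over non-contiguous
+		 * ID bits: bit 2 comes from extid bit 4, bits 1:0 from extid
+		 * bits 1:0.
+		 */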
+ switch (((extid >> 2) & 0x4) | (extid & 0x3)) {
+ case 1:
+ memorg->oobsize = 128;
+ break;
+ case 2:
+ memorg->oobsize = 218;
+ break;
+ case 3:
+ memorg->oobsize = 400;
+ break;
+ case 4:
+ memorg->oobsize = 436;
+ break;
+ case 5:
+ memorg->oobsize = 512;
+ break;
+ case 6:
+ memorg->oobsize = 640;
+ break;
+ default:
+ /*
+ * We should never reach this case, but if that
+ * happens, this probably means Samsung decided to use
+ * a different extended ID format, and we should find
+ * a way to support it.
+ */
+ WARN(1, "Invalid OOB size value");
+ break;
+ }
+
+ mtd->oobsize = memorg->oobsize;
+
+ /* Get blocksize */
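+		/*
+		 * The erase block size is 128KiB shifted left by a 3-bit
+		 * selector (bit 2 from extid bit 3, bits 1:0 from extid bits
+		 * 1:0); pages_per_eraseblock follows by dividing by the page
+		 * size.
+		 */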
+ extid >>= 2;
+ memorg->pages_per_eraseblock = (128 * 1024) <<
+ (((extid >> 1) & 0x04) |
+ (extid & 0x03)) /
+ memorg->pagesize;
+ mtd->erasesize = (128 * 1024) <<
+ (((extid >> 1) & 0x04) | (extid & 0x03));
+
+		/* Extract ECC requirements from the 5th ID byte */
+ extid = (chip->id.data[4] >> 4) & 0x07;
+ if (extid < 5) {
+ requirements.step_size = 512;
+ requirements.strength = 1 << extid;
+ } else {
+ requirements.step_size = 1024;
+ switch (extid) {
+ case 5:
+ requirements.strength = 24;
+ break;
+ case 6:
+ requirements.strength = 40;
+ break;
+ case 7:
+ requirements.strength = 60;
+ break;
+ default:
+ WARN(1, "Could not decode ECC info");
+ requirements.step_size = 0;
+ }
+ }
+ } else {
+ nand_decode_ext_id(chip);
+
+ if (nand_is_slc(chip)) {
+ switch (chip->id.data[1]) {
+ /* K9F4G08U0D-S[I|C]B0(T00) */
+ case 0xDC:
+ requirements.step_size = 512;
+ requirements.strength = 1;
+ break;
+
+ /* K9F1G08U0E 21nm chips do not support subpage write */
+ case 0xF1:
+ if (chip->id.len > 4 &&
+ (chip->id.data[4] & GENMASK(1, 0)) == 0x1)
+ chip->options |= NAND_NO_SUBPAGE_WRITE;
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+ nanddev_set_ecc_requirements(base, &requirements);
+}
+
+static int samsung_nand_init(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (mtd->writesize > 512)
+ chip->options |= NAND_SAMSUNG_LP_OPTIONS;
+
+ if (!nand_is_slc(chip))
+ chip->options |= NAND_BBM_LASTPAGE;
+ else
+ chip->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE;
+
+ return 0;
+}
+
+const struct nand_manufacturer_ops samsung_nand_manuf_ops = {
+ .detect = samsung_nand_decode_id,
+ .init = samsung_nand_init,
+};
diff --git a/drivers/mtd/nand/raw/nand_timings.c b/drivers/mtd/nand/raw/nand_timings.c
new file mode 100644
index 000000000..481b56d5f
--- /dev/null
+++ b/drivers/mtd/nand/raw/nand_timings.c
@@ -0,0 +1,642 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014 Free Electrons
+ *
+ * Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
+ */
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/export.h>
+
+#include "internals.h"
+
+#define ONFI_DYN_TIMING_MAX U16_MAX
+
+/*
+ * For non-ONFI chips we use the highest possible value for tPROG and tBERS.
+ * tR and tCCS will take the default values specified in the ONFI specification
+ * for timing mode 0: 200us and 500ns, respectively.
+ *
+ * These four values are tweaked to be more accurate in the case of ONFI chips.
+ */
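+/* All timing values in these tables are expressed in picoseconds. */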
+static const struct nand_interface_config onfi_sdr_timings[] = {
+ /* Mode 0 */
+ {
+ .type = NAND_SDR_IFACE,
+ .timings.mode = 0,
+ .timings.sdr = {
+ .tCCS_min = 500000,
+ .tR_max = 200000000,
+ .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tADL_min = 400000,
+ .tALH_min = 20000,
+ .tALS_min = 50000,
+ .tAR_min = 25000,
+ .tCEA_max = 100000,
+ .tCEH_min = 20000,
+ .tCH_min = 20000,
+ .tCHZ_max = 100000,
+ .tCLH_min = 20000,
+ .tCLR_min = 20000,
+ .tCLS_min = 50000,
+ .tCOH_min = 0,
+ .tCS_min = 70000,
+ .tDH_min = 20000,
+ .tDS_min = 40000,
+ .tFEAT_max = 1000000,
+ .tIR_min = 10000,
+ .tITC_max = 1000000,
+ .tRC_min = 100000,
+ .tREA_max = 40000,
+ .tREH_min = 30000,
+ .tRHOH_min = 0,
+ .tRHW_min = 200000,
+ .tRHZ_max = 200000,
+ .tRLOH_min = 0,
+ .tRP_min = 50000,
+ .tRR_min = 40000,
+ .tRST_max = 250000000000ULL,
+ .tWB_max = 200000,
+ .tWC_min = 100000,
+ .tWH_min = 30000,
+ .tWHR_min = 120000,
+ .tWP_min = 50000,
+ .tWW_min = 100000,
+ },
+ },
+ /* Mode 1 */
+ {
+ .type = NAND_SDR_IFACE,
+ .timings.mode = 1,
+ .timings.sdr = {
+ .tCCS_min = 500000,
+ .tR_max = 200000000,
+ .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tADL_min = 400000,
+ .tALH_min = 10000,
+ .tALS_min = 25000,
+ .tAR_min = 10000,
+ .tCEA_max = 45000,
+ .tCEH_min = 20000,
+ .tCH_min = 10000,
+ .tCHZ_max = 50000,
+ .tCLH_min = 10000,
+ .tCLR_min = 10000,
+ .tCLS_min = 25000,
+ .tCOH_min = 15000,
+ .tCS_min = 35000,
+ .tDH_min = 10000,
+ .tDS_min = 20000,
+ .tFEAT_max = 1000000,
+ .tIR_min = 0,
+ .tITC_max = 1000000,
+ .tRC_min = 50000,
+ .tREA_max = 30000,
+ .tREH_min = 15000,
+ .tRHOH_min = 15000,
+ .tRHW_min = 100000,
+ .tRHZ_max = 100000,
+ .tRLOH_min = 0,
+ .tRP_min = 25000,
+ .tRR_min = 20000,
+ .tRST_max = 500000000,
+ .tWB_max = 100000,
+ .tWC_min = 45000,
+ .tWH_min = 15000,
+ .tWHR_min = 80000,
+ .tWP_min = 25000,
+ .tWW_min = 100000,
+ },
+ },
+ /* Mode 2 */
+ {
+ .type = NAND_SDR_IFACE,
+ .timings.mode = 2,
+ .timings.sdr = {
+ .tCCS_min = 500000,
+ .tR_max = 200000000,
+ .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tADL_min = 400000,
+ .tALH_min = 10000,
+ .tALS_min = 15000,
+ .tAR_min = 10000,
+ .tCEA_max = 30000,
+ .tCEH_min = 20000,
+ .tCH_min = 10000,
+ .tCHZ_max = 50000,
+ .tCLH_min = 10000,
+ .tCLR_min = 10000,
+ .tCLS_min = 15000,
+ .tCOH_min = 15000,
+ .tCS_min = 25000,
+ .tDH_min = 5000,
+ .tDS_min = 15000,
+ .tFEAT_max = 1000000,
+ .tIR_min = 0,
+ .tITC_max = 1000000,
+ .tRC_min = 35000,
+ .tREA_max = 25000,
+ .tREH_min = 15000,
+ .tRHOH_min = 15000,
+ .tRHW_min = 100000,
+ .tRHZ_max = 100000,
+ .tRLOH_min = 0,
+ .tRR_min = 20000,
+ .tRST_max = 500000000,
+ .tWB_max = 100000,
+ .tRP_min = 17000,
+ .tWC_min = 35000,
+ .tWH_min = 15000,
+ .tWHR_min = 80000,
+ .tWP_min = 17000,
+ .tWW_min = 100000,
+ },
+ },
+ /* Mode 3 */
+ {
+ .type = NAND_SDR_IFACE,
+ .timings.mode = 3,
+ .timings.sdr = {
+ .tCCS_min = 500000,
+ .tR_max = 200000000,
+ .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tADL_min = 400000,
+ .tALH_min = 5000,
+ .tALS_min = 10000,
+ .tAR_min = 10000,
+ .tCEA_max = 25000,
+ .tCEH_min = 20000,
+ .tCH_min = 5000,
+ .tCHZ_max = 50000,
+ .tCLH_min = 5000,
+ .tCLR_min = 10000,
+ .tCLS_min = 10000,
+ .tCOH_min = 15000,
+ .tCS_min = 25000,
+ .tDH_min = 5000,
+ .tDS_min = 10000,
+ .tFEAT_max = 1000000,
+ .tIR_min = 0,
+ .tITC_max = 1000000,
+ .tRC_min = 30000,
+ .tREA_max = 20000,
+ .tREH_min = 10000,
+ .tRHOH_min = 15000,
+ .tRHW_min = 100000,
+ .tRHZ_max = 100000,
+ .tRLOH_min = 0,
+ .tRP_min = 15000,
+ .tRR_min = 20000,
+ .tRST_max = 500000000,
+ .tWB_max = 100000,
+ .tWC_min = 30000,
+ .tWH_min = 10000,
+ .tWHR_min = 80000,
+ .tWP_min = 15000,
+ .tWW_min = 100000,
+ },
+ },
+ /* Mode 4 */
+ {
+ .type = NAND_SDR_IFACE,
+ .timings.mode = 4,
+ .timings.sdr = {
+ .tCCS_min = 500000,
+ .tR_max = 200000000,
+ .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tADL_min = 400000,
+ .tALH_min = 5000,
+ .tALS_min = 10000,
+ .tAR_min = 10000,
+ .tCEA_max = 25000,
+ .tCEH_min = 20000,
+ .tCH_min = 5000,
+ .tCHZ_max = 30000,
+ .tCLH_min = 5000,
+ .tCLR_min = 10000,
+ .tCLS_min = 10000,
+ .tCOH_min = 15000,
+ .tCS_min = 20000,
+ .tDH_min = 5000,
+ .tDS_min = 10000,
+ .tFEAT_max = 1000000,
+ .tIR_min = 0,
+ .tITC_max = 1000000,
+ .tRC_min = 25000,
+ .tREA_max = 20000,
+ .tREH_min = 10000,
+ .tRHOH_min = 15000,
+ .tRHW_min = 100000,
+ .tRHZ_max = 100000,
+ .tRLOH_min = 5000,
+ .tRP_min = 12000,
+ .tRR_min = 20000,
+ .tRST_max = 500000000,
+ .tWB_max = 100000,
+ .tWC_min = 25000,
+ .tWH_min = 10000,
+ .tWHR_min = 80000,
+ .tWP_min = 12000,
+ .tWW_min = 100000,
+ },
+ },
+ /* Mode 5 */
+ {
+ .type = NAND_SDR_IFACE,
+ .timings.mode = 5,
+ .timings.sdr = {
+ .tCCS_min = 500000,
+ .tR_max = 200000000,
+ .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tADL_min = 400000,
+ .tALH_min = 5000,
+ .tALS_min = 10000,
+ .tAR_min = 10000,
+ .tCEA_max = 25000,
+ .tCEH_min = 20000,
+ .tCH_min = 5000,
+ .tCHZ_max = 30000,
+ .tCLH_min = 5000,
+ .tCLR_min = 10000,
+ .tCLS_min = 10000,
+ .tCOH_min = 15000,
+ .tCS_min = 15000,
+ .tDH_min = 5000,
+ .tDS_min = 7000,
+ .tFEAT_max = 1000000,
+ .tIR_min = 0,
+ .tITC_max = 1000000,
+ .tRC_min = 20000,
+ .tREA_max = 16000,
+ .tREH_min = 7000,
+ .tRHOH_min = 15000,
+ .tRHW_min = 100000,
+ .tRHZ_max = 100000,
+ .tRLOH_min = 5000,
+ .tRP_min = 10000,
+ .tRR_min = 20000,
+ .tRST_max = 500000000,
+ .tWB_max = 100000,
+ .tWC_min = 20000,
+ .tWH_min = 7000,
+ .tWHR_min = 80000,
+ .tWP_min = 10000,
+ .tWW_min = 100000,
+ },
+ },
+};
+
+static const struct nand_interface_config onfi_nvddr_timings[] = {
+ /* Mode 0 */
+ {
+ .type = NAND_NVDDR_IFACE,
+ .timings.mode = 0,
+ .timings.nvddr = {
+ .tCCS_min = 500000,
+ .tR_max = 200000000,
+ .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tAC_min = 3000,
+ .tAC_max = 25000,
+ .tADL_min = 400000,
+ .tCAD_min = 45000,
+ .tCAH_min = 10000,
+ .tCALH_min = 10000,
+ .tCALS_min = 10000,
+ .tCAS_min = 10000,
+ .tCEH_min = 20000,
+ .tCH_min = 10000,
+ .tCK_min = 50000,
+ .tCS_min = 35000,
+ .tDH_min = 5000,
+ .tDQSCK_min = 3000,
+ .tDQSCK_max = 25000,
+ .tDQSD_min = 0,
+ .tDQSD_max = 18000,
+ .tDQSHZ_max = 20000,
+ .tDQSQ_max = 5000,
+ .tDS_min = 5000,
+ .tDSC_min = 50000,
+ .tFEAT_max = 1000000,
+ .tITC_max = 1000000,
+ .tQHS_max = 6000,
+ .tRHW_min = 100000,
+ .tRR_min = 20000,
+ .tRST_max = 500000000,
+ .tWB_max = 100000,
+ .tWHR_min = 80000,
+ .tWRCK_min = 20000,
+ .tWW_min = 100000,
+ },
+ },
+ /* Mode 1 */
+ {
+ .type = NAND_NVDDR_IFACE,
+ .timings.mode = 1,
+ .timings.nvddr = {
+ .tCCS_min = 500000,
+ .tR_max = 200000000,
+ .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tAC_min = 3000,
+ .tAC_max = 25000,
+ .tADL_min = 400000,
+ .tCAD_min = 45000,
+ .tCAH_min = 5000,
+ .tCALH_min = 5000,
+ .tCALS_min = 5000,
+ .tCAS_min = 5000,
+ .tCEH_min = 20000,
+ .tCH_min = 5000,
+ .tCK_min = 30000,
+ .tCS_min = 25000,
+ .tDH_min = 2500,
+ .tDQSCK_min = 3000,
+ .tDQSCK_max = 25000,
+ .tDQSD_min = 0,
+ .tDQSD_max = 18000,
+ .tDQSHZ_max = 20000,
+ .tDQSQ_max = 2500,
+ .tDS_min = 3000,
+ .tDSC_min = 30000,
+ .tFEAT_max = 1000000,
+ .tITC_max = 1000000,
+ .tQHS_max = 3000,
+ .tRHW_min = 100000,
+ .tRR_min = 20000,
+ .tRST_max = 500000000,
+ .tWB_max = 100000,
+ .tWHR_min = 80000,
+ .tWRCK_min = 20000,
+ .tWW_min = 100000,
+ },
+ },
+ /* Mode 2 */
+ {
+ .type = NAND_NVDDR_IFACE,
+ .timings.mode = 2,
+ .timings.nvddr = {
+ .tCCS_min = 500000,
+ .tR_max = 200000000,
+ .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tAC_min = 3000,
+ .tAC_max = 25000,
+ .tADL_min = 400000,
+ .tCAD_min = 45000,
+ .tCAH_min = 4000,
+ .tCALH_min = 4000,
+ .tCALS_min = 4000,
+ .tCAS_min = 4000,
+ .tCEH_min = 20000,
+ .tCH_min = 4000,
+ .tCK_min = 20000,
+ .tCS_min = 15000,
+ .tDH_min = 1700,
+ .tDQSCK_min = 3000,
+ .tDQSCK_max = 25000,
+ .tDQSD_min = 0,
+ .tDQSD_max = 18000,
+ .tDQSHZ_max = 20000,
+ .tDQSQ_max = 1700,
+ .tDS_min = 2000,
+ .tDSC_min = 20000,
+ .tFEAT_max = 1000000,
+ .tITC_max = 1000000,
+ .tQHS_max = 2000,
+ .tRHW_min = 100000,
+ .tRR_min = 20000,
+ .tRST_max = 500000000,
+ .tWB_max = 100000,
+ .tWHR_min = 80000,
+ .tWRCK_min = 20000,
+ .tWW_min = 100000,
+ },
+ },
+ /* Mode 3 */
+ {
+ .type = NAND_NVDDR_IFACE,
+ .timings.mode = 3,
+ .timings.nvddr = {
+ .tCCS_min = 500000,
+ .tR_max = 200000000,
+ .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tAC_min = 3000,
+ .tAC_max = 25000,
+ .tADL_min = 400000,
+ .tCAD_min = 45000,
+ .tCAH_min = 3000,
+ .tCALH_min = 3000,
+ .tCALS_min = 3000,
+ .tCAS_min = 3000,
+ .tCEH_min = 20000,
+ .tCH_min = 3000,
+ .tCK_min = 15000,
+ .tCS_min = 15000,
+ .tDH_min = 1300,
+ .tDQSCK_min = 3000,
+ .tDQSCK_max = 25000,
+ .tDQSD_min = 0,
+ .tDQSD_max = 18000,
+ .tDQSHZ_max = 20000,
+ .tDQSQ_max = 1300,
+ .tDS_min = 1500,
+ .tDSC_min = 15000,
+ .tFEAT_max = 1000000,
+ .tITC_max = 1000000,
+ .tQHS_max = 1500,
+ .tRHW_min = 100000,
+ .tRR_min = 20000,
+ .tRST_max = 500000000,
+ .tWB_max = 100000,
+ .tWHR_min = 80000,
+ .tWRCK_min = 20000,
+ .tWW_min = 100000,
+ },
+ },
+ /* Mode 4 */
+ {
+ .type = NAND_NVDDR_IFACE,
+ .timings.mode = 4,
+ .timings.nvddr = {
+ .tCCS_min = 500000,
+ .tR_max = 200000000,
+ .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tAC_min = 3000,
+ .tAC_max = 25000,
+ .tADL_min = 400000,
+ .tCAD_min = 45000,
+ .tCAH_min = 2500,
+ .tCALH_min = 2500,
+ .tCALS_min = 2500,
+ .tCAS_min = 2500,
+ .tCEH_min = 20000,
+ .tCH_min = 2500,
+ .tCK_min = 12000,
+ .tCS_min = 15000,
+ .tDH_min = 1100,
+ .tDQSCK_min = 3000,
+ .tDQSCK_max = 25000,
+ .tDQSD_min = 0,
+ .tDQSD_max = 18000,
+ .tDQSHZ_max = 20000,
+ .tDQSQ_max = 1000,
+ .tDS_min = 1100,
+ .tDSC_min = 12000,
+ .tFEAT_max = 1000000,
+ .tITC_max = 1000000,
+ .tQHS_max = 1200,
+ .tRHW_min = 100000,
+ .tRR_min = 20000,
+ .tRST_max = 500000000,
+ .tWB_max = 100000,
+ .tWHR_min = 80000,
+ .tWRCK_min = 20000,
+ .tWW_min = 100000,
+ },
+ },
+ /* Mode 5 */
+ {
+ .type = NAND_NVDDR_IFACE,
+ .timings.mode = 5,
+ .timings.nvddr = {
+ .tCCS_min = 500000,
+ .tR_max = 200000000,
+ .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tAC_min = 3000,
+ .tAC_max = 25000,
+ .tADL_min = 400000,
+ .tCAD_min = 45000,
+ .tCAH_min = 2000,
+ .tCALH_min = 2000,
+ .tCALS_min = 2000,
+ .tCAS_min = 2000,
+ .tCEH_min = 20000,
+ .tCH_min = 2000,
+ .tCK_min = 10000,
+ .tCS_min = 15000,
+ .tDH_min = 900,
+ .tDQSCK_min = 3000,
+ .tDQSCK_max = 25000,
+ .tDQSD_min = 0,
+ .tDQSD_max = 18000,
+ .tDQSHZ_max = 20000,
+ .tDQSQ_max = 850,
+ .tDS_min = 900,
+ .tDSC_min = 10000,
+ .tFEAT_max = 1000000,
+ .tITC_max = 1000000,
+ .tQHS_max = 1000,
+ .tRHW_min = 100000,
+ .tRR_min = 20000,
+ .tRST_max = 500000000,
+ .tWB_max = 100000,
+ .tWHR_min = 80000,
+ .tWRCK_min = 20000,
+ .tWW_min = 100000,
+ },
+ },
+};
+
+/* All NAND chips share the same reset data interface: SDR mode 0 */
+const struct nand_interface_config *nand_get_reset_interface_config(void)
+{
+ return &onfi_sdr_timings[0];
+}
+
+/**
+ * onfi_find_closest_sdr_mode - Derive the closest ONFI SDR timing mode given a
+ * set of timings
+ * @spec_timings: the timings to compare against the ONFI modes
+ */
+unsigned int
+onfi_find_closest_sdr_mode(const struct nand_sdr_timings *spec_timings)
+{
+ const struct nand_sdr_timings *onfi_timings;
+ int mode;
+
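+	/*
+	 * Walk the modes from fastest (highest) to slowest and return the
+	 * first one whose minimum timing constraints are all met by the
+	 * given spec timings; mode 0 is the fallback.
+	 */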
+ for (mode = ARRAY_SIZE(onfi_sdr_timings) - 1; mode > 0; mode--) {
+ onfi_timings = &onfi_sdr_timings[mode].timings.sdr;
+
+ if (spec_timings->tCCS_min <= onfi_timings->tCCS_min &&
+ spec_timings->tADL_min <= onfi_timings->tADL_min &&
+ spec_timings->tALH_min <= onfi_timings->tALH_min &&
+ spec_timings->tALS_min <= onfi_timings->tALS_min &&
+ spec_timings->tAR_min <= onfi_timings->tAR_min &&
+ spec_timings->tCEH_min <= onfi_timings->tCEH_min &&
+ spec_timings->tCH_min <= onfi_timings->tCH_min &&
+ spec_timings->tCLH_min <= onfi_timings->tCLH_min &&
+ spec_timings->tCLR_min <= onfi_timings->tCLR_min &&
+ spec_timings->tCLS_min <= onfi_timings->tCLS_min &&
+ spec_timings->tCOH_min <= onfi_timings->tCOH_min &&
+ spec_timings->tCS_min <= onfi_timings->tCS_min &&
+ spec_timings->tDH_min <= onfi_timings->tDH_min &&
+ spec_timings->tDS_min <= onfi_timings->tDS_min &&
+ spec_timings->tIR_min <= onfi_timings->tIR_min &&
+ spec_timings->tRC_min <= onfi_timings->tRC_min &&
+ spec_timings->tREH_min <= onfi_timings->tREH_min &&
+ spec_timings->tRHOH_min <= onfi_timings->tRHOH_min &&
+ spec_timings->tRHW_min <= onfi_timings->tRHW_min &&
+ spec_timings->tRLOH_min <= onfi_timings->tRLOH_min &&
+ spec_timings->tRP_min <= onfi_timings->tRP_min &&
+ spec_timings->tRR_min <= onfi_timings->tRR_min &&
+ spec_timings->tWC_min <= onfi_timings->tWC_min &&
+ spec_timings->tWH_min <= onfi_timings->tWH_min &&
+ spec_timings->tWHR_min <= onfi_timings->tWHR_min &&
+ spec_timings->tWP_min <= onfi_timings->tWP_min &&
+ spec_timings->tWW_min <= onfi_timings->tWW_min)
+ return mode;
+ }
+
+ return 0;
+}
+
+/**
+ * onfi_fill_interface_config - Initialize an interface config from a given
+ * ONFI mode
+ * @chip: The NAND chip
+ * @iface: The interface configuration to fill
+ * @type: The interface type
+ * @timing_mode: The ONFI timing mode
+ */
+void onfi_fill_interface_config(struct nand_chip *chip,
+ struct nand_interface_config *iface,
+ enum nand_interface_type type,
+ unsigned int timing_mode)
+{
+	struct onfi_params *onfi = chip->parameters.onfi;
+
+	if (WARN_ON(type != NAND_SDR_IFACE && type != NAND_NVDDR_IFACE))
+		return;
+
+	if (WARN_ON(type == NAND_SDR_IFACE &&
+		    timing_mode >= ARRAY_SIZE(onfi_sdr_timings)))
+		return;
+
+	if (WARN_ON(type == NAND_NVDDR_IFACE &&
+		    timing_mode >= ARRAY_SIZE(onfi_nvddr_timings)))
+		return;
+
+	if (type == NAND_SDR_IFACE)
+		*iface = onfi_sdr_timings[timing_mode];
+	else
+		*iface = onfi_nvddr_timings[timing_mode];
+
+ /*
+ * Initialize timings that cannot be deduced from timing mode:
+ * tPROG, tBERS, tR and tCCS.
+	 * This information is part of the ONFI parameter page.
+ */
+ if (onfi) {
+ struct nand_sdr_timings *timings = &iface->timings.sdr;
+
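+		/*
+		 * Note: this assumes tPROG, tBERS, tR and tCCS sit at the
+		 * same offsets in the SDR and NV-DDR timing structures, so
+		 * writing through timings.sdr is valid for both types.
+		 */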
+ /* microseconds -> picoseconds */
+ timings->tPROG_max = 1000000ULL * onfi->tPROG;
+ timings->tBERS_max = 1000000ULL * onfi->tBERS;
+ timings->tR_max = 1000000ULL * onfi->tR;
+
+ /* nanoseconds -> picoseconds */
+ timings->tCCS_min = 1000UL * onfi->tCCS;
+ }
+}
diff --git a/drivers/mtd/nand/raw/nand_toshiba.c b/drivers/mtd/nand/raw/nand_toshiba.c
new file mode 100644
index 000000000..cf4f37959
--- /dev/null
+++ b/drivers/mtd/nand/raw/nand_toshiba.c
@@ -0,0 +1,301 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2017 Free Electrons
+ * Copyright (C) 2017 NextThing Co
+ *
+ * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
+ */
+
+#include "internals.h"
+
+/* Bit for detecting BENAND */
+#define TOSHIBA_NAND_ID4_IS_BENAND BIT(7)
+
+/* Recommended to rewrite for BENAND */
+#define TOSHIBA_NAND_STATUS_REWRITE_RECOMMENDED BIT(3)
+
+/* ECC Status Read Command for BENAND */
+#define TOSHIBA_NAND_CMD_ECC_STATUS_READ 0x7A
+
+/* ECC Status Mask for BENAND */
+#define TOSHIBA_NAND_ECC_STATUS_MASK 0x0F
+
+/* Uncorrectable Error for BENAND */
+#define TOSHIBA_NAND_ECC_STATUS_UNCORR 0x0F
+
+/* Max ECC Steps for BENAND */
+#define TOSHIBA_NAND_MAX_ECC_STEPS 8
+
+static int toshiba_nand_benand_read_eccstatus_op(struct nand_chip *chip,
+ u8 *buf)
+{
+ u8 *ecc_status = buf;
+
+ if (nand_has_exec_op(chip)) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(nand_get_interface_config(chip));
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(TOSHIBA_NAND_CMD_ECC_STATUS_READ,
+ PSEC_TO_NSEC(sdr->tADL_min)),
+ NAND_OP_8BIT_DATA_IN(chip->ecc.steps, ecc_status, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+
+ return nand_exec_op(chip, &op);
+ }
+
+ return -ENOTSUPP;
+}
+
+static int toshiba_nand_benand_eccstatus(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+ unsigned int max_bitflips = 0;
+ u8 status, ecc_status[TOSHIBA_NAND_MAX_ECC_STEPS];
+
+ /* Check Status */
+ ret = toshiba_nand_benand_read_eccstatus_op(chip, ecc_status);
+ if (!ret) {
+ unsigned int i, bitflips = 0;
+
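+		/* One status byte per ECC step; 0xF flags an uncorrectable step. */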
+ for (i = 0; i < chip->ecc.steps; i++) {
+ bitflips = ecc_status[i] & TOSHIBA_NAND_ECC_STATUS_MASK;
+ if (bitflips == TOSHIBA_NAND_ECC_STATUS_UNCORR) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += bitflips;
+ max_bitflips = max(max_bitflips, bitflips);
+ }
+ }
+
+ return max_bitflips;
+ }
+
+ /*
+ * Fallback to regular status check if
+ * toshiba_nand_benand_read_eccstatus_op() failed.
+ */
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+
+ if (status & NAND_STATUS_FAIL) {
+ /* uncorrected */
+ mtd->ecc_stats.failed++;
+ } else if (status & TOSHIBA_NAND_STATUS_REWRITE_RECOMMENDED) {
+ /* corrected */
+ max_bitflips = mtd->bitflip_threshold;
+ mtd->ecc_stats.corrected += max_bitflips;
+ }
+
+ return max_bitflips;
+}
+
+static int
+toshiba_nand_read_page_benand(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ int ret;
+
+ ret = nand_read_page_raw(chip, buf, oob_required, page);
+ if (ret)
+ return ret;
+
+ return toshiba_nand_benand_eccstatus(chip);
+}
+
+static int
+toshiba_nand_read_subpage_benand(struct nand_chip *chip, uint32_t data_offs,
+ uint32_t readlen, uint8_t *bufpoi, int page)
+{
+ int ret;
+
+ ret = nand_read_page_op(chip, page, data_offs,
+ bufpoi + data_offs, readlen);
+ if (ret)
+ return ret;
+
+ return toshiba_nand_benand_eccstatus(chip);
+}
+
+static void toshiba_nand_benand_init(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ /*
+ * On BENAND, the entire OOB region can be used by the MTD user.
+	 * The calculated ECC bytes are stored in a separate, isolated
+	 * area that is not accessible to users.
+ * This is why chip->ecc.bytes = 0.
+ */
+ chip->ecc.bytes = 0;
+ chip->ecc.size = 512;
+ chip->ecc.strength = 8;
+ chip->ecc.read_page = toshiba_nand_read_page_benand;
+ chip->ecc.read_subpage = toshiba_nand_read_subpage_benand;
+ chip->ecc.write_page = nand_write_page_raw;
+ chip->ecc.read_page_raw = nand_read_page_raw_notsupp;
+ chip->ecc.write_page_raw = nand_write_page_raw_notsupp;
+
+ chip->options |= NAND_SUBPAGE_READ;
+
+ mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout());
+}
+
+static void toshiba_nand_decode_id(struct nand_chip *chip)
+{
+ struct nand_device *base = &chip->base;
+ struct nand_ecc_props requirements = {};
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg;
+
+ memorg = nanddev_get_memorg(&chip->base);
+
+ nand_decode_ext_id(chip);
+
+ /*
+	 * Toshiba 24nm raw SLC (i.e., not BENAND) chips have 32B OOB per
+ * 512B page. For Toshiba SLC, we decode the 5th/6th byte as
+ * follows:
+ * - ID byte 6, bits[2:0]: 100b -> 43nm, 101b -> 32nm,
+ * 110b -> 24nm
+ * - ID byte 5, bit[7]: 1 -> BENAND, 0 -> raw SLC
+ */
+ if (chip->id.len >= 6 && nand_is_slc(chip) &&
+ (chip->id.data[5] & 0x7) == 0x6 /* 24nm */ &&
+ !(chip->id.data[4] & TOSHIBA_NAND_ID4_IS_BENAND) /* !BENAND */) {
+ memorg->oobsize = 32 * memorg->pagesize >> 9;
+ mtd->oobsize = memorg->oobsize;
+ }
+
+ /*
+ * Extract ECC requirements from 6th id byte.
+	 * For Toshiba SLC, ECC requirements are as follows:
+ * - 43nm: 1 bit ECC for each 512Byte is required.
+ * - 32nm: 4 bit ECC for each 512Byte is required.
+ * - 24nm: 8 bit ECC for each 512Byte is required.
+ */
+ if (chip->id.len >= 6 && nand_is_slc(chip)) {
+ requirements.step_size = 512;
+ switch (chip->id.data[5] & 0x7) {
+ case 0x4:
+ requirements.strength = 1;
+ break;
+ case 0x5:
+ requirements.strength = 4;
+ break;
+ case 0x6:
+ requirements.strength = 8;
+ break;
+ default:
+ WARN(1, "Could not get ECC info");
+ requirements.step_size = 0;
+ break;
+ }
+ }
+
+ nanddev_set_ecc_requirements(base, &requirements);
+}
+
+static int
+tc58teg5dclta00_choose_interface_config(struct nand_chip *chip,
+ struct nand_interface_config *iface)
+{
+ onfi_fill_interface_config(chip, iface, NAND_SDR_IFACE, 5);
+
+ return nand_choose_best_sdr_timings(chip, iface, NULL);
+}
+
+static int
+tc58nvg0s3e_choose_interface_config(struct nand_chip *chip,
+ struct nand_interface_config *iface)
+{
+ onfi_fill_interface_config(chip, iface, NAND_SDR_IFACE, 2);
+
+ return nand_choose_best_sdr_timings(chip, iface, NULL);
+}
+
+static int
+th58nvg2s3hbai4_choose_interface_config(struct nand_chip *chip,
+ struct nand_interface_config *iface)
+{
+ struct nand_sdr_timings *sdr = &iface->timings.sdr;
+
+ /* Start with timings from the closest timing mode, mode 4. */
+ onfi_fill_interface_config(chip, iface, NAND_SDR_IFACE, 4);
+
+ /* Patch timings that differ from mode 4. */
+ sdr->tALS_min = 12000;
+ sdr->tCHZ_max = 20000;
+ sdr->tCLS_min = 12000;
+ sdr->tCOH_min = 0;
+ sdr->tDS_min = 12000;
+ sdr->tRHOH_min = 25000;
+ sdr->tRHW_min = 30000;
+ sdr->tRHZ_max = 60000;
+ sdr->tWHR_min = 60000;
+
+ /* Patch timings not part of onfi timing mode. */
+ sdr->tPROG_max = 700000000;
+ sdr->tBERS_max = 5000000000;
+
+ return nand_choose_best_sdr_timings(chip, iface, sdr);
+}
+
+static int tc58teg5dclta00_init(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ chip->ops.choose_interface_config =
+ &tc58teg5dclta00_choose_interface_config;
+ chip->options |= NAND_NEED_SCRAMBLING;
+ mtd_set_pairing_scheme(mtd, &dist3_pairing_scheme);
+
+ return 0;
+}
+
+static int tc58nvg0s3e_init(struct nand_chip *chip)
+{
+ chip->ops.choose_interface_config =
+ &tc58nvg0s3e_choose_interface_config;
+
+ return 0;
+}
+
+static int th58nvg2s3hbai4_init(struct nand_chip *chip)
+{
+ chip->ops.choose_interface_config =
+ &th58nvg2s3hbai4_choose_interface_config;
+
+ return 0;
+}
+
+static int toshiba_nand_init(struct nand_chip *chip)
+{
+ if (nand_is_slc(chip))
+ chip->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE;
+
+ /* Check that chip is BENAND and ECC mode is on-die */
+ if (nand_is_slc(chip) &&
+ chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE &&
+ chip->id.data[4] & TOSHIBA_NAND_ID4_IS_BENAND)
+ toshiba_nand_benand_init(chip);
+
+ if (!strcmp("TC58TEG5DCLTA00", chip->parameters.model))
+ tc58teg5dclta00_init(chip);
+ if (!strncmp("TC58NVG0S3E", chip->parameters.model,
+ sizeof("TC58NVG0S3E") - 1))
+ tc58nvg0s3e_init(chip);
+ if (!strncmp("TH58NVG2S3HBAI4", chip->parameters.model,
+ sizeof("TH58NVG2S3HBAI4") - 1))
+ th58nvg2s3hbai4_init(chip);
+
+ return 0;
+}
+
+const struct nand_manufacturer_ops toshiba_nand_manuf_ops = {
+ .detect = toshiba_nand_decode_id,
+ .init = toshiba_nand_init,
+};
diff --git a/drivers/mtd/nand/raw/nandsim.c b/drivers/mtd/nand/raw/nandsim.c
new file mode 100644
index 000000000..9a9f1c24d
--- /dev/null
+++ b/drivers/mtd/nand/raw/nandsim.c
@@ -0,0 +1,2470 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * NAND flash simulator.
+ *
+ * Author: Artem B. Bityuckiy <dedekind@oktetlabs.ru>, <dedekind@infradead.org>
+ *
+ * Copyright (C) 2004 Nokia Corporation
+ *
+ * Note: NS means "NAND Simulator".
+ * Note: Input means input TO flash chip, output means output FROM chip.
+ */
+
+#define pr_fmt(fmt) "[nandsim]" fmt
+
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/vmalloc.h>
+#include <linux/math64.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/nand_bch.h>
+#include <linux/mtd/partitions.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/random.h>
+#include <linux/sched.h>
+#include <linux/sched/mm.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+
+/* Default simulator parameters values */
+#if !defined(CONFIG_NANDSIM_FIRST_ID_BYTE) || \
+ !defined(CONFIG_NANDSIM_SECOND_ID_BYTE) || \
+ !defined(CONFIG_NANDSIM_THIRD_ID_BYTE) || \
+ !defined(CONFIG_NANDSIM_FOURTH_ID_BYTE)
+#define CONFIG_NANDSIM_FIRST_ID_BYTE 0x98
+#define CONFIG_NANDSIM_SECOND_ID_BYTE 0x39
+#define CONFIG_NANDSIM_THIRD_ID_BYTE 0xFF /* No byte */
+#define CONFIG_NANDSIM_FOURTH_ID_BYTE 0xFF /* No byte */
+#endif
+
+#ifndef CONFIG_NANDSIM_ACCESS_DELAY
+#define CONFIG_NANDSIM_ACCESS_DELAY 25
+#endif
+#ifndef CONFIG_NANDSIM_PROGRAMM_DELAY
+#define CONFIG_NANDSIM_PROGRAMM_DELAY 200
+#endif
+#ifndef CONFIG_NANDSIM_ERASE_DELAY
+#define CONFIG_NANDSIM_ERASE_DELAY 2
+#endif
+#ifndef CONFIG_NANDSIM_OUTPUT_CYCLE
+#define CONFIG_NANDSIM_OUTPUT_CYCLE 40
+#endif
+#ifndef CONFIG_NANDSIM_INPUT_CYCLE
+#define CONFIG_NANDSIM_INPUT_CYCLE 50
+#endif
+#ifndef CONFIG_NANDSIM_BUS_WIDTH
+#define CONFIG_NANDSIM_BUS_WIDTH 8
+#endif
+#ifndef CONFIG_NANDSIM_DO_DELAYS
+#define CONFIG_NANDSIM_DO_DELAYS 0
+#endif
+#ifndef CONFIG_NANDSIM_LOG
+#define CONFIG_NANDSIM_LOG 0
+#endif
+#ifndef CONFIG_NANDSIM_DBG
+#define CONFIG_NANDSIM_DBG 0
+#endif
+#ifndef CONFIG_NANDSIM_MAX_PARTS
+#define CONFIG_NANDSIM_MAX_PARTS 32
+#endif
+
+static uint access_delay = CONFIG_NANDSIM_ACCESS_DELAY;
+static uint programm_delay = CONFIG_NANDSIM_PROGRAMM_DELAY;
+static uint erase_delay = CONFIG_NANDSIM_ERASE_DELAY;
+static uint output_cycle = CONFIG_NANDSIM_OUTPUT_CYCLE;
+static uint input_cycle = CONFIG_NANDSIM_INPUT_CYCLE;
+static uint bus_width = CONFIG_NANDSIM_BUS_WIDTH;
+static uint do_delays = CONFIG_NANDSIM_DO_DELAYS;
+static uint log = CONFIG_NANDSIM_LOG;
+static uint dbg = CONFIG_NANDSIM_DBG;
+static unsigned long parts[CONFIG_NANDSIM_MAX_PARTS];
+static unsigned int parts_num;
+static char *badblocks = NULL;
+static char *weakblocks = NULL;
+static char *weakpages = NULL;
+static unsigned int bitflips = 0;
+static char *gravepages = NULL;
+static unsigned int overridesize = 0;
+static char *cache_file = NULL;
+static unsigned int bbt;
+static unsigned int bch;
+static u_char id_bytes[8] = {
+ [0] = CONFIG_NANDSIM_FIRST_ID_BYTE,
+ [1] = CONFIG_NANDSIM_SECOND_ID_BYTE,
+ [2] = CONFIG_NANDSIM_THIRD_ID_BYTE,
+ [3] = CONFIG_NANDSIM_FOURTH_ID_BYTE,
+ [4 ... 7] = 0xFF,
+};
+
+module_param_array(id_bytes, byte, NULL, 0400);
+module_param_named(first_id_byte, id_bytes[0], byte, 0400);
+module_param_named(second_id_byte, id_bytes[1], byte, 0400);
+module_param_named(third_id_byte, id_bytes[2], byte, 0400);
+module_param_named(fourth_id_byte, id_bytes[3], byte, 0400);
+module_param(access_delay, uint, 0400);
+module_param(programm_delay, uint, 0400);
+module_param(erase_delay, uint, 0400);
+module_param(output_cycle, uint, 0400);
+module_param(input_cycle, uint, 0400);
+module_param(bus_width, uint, 0400);
+module_param(do_delays, uint, 0400);
+module_param(log, uint, 0400);
+module_param(dbg, uint, 0400);
+module_param_array(parts, ulong, &parts_num, 0400);
+module_param(badblocks, charp, 0400);
+module_param(weakblocks, charp, 0400);
+module_param(weakpages, charp, 0400);
+module_param(bitflips, uint, 0400);
+module_param(gravepages, charp, 0400);
+module_param(overridesize, uint, 0400);
+module_param(cache_file, charp, 0400);
+module_param(bbt, uint, 0400);
+module_param(bch, uint, 0400);
+
+MODULE_PARM_DESC(id_bytes, "The ID bytes returned by NAND Flash 'read ID' command");
+MODULE_PARM_DESC(first_id_byte, "The first byte returned by NAND Flash 'read ID' command (manufacturer ID) (obsolete)");
+MODULE_PARM_DESC(second_id_byte, "The second byte returned by NAND Flash 'read ID' command (chip ID) (obsolete)");
+MODULE_PARM_DESC(third_id_byte, "The third byte returned by NAND Flash 'read ID' command (obsolete)");
+MODULE_PARM_DESC(fourth_id_byte, "The fourth byte returned by NAND Flash 'read ID' command (obsolete)");
+MODULE_PARM_DESC(access_delay, "Initial page access delay (microseconds)");
+MODULE_PARM_DESC(programm_delay, "Page program delay (microseconds)");
+MODULE_PARM_DESC(erase_delay, "Sector erase delay (milliseconds)");
+MODULE_PARM_DESC(output_cycle, "Word output (from flash) time (nanoseconds)");
+MODULE_PARM_DESC(input_cycle, "Word input (to flash) time (nanoseconds)");
+MODULE_PARM_DESC(bus_width, "Chip's bus width (8- or 16-bit)");
+MODULE_PARM_DESC(do_delays, "Simulate NAND delays using busy-waits if not zero");
+MODULE_PARM_DESC(log, "Perform logging if not zero");
+MODULE_PARM_DESC(dbg, "Output debug information if not zero");
+MODULE_PARM_DESC(parts, "Partition sizes (in erase blocks) separated by commas");
+/* Page and erase block positions for the following parameters are independent of any partitions */
+MODULE_PARM_DESC(badblocks, "Erase blocks that are initially marked bad, separated by commas");
+MODULE_PARM_DESC(weakblocks, "Weak erase blocks [: remaining erase cycles (defaults to 3)]"
+ " separated by commas e.g. 113:2 means eb 113"
+ " can be erased only twice before failing");
+MODULE_PARM_DESC(weakpages, "Weak pages [: maximum writes (defaults to 3)]"
+ " separated by commas e.g. 1401:2 means page 1401"
+ " can be written only twice before failing");
+MODULE_PARM_DESC(bitflips, "Maximum number of random bit flips per page (zero by default)");
+MODULE_PARM_DESC(gravepages, "Pages that lose data [: maximum reads (defaults to 3)]"
+ " separated by commas e.g. 1401:2 means page 1401"
+ " can be read only twice before failing");
+MODULE_PARM_DESC(overridesize, "Specifies the NAND Flash size overriding the ID bytes. "
+ "The size is specified in erase blocks and as the exponent of a power of two"
+ " e.g. 5 means a size of 32 erase blocks");
+MODULE_PARM_DESC(cache_file, "File to use to cache nand pages instead of memory");
+MODULE_PARM_DESC(bbt, "0 OOB, 1 BBT with marker in OOB, 2 BBT with marker in data area");
+MODULE_PARM_DESC(bch, "Enable BCH ECC and set how many bits should "
+ "be correctable in 512-byte blocks");
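+
+/*
+ * Example invocation (ID bytes and path are illustrative only): simulate
+ * a 256MiB, 2048-byte page chip backed by a file instead of RAM:
+ *
+ *   modprobe nandsim id_bytes=0x20,0xaa,0x00,0x15 cache_file=/tmp/ns.img
+ */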
+
+/* The largest possible page size */
+#define NS_LARGEST_PAGE_SIZE 4096
+
+/* Simulator's output macros (logging, debugging, warning, error) */
+#define NS_LOG(args...) \
+ do { if (log) pr_debug(" log: " args); } while(0)
+#define NS_DBG(args...) \
+ do { if (dbg) pr_debug(" debug: " args); } while(0)
+#define NS_WARN(args...) \
+ do { pr_warn(" warning: " args); } while(0)
+#define NS_ERR(args...) \
+ do { pr_err(" error: " args); } while(0)
+#define NS_INFO(args...) \
+ do { pr_info(" " args); } while(0)
+
+/* Busy-wait delay macros (microseconds, milliseconds) */
+#define NS_UDELAY(us) \
+ do { if (do_delays) udelay(us); } while(0)
+#define NS_MDELAY(ms) \
+	do { if (do_delays) mdelay(ms); } while(0)
+
+/* Is the nandsim structure initialized ? */
+#define NS_IS_INITIALIZED(ns) ((ns)->geom.totsz != 0)
+
+/* Good operation completion status */
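+/* The WP status bit (set = writes allowed) is reported only while the
+ * WP line is not asserted. */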
+#define NS_STATUS_OK(ns) (NAND_STATUS_READY | (NAND_STATUS_WP * ((ns)->lines.wp == 0)))
+
+/* Operation failed completion status */
+#define NS_STATUS_FAILED(ns) (NAND_STATUS_FAIL | NS_STATUS_OK(ns))
+
+/* Calculate the page offset in flash RAM image by (row, column) address */
+#define NS_RAW_OFFSET(ns) \
+ (((ns)->regs.row * (ns)->geom.pgszoob) + (ns)->regs.column)
+
+/* Calculate the OOB offset in flash RAM image by (row, column) address */
+#define NS_RAW_OFFSET_OOB(ns) (NS_RAW_OFFSET(ns) + ns->geom.pgsz)
+
+/* After a command is input, the simulator goes to one of the following states */
+#define STATE_CMD_READ0 0x00000001 /* read data from the beginning of page */
+#define STATE_CMD_READ1 0x00000002 /* read data from the second half of page */
+#define STATE_CMD_READSTART 0x00000003 /* read data second command (large page devices) */
+#define STATE_CMD_PAGEPROG 0x00000004 /* start page program */
+#define STATE_CMD_READOOB 0x00000005 /* read OOB area */
+#define STATE_CMD_ERASE1 0x00000006 /* sector erase first command */
+#define STATE_CMD_STATUS 0x00000007 /* read status */
+#define STATE_CMD_SEQIN 0x00000009 /* sequential data input */
+#define STATE_CMD_READID 0x0000000A /* read ID */
+#define STATE_CMD_ERASE2 0x0000000B /* sector erase second command */
+#define STATE_CMD_RESET 0x0000000C /* reset */
+#define STATE_CMD_RNDOUT 0x0000000D /* random output command */
+#define STATE_CMD_RNDOUTSTART 0x0000000E /* random output start command */
+#define STATE_CMD_MASK 0x0000000F /* command states mask */
+
+/* After an address is input, the simulator goes to one of these states */
+#define STATE_ADDR_PAGE 0x00000010 /* full (row, column) address is accepted */
+#define STATE_ADDR_SEC 0x00000020 /* sector address was accepted */
+#define STATE_ADDR_COLUMN 0x00000030 /* column address was accepted */
+#define STATE_ADDR_ZERO 0x00000040 /* one byte zero address was accepted */
+#define STATE_ADDR_MASK 0x00000070 /* address states mask */
+
+/* During data input/output the simulator is in these states */
+#define STATE_DATAIN 0x00000100 /* waiting for data input */
+#define STATE_DATAIN_MASK 0x00000100 /* data input states mask */
+
+#define STATE_DATAOUT 0x00001000 /* waiting for page data output */
+#define STATE_DATAOUT_ID 0x00002000 /* waiting for ID bytes output */
+#define STATE_DATAOUT_STATUS 0x00003000 /* waiting for status output */
+#define STATE_DATAOUT_MASK 0x00007000 /* data output states mask */
+
+/* Previous operation is done, ready to accept new requests */
+#define STATE_READY 0x00000000
+
+/* This state is used to mark that the next state isn't known yet */
+#define STATE_UNKNOWN 0x10000000
+
+/* Simulator's actions bit masks */
+#define ACTION_CPY 0x00100000 /* copy page/OOB to the internal buffer */
+#define ACTION_PRGPAGE 0x00200000 /* program the internal buffer to flash */
+#define ACTION_SECERASE 0x00300000 /* erase sector */
+#define ACTION_ZEROOFF 0x00400000 /* don't add any offset to address */
+#define ACTION_HALFOFF 0x00500000 /* add to address half of page */
+#define ACTION_OOBOFF 0x00600000 /* add to address OOB offset */
+#define ACTION_MASK 0x00700000 /* action mask */
+
+#define NS_OPER_NUM 13 /* Number of operations supported by the simulator */
+#define NS_OPER_STATES 6 /* Maximum number of states in operation */
+
+#define OPT_ANY 0xFFFFFFFF /* any chip supports this operation */
+#define OPT_PAGE512 0x00000002 /* 512-byte page chips */
+#define OPT_PAGE2048 0x00000008 /* 2048-byte page chips */
+#define OPT_PAGE512_8BIT 0x00000040 /* 512-byte page chips with 8-bit bus width */
+#define OPT_PAGE4096 0x00000080 /* 4096-byte page chips */
+#define OPT_LARGEPAGE (OPT_PAGE2048 | OPT_PAGE4096) /* 2048 & 4096-byte page chips */
+#define OPT_SMALLPAGE (OPT_PAGE512) /* 512-byte page chips */
+
+/* Remove action bits from state */
+#define NS_STATE(x) ((x) & ~ACTION_MASK)
+
+/*
+ * Maximum number of previous states which need to be saved. Currently
+ * saving is only needed for the page program operation preceded by a
+ * read command (which is only valid for 512-byte pages).
+ */
+#define NS_MAX_PREVSTATES 1
+
+/* Maximum page cache pages needed to read or write a NAND page to the cache_file */
+#define NS_MAX_HELD_PAGES 16
+
+/*
+ * A union to represent flash memory contents and flash buffer.
+ */
+union ns_mem {
+ u_char *byte; /* for byte access */
+ uint16_t *word; /* for 16-bit word access */
+};
+
+/*
+ * The structure which describes all the internal simulator data.
+ */
+struct nandsim {
+ struct nand_chip chip;
+ struct nand_controller base;
+ struct mtd_partition partitions[CONFIG_NANDSIM_MAX_PARTS];
+ unsigned int nbparts;
+
+ uint busw; /* flash chip bus width (8 or 16) */
+ u_char ids[8]; /* chip's ID bytes */
+ uint32_t options; /* chip's characteristic bits */
+ uint32_t state; /* current chip state */
+ uint32_t nxstate; /* next expected state */
+
+	uint32_t *op; /* current operation, NULL if the operation isn't known yet */
+ uint32_t pstates[NS_MAX_PREVSTATES]; /* previous states */
+ uint16_t npstates; /* number of previous states saved */
+ uint16_t stateidx; /* current state index */
+
+ /* The simulated NAND flash pages array */
+ union ns_mem *pages;
+
+ /* Slab allocator for nand pages */
+ struct kmem_cache *nand_pages_slab;
+
+ /* Internal buffer of page + OOB size bytes */
+ union ns_mem buf;
+
+ /* NAND flash "geometry" */
+ struct {
+ uint64_t totsz; /* total flash size, bytes */
+ uint32_t secsz; /* flash sector (erase block) size, bytes */
+ uint pgsz; /* NAND flash page size, bytes */
+ uint oobsz; /* page OOB area size, bytes */
+ uint64_t totszoob; /* total flash size including OOB, bytes */
+		uint pgszoob; /* page size including OOB, bytes */
+ uint secszoob; /* sector size including OOB, bytes */
+ uint pgnum; /* total number of pages */
+ uint pgsec; /* number of pages per sector */
+ uint secshift; /* bits number in sector size */
+ uint pgshift; /* bits number in page size */
+ uint pgaddrbytes; /* bytes per page address */
+ uint secaddrbytes; /* bytes per sector address */
+		uint idbytes; /* the number of ID bytes that this chip outputs */
+ } geom;
+
+ /* NAND flash internal registers */
+ struct {
+ unsigned command; /* the command register */
+ u_char status; /* the status register */
+ uint row; /* the page number */
+ uint column; /* the offset within page */
+ uint count; /* internal counter */
+ uint num; /* number of bytes which must be processed */
+ uint off; /* fixed page offset */
+ } regs;
+
+ /* NAND flash lines state */
+ struct {
+ int ce; /* chip Enable */
+ int cle; /* command Latch Enable */
+ int ale; /* address Latch Enable */
+ int wp; /* write Protect */
+ } lines;
+
+ /* Fields needed when using a cache file */
+ struct file *cfile; /* Open file */
+ unsigned long *pages_written; /* Which pages have been written */
+ void *file_buf;
+ struct page *held_pages[NS_MAX_HELD_PAGES];
+ int held_cnt;
+
+ /* debugfs entry */
+ struct dentry *dent;
+};
+
+/*
+ * Operations array. To perform any operation the simulator must pass
+ * through the corresponding chain of states.
+ */
+static struct nandsim_operations {
+ uint32_t reqopts; /* options which are required to perform the operation */
+ uint32_t states[NS_OPER_STATES]; /* operation's states */
+} ops[NS_OPER_NUM] = {
+ /* Read page + OOB from the beginning */
+ {OPT_SMALLPAGE, {STATE_CMD_READ0 | ACTION_ZEROOFF, STATE_ADDR_PAGE | ACTION_CPY,
+ STATE_DATAOUT, STATE_READY}},
+ /* Read page + OOB from the second half */
+ {OPT_PAGE512_8BIT, {STATE_CMD_READ1 | ACTION_HALFOFF, STATE_ADDR_PAGE | ACTION_CPY,
+ STATE_DATAOUT, STATE_READY}},
+ /* Read OOB */
+ {OPT_SMALLPAGE, {STATE_CMD_READOOB | ACTION_OOBOFF, STATE_ADDR_PAGE | ACTION_CPY,
+ STATE_DATAOUT, STATE_READY}},
+ /* Program page starting from the beginning */
+ {OPT_ANY, {STATE_CMD_SEQIN, STATE_ADDR_PAGE, STATE_DATAIN,
+ STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
+	/* Program page starting from the beginning (preceded by a READ0 command) */
+ {OPT_SMALLPAGE, {STATE_CMD_READ0, STATE_CMD_SEQIN | ACTION_ZEROOFF, STATE_ADDR_PAGE,
+ STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
+ /* Program page starting from the second half */
+ {OPT_PAGE512, {STATE_CMD_READ1, STATE_CMD_SEQIN | ACTION_HALFOFF, STATE_ADDR_PAGE,
+ STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
+ /* Program OOB */
+ {OPT_SMALLPAGE, {STATE_CMD_READOOB, STATE_CMD_SEQIN | ACTION_OOBOFF, STATE_ADDR_PAGE,
+ STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
+ /* Erase sector */
+ {OPT_ANY, {STATE_CMD_ERASE1, STATE_ADDR_SEC, STATE_CMD_ERASE2 | ACTION_SECERASE, STATE_READY}},
+ /* Read status */
+ {OPT_ANY, {STATE_CMD_STATUS, STATE_DATAOUT_STATUS, STATE_READY}},
+ /* Read ID */
+ {OPT_ANY, {STATE_CMD_READID, STATE_ADDR_ZERO, STATE_DATAOUT_ID, STATE_READY}},
+ /* Large page devices read page */
+ {OPT_LARGEPAGE, {STATE_CMD_READ0, STATE_ADDR_PAGE, STATE_CMD_READSTART | ACTION_CPY,
+ STATE_DATAOUT, STATE_READY}},
+ /* Large page devices random page read */
+ {OPT_LARGEPAGE, {STATE_CMD_RNDOUT, STATE_ADDR_COLUMN, STATE_CMD_RNDOUTSTART | ACTION_CPY,
+ STATE_DATAOUT, STATE_READY}},
+};
+
+struct weak_block {
+ struct list_head list;
+ unsigned int erase_block_no;
+ unsigned int max_erases;
+ unsigned int erases_done;
+};
+
+static LIST_HEAD(weak_blocks);
+
+struct weak_page {
+ struct list_head list;
+ unsigned int page_no;
+ unsigned int max_writes;
+ unsigned int writes_done;
+};
+
+static LIST_HEAD(weak_pages);
+
+struct grave_page {
+ struct list_head list;
+ unsigned int page_no;
+ unsigned int max_reads;
+ unsigned int reads_done;
+};
+
+static LIST_HEAD(grave_pages);
+
+static unsigned long *erase_block_wear = NULL;
+static unsigned int wear_eb_count = 0;
+static unsigned long total_wear = 0;
+
+/* MTD structure for NAND controller */
+static struct mtd_info *nsmtd;
+
+static int ns_show(struct seq_file *m, void *private)
+{
+ unsigned long wmin = -1, wmax = 0, avg;
+ unsigned long deciles[10], decile_max[10], tot = 0;
+ unsigned int i;
+
+ /* Calc wear stats */
+ for (i = 0; i < wear_eb_count; ++i) {
+ unsigned long wear = erase_block_wear[i];
+ if (wear < wmin)
+ wmin = wear;
+ if (wear > wmax)
+ wmax = wear;
+ tot += wear;
+ }
+
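+	/* Split [0, wmax] into ten roughly equal-width buckets. */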
+ for (i = 0; i < 9; ++i) {
+ deciles[i] = 0;
+ decile_max[i] = (wmax * (i + 1) + 5) / 10;
+ }
+ deciles[9] = 0;
+ decile_max[9] = wmax;
+ for (i = 0; i < wear_eb_count; ++i) {
+ int d;
+ unsigned long wear = erase_block_wear[i];
+ for (d = 0; d < 10; ++d)
+ if (wear <= decile_max[d]) {
+ deciles[d] += 1;
+ break;
+ }
+ }
+ avg = tot / wear_eb_count;
+
+ /* Output wear report */
+	seq_printf(m, "Total number of erases: %lu\n", tot);
+ seq_printf(m, "Number of erase blocks: %u\n", wear_eb_count);
+ seq_printf(m, "Average number of erases: %lu\n", avg);
+ seq_printf(m, "Maximum number of erases: %lu\n", wmax);
+ seq_printf(m, "Minimum number of erases: %lu\n", wmin);
+ for (i = 0; i < 10; ++i) {
+ unsigned long from = (i ? decile_max[i - 1] + 1 : 0);
+ if (from > decile_max[i])
+ continue;
+ seq_printf(m, "Number of ebs with erase counts from %lu to %lu : %lu\n",
+ from,
+ decile_max[i],
+ deciles[i]);
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(ns);
+
+/**
+ * ns_debugfs_create - initialize debugfs
+ * @ns: nandsim device description object
+ *
+ * This function creates all debugfs files for the nandsim device @ns. Returns
+ * zero in case of success and a negative error code in case of failure.
+ */
+static int ns_debugfs_create(struct nandsim *ns)
+{
+ struct dentry *root = nsmtd->dbg.dfs_dir;
+
+ /*
+ * Just skip debugfs initialization when the debugfs directory is
+ * missing.
+ */
+ if (IS_ERR_OR_NULL(root)) {
+ if (IS_ENABLED(CONFIG_DEBUG_FS) &&
+ !IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
+			NS_WARN("CONFIG_MTD_PARTITIONED_MASTER must be enabled to expose debugfs entries\n");
+ return 0;
+ }
+
+ ns->dent = debugfs_create_file("nandsim_wear_report", 0400, root, ns,
+ &ns_fops);
+ if (IS_ERR_OR_NULL(ns->dent)) {
+ NS_ERR("cannot create \"nandsim_wear_report\" debugfs entry\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static void ns_debugfs_remove(struct nandsim *ns)
+{
+ debugfs_remove_recursive(ns->dent);
+}
+
+/*
+ * Allocate an array of page pointers, create a slab cache for the pages
+ * and initialize the array with NULL pointers.
+ *
+ * RETURNS: 0 if success, -ENOMEM if memory alloc fails.
+ */
+static int __init ns_alloc_device(struct nandsim *ns)
+{
+ struct file *cfile;
+ int i, err;
+
+ if (cache_file) {
+ cfile = filp_open(cache_file, O_CREAT | O_RDWR | O_LARGEFILE, 0600);
+ if (IS_ERR(cfile))
+ return PTR_ERR(cfile);
+ if (!(cfile->f_mode & FMODE_CAN_READ)) {
+ NS_ERR("alloc_device: cache file not readable\n");
+ err = -EINVAL;
+ goto err_close_filp;
+ }
+ if (!(cfile->f_mode & FMODE_CAN_WRITE)) {
+ NS_ERR("alloc_device: cache file not writeable\n");
+ err = -EINVAL;
+ goto err_close_filp;
+ }
+ ns->pages_written =
+ vzalloc(array_size(sizeof(unsigned long),
+ BITS_TO_LONGS(ns->geom.pgnum)));
+ if (!ns->pages_written) {
+ NS_ERR("alloc_device: unable to allocate pages written array\n");
+ err = -ENOMEM;
+ goto err_close_filp;
+ }
+ ns->file_buf = kmalloc(ns->geom.pgszoob, GFP_KERNEL);
+ if (!ns->file_buf) {
+ NS_ERR("alloc_device: unable to allocate file buf\n");
+ err = -ENOMEM;
+ goto err_free_pw;
+ }
+ ns->cfile = cfile;
+
+ return 0;
+
+err_free_pw:
+ vfree(ns->pages_written);
+err_close_filp:
+ filp_close(cfile, NULL);
+
+ return err;
+ }
+
+ ns->pages = vmalloc(array_size(sizeof(union ns_mem), ns->geom.pgnum));
+ if (!ns->pages) {
+ NS_ERR("alloc_device: unable to allocate page array\n");
+ return -ENOMEM;
+ }
+	for (i = 0; i < ns->geom.pgnum; i++)
+		ns->pages[i].byte = NULL;
+ ns->nand_pages_slab = kmem_cache_create("nandsim",
+ ns->geom.pgszoob, 0, 0, NULL);
+ if (!ns->nand_pages_slab) {
+ NS_ERR("cache_create: unable to create kmem_cache\n");
+ err = -ENOMEM;
+ goto err_free_pg;
+ }
+
+ return 0;
+
+err_free_pg:
+ vfree(ns->pages);
+
+ return err;
+}
+
+/*
+ * Free any allocated pages, and free the array of page pointers.
+ */
+static void ns_free_device(struct nandsim *ns)
+{
+ int i;
+
+ if (ns->cfile) {
+ kfree(ns->file_buf);
+ vfree(ns->pages_written);
+ filp_close(ns->cfile, NULL);
+ return;
+ }
+
+ if (ns->pages) {
+ for (i = 0; i < ns->geom.pgnum; i++) {
+ if (ns->pages[i].byte)
+ kmem_cache_free(ns->nand_pages_slab,
+ ns->pages[i].byte);
+ }
+ kmem_cache_destroy(ns->nand_pages_slab);
+ vfree(ns->pages);
+ }
+}
+
+static char __init *ns_get_partition_name(int i)
+{
+ return kasprintf(GFP_KERNEL, "NAND simulator partition %d", i);
+}
+
+/*
+ * Initialize the nandsim structure.
+ *
+ * RETURNS: 0 if success, -ERRNO if failure.
+ */
+static int __init ns_init(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nandsim *ns = nand_get_controller_data(chip);
+ int i, ret = 0;
+ uint64_t remains;
+ uint64_t next_offset;
+
+ if (NS_IS_INITIALIZED(ns)) {
+ NS_ERR("init_nandsim: nandsim is already initialized\n");
+ return -EIO;
+ }
+
+ /* Initialize the NAND flash parameters */
+ ns->busw = chip->options & NAND_BUSWIDTH_16 ? 16 : 8;
+ ns->geom.totsz = mtd->size;
+ ns->geom.pgsz = mtd->writesize;
+ ns->geom.oobsz = mtd->oobsize;
+ ns->geom.secsz = mtd->erasesize;
+ ns->geom.pgszoob = ns->geom.pgsz + ns->geom.oobsz;
+ ns->geom.pgnum = div_u64(ns->geom.totsz, ns->geom.pgsz);
+ ns->geom.totszoob = ns->geom.totsz + (uint64_t)ns->geom.pgnum * ns->geom.oobsz;
+ ns->geom.secshift = ffs(ns->geom.secsz) - 1;
+ ns->geom.pgshift = chip->page_shift;
+ ns->geom.pgsec = ns->geom.secsz / ns->geom.pgsz;
+ ns->geom.secszoob = ns->geom.secsz + ns->geom.oobsz * ns->geom.pgsec;
+ ns->options = 0;
+
+ if (ns->geom.pgsz == 512) {
+ ns->options |= OPT_PAGE512;
+ if (ns->busw == 8)
+ ns->options |= OPT_PAGE512_8BIT;
+ } else if (ns->geom.pgsz == 2048) {
+ ns->options |= OPT_PAGE2048;
+ } else if (ns->geom.pgsz == 4096) {
+ ns->options |= OPT_PAGE4096;
+ } else {
+ NS_ERR("init_nandsim: unknown page size %u\n", ns->geom.pgsz);
+ return -EIO;
+ }
+
+ if (ns->options & OPT_SMALLPAGE) {
+ if (ns->geom.totsz <= (32 << 20)) {
+ ns->geom.pgaddrbytes = 3;
+ ns->geom.secaddrbytes = 2;
+ } else {
+ ns->geom.pgaddrbytes = 4;
+ ns->geom.secaddrbytes = 3;
+ }
+ } else {
+ if (ns->geom.totsz <= (128 << 20)) {
+ ns->geom.pgaddrbytes = 4;
+ ns->geom.secaddrbytes = 2;
+ } else {
+ ns->geom.pgaddrbytes = 5;
+ ns->geom.secaddrbytes = 3;
+ }
+ }
+
+ /* Fill the partition_info structure */
+ if (parts_num > ARRAY_SIZE(ns->partitions)) {
+ NS_ERR("too many partitions.\n");
+ return -EINVAL;
+ }
+ remains = ns->geom.totsz;
+ next_offset = 0;
+ for (i = 0; i < parts_num; ++i) {
+ uint64_t part_sz = (uint64_t)parts[i] * ns->geom.secsz;
+
+ if (!part_sz || part_sz > remains) {
+ NS_ERR("bad partition size.\n");
+ return -EINVAL;
+ }
+ ns->partitions[i].name = ns_get_partition_name(i);
+ if (!ns->partitions[i].name) {
+ NS_ERR("unable to allocate memory.\n");
+ return -ENOMEM;
+ }
+ ns->partitions[i].offset = next_offset;
+ ns->partitions[i].size = part_sz;
+ next_offset += ns->partitions[i].size;
+ remains -= ns->partitions[i].size;
+ }
+ ns->nbparts = parts_num;
+ if (remains) {
+ if (parts_num + 1 > ARRAY_SIZE(ns->partitions)) {
+ NS_ERR("too many partitions.\n");
+ ret = -EINVAL;
+ goto free_partition_names;
+ }
+ ns->partitions[i].name = ns_get_partition_name(i);
+ if (!ns->partitions[i].name) {
+ NS_ERR("unable to allocate memory.\n");
+ ret = -ENOMEM;
+ goto free_partition_names;
+ }
+ ns->partitions[i].offset = next_offset;
+ ns->partitions[i].size = remains;
+ ns->nbparts += 1;
+ }
+
+ if (ns->busw == 16)
+ NS_WARN("16-bit flashes support wasn't tested\n");
+
+	NS_INFO("flash size: %llu MiB\n",
+		(unsigned long long)ns->geom.totsz >> 20);
+	NS_INFO("page size: %u bytes\n", ns->geom.pgsz);
+	NS_INFO("OOB area size: %u bytes\n", ns->geom.oobsz);
+	NS_INFO("sector size: %u KiB\n", ns->geom.secsz >> 10);
+	NS_INFO("number of pages: %u\n", ns->geom.pgnum);
+	NS_INFO("pages per sector: %u\n", ns->geom.pgsec);
+	NS_INFO("bus width: %u\n", ns->busw);
+	NS_INFO("bits in sector size: %u\n", ns->geom.secshift);
+	NS_INFO("bits in page size: %u\n", ns->geom.pgshift);
+	NS_INFO("bits in OOB size: %u\n", ffs(ns->geom.oobsz) - 1);
+	NS_INFO("flash size with OOB: %llu KiB\n",
+		(unsigned long long)ns->geom.totszoob >> 10);
+	NS_INFO("page address bytes: %u\n", ns->geom.pgaddrbytes);
+	NS_INFO("sector address bytes: %u\n", ns->geom.secaddrbytes);
+	NS_INFO("options: %#x\n", ns->options);
+
+ ret = ns_alloc_device(ns);
+ if (ret)
+ goto free_partition_names;
+
+ /* Allocate / initialize the internal buffer */
+ ns->buf.byte = kmalloc(ns->geom.pgszoob, GFP_KERNEL);
+ if (!ns->buf.byte) {
+ NS_ERR("init_nandsim: unable to allocate %u bytes for the internal buffer\n",
+ ns->geom.pgszoob);
+ ret = -ENOMEM;
+ goto free_device;
+ }
+ memset(ns->buf.byte, 0xFF, ns->geom.pgszoob);
+
+ return 0;
+
+free_device:
+ ns_free_device(ns);
+free_partition_names:
+ for (i = 0; i < ARRAY_SIZE(ns->partitions); ++i)
+ kfree(ns->partitions[i].name);
+
+ return ret;
+}
+
+/*
+ * Free the nandsim structure.
+ */
+static void ns_free(struct nandsim *ns)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ns->partitions); ++i)
+ kfree(ns->partitions[i].name);
+
+ kfree(ns->buf.byte);
+ ns_free_device(ns);
+}
+
+static int ns_parse_badblocks(struct nandsim *ns, struct mtd_info *mtd)
+{
+ char *w;
+ int zero_ok;
+ unsigned int erase_block_no;
+ loff_t offset;
+
+ if (!badblocks)
+ return 0;
+ w = badblocks;
+ do {
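+		/* simple_strtoul() returns 0 both for "0" and on a parse
+		 * error; zero_ok disambiguates the two cases. */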
+ zero_ok = (*w == '0' ? 1 : 0);
+ erase_block_no = simple_strtoul(w, &w, 0);
+ if (!zero_ok && !erase_block_no) {
+ NS_ERR("invalid badblocks.\n");
+ return -EINVAL;
+ }
+ offset = (loff_t)erase_block_no * ns->geom.secsz;
+ if (mtd_block_markbad(mtd, offset)) {
+ NS_ERR("invalid badblocks.\n");
+ return -EINVAL;
+ }
+ if (*w == ',')
+ w += 1;
+ } while (*w);
+ return 0;
+}
+
+static int ns_parse_weakblocks(void)
+{
+ char *w;
+ int zero_ok;
+ unsigned int erase_block_no;
+ unsigned int max_erases;
+ struct weak_block *wb;
+
+ if (!weakblocks)
+ return 0;
+ w = weakblocks;
+ do {
+ zero_ok = (*w == '0' ? 1 : 0);
+ erase_block_no = simple_strtoul(w, &w, 0);
+ if (!zero_ok && !erase_block_no) {
+ NS_ERR("invalid weakblocks.\n");
+ return -EINVAL;
+ }
+ max_erases = 3;
+ if (*w == ':') {
+ w += 1;
+ max_erases = simple_strtoul(w, &w, 0);
+ }
+ if (*w == ',')
+ w += 1;
+ wb = kzalloc(sizeof(*wb), GFP_KERNEL);
+ if (!wb) {
+ NS_ERR("unable to allocate memory.\n");
+ return -ENOMEM;
+ }
+ wb->erase_block_no = erase_block_no;
+ wb->max_erases = max_erases;
+ list_add(&wb->list, &weak_blocks);
+ } while (*w);
+ return 0;
+}
+
+static int ns_erase_error(unsigned int erase_block_no)
+{
+ struct weak_block *wb;
+
+ list_for_each_entry(wb, &weak_blocks, list)
+ if (wb->erase_block_no == erase_block_no) {
+ if (wb->erases_done >= wb->max_erases)
+ return 1;
+ wb->erases_done += 1;
+ return 0;
+ }
+ return 0;
+}
+
+static int ns_parse_weakpages(void)
+{
+ char *w;
+ int zero_ok;
+ unsigned int page_no;
+ unsigned int max_writes;
+ struct weak_page *wp;
+
+ if (!weakpages)
+ return 0;
+ w = weakpages;
+ do {
+ zero_ok = (*w == '0' ? 1 : 0);
+ page_no = simple_strtoul(w, &w, 0);
+ if (!zero_ok && !page_no) {
+ NS_ERR("invalid weakpages.\n");
+ return -EINVAL;
+ }
+ max_writes = 3;
+ if (*w == ':') {
+ w += 1;
+ max_writes = simple_strtoul(w, &w, 0);
+ }
+ if (*w == ',')
+ w += 1;
+ wp = kzalloc(sizeof(*wp), GFP_KERNEL);
+ if (!wp) {
+ NS_ERR("unable to allocate memory.\n");
+ return -ENOMEM;
+ }
+ wp->page_no = page_no;
+ wp->max_writes = max_writes;
+ list_add(&wp->list, &weak_pages);
+ } while (*w);
+ return 0;
+}
+
+static int ns_write_error(unsigned int page_no)
+{
+ struct weak_page *wp;
+
+ list_for_each_entry(wp, &weak_pages, list)
+ if (wp->page_no == page_no) {
+ if (wp->writes_done >= wp->max_writes)
+ return 1;
+ wp->writes_done += 1;
+ return 0;
+ }
+ return 0;
+}
+
+static int ns_parse_gravepages(void)
+{
+ char *g;
+ int zero_ok;
+ unsigned int page_no;
+ unsigned int max_reads;
+ struct grave_page *gp;
+
+ if (!gravepages)
+ return 0;
+ g = gravepages;
+ do {
+ zero_ok = (*g == '0' ? 1 : 0);
+ page_no = simple_strtoul(g, &g, 0);
+ if (!zero_ok && !page_no) {
+			NS_ERR("invalid gravepages.\n");
+ return -EINVAL;
+ }
+ max_reads = 3;
+ if (*g == ':') {
+ g += 1;
+ max_reads = simple_strtoul(g, &g, 0);
+ }
+ if (*g == ',')
+ g += 1;
+ gp = kzalloc(sizeof(*gp), GFP_KERNEL);
+ if (!gp) {
+ NS_ERR("unable to allocate memory.\n");
+ return -ENOMEM;
+ }
+ gp->page_no = page_no;
+ gp->max_reads = max_reads;
+ list_add(&gp->list, &grave_pages);
+ } while (*g);
+ return 0;
+}
+
+static int ns_read_error(unsigned int page_no)
+{
+ struct grave_page *gp;
+
+ list_for_each_entry(gp, &grave_pages, list)
+ if (gp->page_no == page_no) {
+ if (gp->reads_done >= gp->max_reads)
+ return 1;
+ gp->reads_done += 1;
+ return 0;
+ }
+ return 0;
+}
+
+static int ns_setup_wear_reporting(struct mtd_info *mtd)
+{
+ size_t mem;
+
+ wear_eb_count = div_u64(mtd->size, mtd->erasesize);
+ mem = wear_eb_count * sizeof(unsigned long);
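+	/* Detect overflow in the wear_eb_count * sizeof() multiplication. */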
+ if (mem / sizeof(unsigned long) != wear_eb_count) {
+ NS_ERR("Too many erase blocks for wear reporting\n");
+ return -ENOMEM;
+ }
+ erase_block_wear = kzalloc(mem, GFP_KERNEL);
+ if (!erase_block_wear) {
+ NS_ERR("Too many erase blocks for wear reporting\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void ns_update_wear(unsigned int erase_block_no)
+{
+ if (!erase_block_wear)
+ return;
+ total_wear += 1;
+ /*
+ * TODO: Notify this through a debugfs entry,
+ * instead of showing an error message.
+ */
+ if (total_wear == 0)
+ NS_ERR("Erase counter total overflow\n");
+ erase_block_wear[erase_block_no] += 1;
+ if (erase_block_wear[erase_block_no] == 0)
+ NS_ERR("Erase counter overflow for erase block %u\n", erase_block_no);
+}
+
+/*
+ * Returns the string representation of 'state' state.
+ */
+static char *ns_get_state_name(uint32_t state)
+{
+ switch (NS_STATE(state)) {
+ case STATE_CMD_READ0:
+ return "STATE_CMD_READ0";
+ case STATE_CMD_READ1:
+ return "STATE_CMD_READ1";
+ case STATE_CMD_PAGEPROG:
+ return "STATE_CMD_PAGEPROG";
+ case STATE_CMD_READOOB:
+ return "STATE_CMD_READOOB";
+ case STATE_CMD_READSTART:
+ return "STATE_CMD_READSTART";
+ case STATE_CMD_ERASE1:
+ return "STATE_CMD_ERASE1";
+ case STATE_CMD_STATUS:
+ return "STATE_CMD_STATUS";
+ case STATE_CMD_SEQIN:
+ return "STATE_CMD_SEQIN";
+ case STATE_CMD_READID:
+ return "STATE_CMD_READID";
+ case STATE_CMD_ERASE2:
+ return "STATE_CMD_ERASE2";
+ case STATE_CMD_RESET:
+ return "STATE_CMD_RESET";
+ case STATE_CMD_RNDOUT:
+ return "STATE_CMD_RNDOUT";
+ case STATE_CMD_RNDOUTSTART:
+ return "STATE_CMD_RNDOUTSTART";
+ case STATE_ADDR_PAGE:
+ return "STATE_ADDR_PAGE";
+ case STATE_ADDR_SEC:
+ return "STATE_ADDR_SEC";
+ case STATE_ADDR_ZERO:
+ return "STATE_ADDR_ZERO";
+ case STATE_ADDR_COLUMN:
+ return "STATE_ADDR_COLUMN";
+ case STATE_DATAIN:
+ return "STATE_DATAIN";
+ case STATE_DATAOUT:
+ return "STATE_DATAOUT";
+ case STATE_DATAOUT_ID:
+ return "STATE_DATAOUT_ID";
+ case STATE_DATAOUT_STATUS:
+ return "STATE_DATAOUT_STATUS";
+ case STATE_READY:
+ return "STATE_READY";
+ case STATE_UNKNOWN:
+ return "STATE_UNKNOWN";
+ }
+
+ NS_ERR("get_state_name: unknown state, BUG\n");
+ return NULL;
+}
+
+/*
+ * Check if command is valid.
+ *
+ * RETURNS: 1 if wrong command, 0 if right.
+ */
+static int ns_check_command(int cmd)
+{
+ switch (cmd) {
+
+ case NAND_CMD_READ0:
+ case NAND_CMD_READ1:
+ case NAND_CMD_READSTART:
+ case NAND_CMD_PAGEPROG:
+ case NAND_CMD_READOOB:
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_STATUS:
+ case NAND_CMD_SEQIN:
+ case NAND_CMD_READID:
+ case NAND_CMD_ERASE2:
+ case NAND_CMD_RESET:
+ case NAND_CMD_RNDOUT:
+ case NAND_CMD_RNDOUTSTART:
+ return 0;
+
+ default:
+ return 1;
+ }
+}
+
+/*
+ * Returns state after command is accepted by command number.
+ */
+static uint32_t ns_get_state_by_command(unsigned command)
+{
+ switch (command) {
+ case NAND_CMD_READ0:
+ return STATE_CMD_READ0;
+ case NAND_CMD_READ1:
+ return STATE_CMD_READ1;
+ case NAND_CMD_PAGEPROG:
+ return STATE_CMD_PAGEPROG;
+ case NAND_CMD_READSTART:
+ return STATE_CMD_READSTART;
+ case NAND_CMD_READOOB:
+ return STATE_CMD_READOOB;
+ case NAND_CMD_ERASE1:
+ return STATE_CMD_ERASE1;
+ case NAND_CMD_STATUS:
+ return STATE_CMD_STATUS;
+ case NAND_CMD_SEQIN:
+ return STATE_CMD_SEQIN;
+ case NAND_CMD_READID:
+ return STATE_CMD_READID;
+ case NAND_CMD_ERASE2:
+ return STATE_CMD_ERASE2;
+ case NAND_CMD_RESET:
+ return STATE_CMD_RESET;
+ case NAND_CMD_RNDOUT:
+ return STATE_CMD_RNDOUT;
+ case NAND_CMD_RNDOUTSTART:
+ return STATE_CMD_RNDOUTSTART;
+ }
+
+ NS_ERR("get_state_by_command: unknown command, BUG\n");
+ return 0;
+}
+
+/*
+ * Move an address byte to the corresponding internal register.
+ */
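+/*
+ * For example, with an illustrative geometry of pgaddrbytes = 5 and
+ * secaddrbytes = 3, address bytes 0-1 accumulate into ns->regs.column
+ * and bytes 2-4 into ns->regs.row, least significant byte first.
+ */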
+static inline void ns_accept_addr_byte(struct nandsim *ns, u_char bt)
+{
+ uint byte = (uint)bt;
+
+ if (ns->regs.count < (ns->geom.pgaddrbytes - ns->geom.secaddrbytes))
+ ns->regs.column |= (byte << 8 * ns->regs.count);
+ else {
+ ns->regs.row |= (byte << 8 * (ns->regs.count -
+ ns->geom.pgaddrbytes +
+ ns->geom.secaddrbytes));
+ }
+}
+
+/*
+ * Switch to STATE_READY state.
+ */
+static inline void ns_switch_to_ready_state(struct nandsim *ns, u_char status)
+{
+ NS_DBG("switch_to_ready_state: switch to %s state\n",
+ ns_get_state_name(STATE_READY));
+
+ ns->state = STATE_READY;
+ ns->nxstate = STATE_UNKNOWN;
+ ns->op = NULL;
+ ns->npstates = 0;
+ ns->stateidx = 0;
+ ns->regs.num = 0;
+ ns->regs.count = 0;
+ ns->regs.off = 0;
+ ns->regs.row = 0;
+ ns->regs.column = 0;
+ ns->regs.status = status;
+}
+
+/*
+ * If the operation isn't known yet, try to find it in the global array
+ * of supported operations.
+ *
+ * The operation can be unknown for the following reasons:
+ * 1. A new command was accepted and this is the first call to find the
+ * corresponding states chain. In this case ns->npstates = 0.
+ * 2. There are several operations which begin with the same command(s)
+ * (for example, the program-from-the-second-half and
+ * read-from-the-second-half operations both begin with the READ1
+ * command). In this case the ns->pstates[] array contains the
+ * previous states.
+ *
+ * Thus, the function tries to find an operation containing the
+ * following states (if the 'flag' parameter is 0):
+ * ns->pstates[0], ... ns->pstates[ns->npstates], ns->state
+ *
+ * If one (and only one) matching operation is found, it is accepted
+ * (ns->op, ns->state and ns->nxstate are initialized, ns->npstates is
+ * zeroed).
+ *
+ * If there are several matches, the current state is pushed onto
+ * ns->pstates.
+ *
+ * The operation can be unknown only while commands are input to the
+ * chip. As soon as an address is input, the operation must be known.
+ * In that situation the function is called with 'flag' != 0, and the
+ * operation is searched for using the following pattern:
+ * ns->pstates[0], ... ns->pstates[ns->npstates], <address input>
+ *
+ * This pattern is supposed to match either one operation or none.
+ * There can't be any ambiguity in that case.
+ *
+ * If no match is found, the function does the following:
+ * 1. if there are saved states present, try to ignore them and search
+ * again using only the last command. If nothing is found, switch
+ * to the STATE_READY state.
+ * 2. if there are no saved states, switch to the STATE_READY state.
+ *
+ * RETURNS: -2 - no matching operations found.
+ * -1 - several matches.
+ * 0 - operation found.
+ */
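+/*
+ * For example, after the shared leading command of two such operations
+ * is accepted, both remain candidates: the function returns -1 and the
+ * current state is saved in ns->pstates[]; the next command or address
+ * input then narrows the search to a single operation.
+ */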
+static int ns_find_operation(struct nandsim *ns, uint32_t flag)
+{
+ int opsfound = 0;
+ int i, j, idx = 0;
+
+ for (i = 0; i < NS_OPER_NUM; i++) {
+
+ int found = 1;
+
+ if (!(ns->options & ops[i].reqopts))
+ /* Ignore operations we can't perform */
+ continue;
+
+ if (flag) {
+ if (!(ops[i].states[ns->npstates] & STATE_ADDR_MASK))
+ continue;
+ } else {
+ if (NS_STATE(ns->state) != NS_STATE(ops[i].states[ns->npstates]))
+ continue;
+ }
+
+ for (j = 0; j < ns->npstates; j++)
+ if (NS_STATE(ops[i].states[j]) != NS_STATE(ns->pstates[j])) {
+ found = 0;
+ break;
+ }
+
+ if (found) {
+ idx = i;
+ opsfound += 1;
+ }
+ }
+
+ if (opsfound == 1) {
+ /* Exact match */
+ ns->op = &ops[idx].states[0];
+ if (flag) {
+ /*
+ * In this case ns_find_operation() was called when the
+ * address input had just begun but isn't yet fully
+ * input. The current state therefore must not be one of
+ * STATE_ADDR_*; instead, the STATE_ADDR_* state must be
+ * the next state (ns->nxstate).
+ */
+ ns->stateidx = ns->npstates - 1;
+ } else {
+ ns->stateidx = ns->npstates;
+ }
+ ns->npstates = 0;
+ ns->state = ns->op[ns->stateidx];
+ ns->nxstate = ns->op[ns->stateidx + 1];
+ NS_DBG("find_operation: operation found, index: %d, state: %s, nxstate %s\n",
+ idx, ns_get_state_name(ns->state),
+ ns_get_state_name(ns->nxstate));
+ return 0;
+ }
+
+ if (opsfound == 0) {
+ /* Nothing was found. Try to ignore previous commands (if any) and search again */
+ if (ns->npstates != 0) {
+ NS_DBG("find_operation: no operation found, try again with state %s\n",
+ ns_get_state_name(ns->state));
+ ns->npstates = 0;
+ return ns_find_operation(ns, 0);
+
+ }
+ NS_DBG("find_operation: no operations found\n");
+ ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ return -2;
+ }
+
+ if (flag) {
+ /* This shouldn't happen */
+ NS_DBG("find_operation: BUG, operation must be known if address is input\n");
+ return -2;
+ }
+
+ NS_DBG("find_operation: there is still ambiguity\n");
+
+ ns->pstates[ns->npstates++] = ns->state;
+
+ return -1;
+}
+
+static void ns_put_pages(struct nandsim *ns)
+{
+ int i;
+
+ for (i = 0; i < ns->held_cnt; i++)
+ put_page(ns->held_pages[i]);
+}
+
+/* Get page cache pages in advance, so that the file I/O below does not need to allocate them */
+static int ns_get_pages(struct nandsim *ns, struct file *file, size_t count,
+ loff_t pos)
+{
+ pgoff_t index, start_index, end_index;
+ struct page *page;
+ struct address_space *mapping = file->f_mapping;
+
+ start_index = pos >> PAGE_SHIFT;
+ end_index = (pos + count - 1) >> PAGE_SHIFT;
+ if (end_index - start_index + 1 > NS_MAX_HELD_PAGES)
+ return -EINVAL;
+ ns->held_cnt = 0;
+ for (index = start_index; index <= end_index; index++) {
+ page = find_get_page(mapping, index);
+ if (page == NULL) {
+ page = find_or_create_page(mapping, index, GFP_NOFS);
+ if (page == NULL) {
+ write_inode_now(mapping->host, 1);
+ page = find_or_create_page(mapping, index, GFP_NOFS);
+ }
+ if (page == NULL) {
+ ns_put_pages(ns);
+ return -ENOMEM;
+ }
+ unlock_page(page);
+ }
+ ns->held_pages[ns->held_cnt++] = page;
+ }
+ return 0;
+}
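+
+/*
+ * Because the pages are already held, the kernel_read()/kernel_write()
+ * calls below find them in the page cache and need not allocate them,
+ * which could otherwise recurse into the file system and deadlock (see
+ * the comment in ns_prog_page()).
+ */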
+
+static ssize_t ns_read_file(struct nandsim *ns, struct file *file, void *buf,
+ size_t count, loff_t pos)
+{
+ ssize_t tx;
+ int err;
+ unsigned int noreclaim_flag;
+
+ err = ns_get_pages(ns, file, count, pos);
+ if (err)
+ return err;
+ noreclaim_flag = memalloc_noreclaim_save();
+ tx = kernel_read(file, buf, count, &pos);
+ memalloc_noreclaim_restore(noreclaim_flag);
+ ns_put_pages(ns);
+ return tx;
+}
+
+static ssize_t ns_write_file(struct nandsim *ns, struct file *file, void *buf,
+ size_t count, loff_t pos)
+{
+ ssize_t tx;
+ int err;
+ unsigned int noreclaim_flag;
+
+ err = ns_get_pages(ns, file, count, pos);
+ if (err)
+ return err;
+ noreclaim_flag = memalloc_noreclaim_save();
+ tx = kernel_write(file, buf, count, &pos);
+ memalloc_noreclaim_restore(noreclaim_flag);
+ ns_put_pages(ns);
+ return tx;
+}
+
+/*
+ * Returns a pointer to the current page.
+ */
+static inline union ns_mem *NS_GET_PAGE(struct nandsim *ns)
+{
+ return &(ns->pages[ns->regs.row]);
+}
+
+/*
+ * Returns a pointer to the current byte within the current page.
+ */
+static inline u_char *NS_PAGE_BYTE_OFF(struct nandsim *ns)
+{
+ return NS_GET_PAGE(ns)->byte + ns->regs.column + ns->regs.off;
+}
+
+static int ns_do_read_error(struct nandsim *ns, int num)
+{
+ unsigned int page_no = ns->regs.row;
+
+ if (ns_read_error(page_no)) {
+ prandom_bytes(ns->buf.byte, num);
+ NS_WARN("simulating read error in page %u\n", page_no);
+ return 1;
+ }
+ return 0;
+}
+
+static void ns_do_bit_flips(struct nandsim *ns, int num)
+{
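+ /*
+ * With bitflips enabled, corrupt a read with probability
+ * 2^22 / 2^32, i.e. roughly one in every 1024 reads.
+ */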
+ if (bitflips && prandom_u32() < (1 << 22)) {
+ int flips = 1;
+ if (bitflips > 1)
+ flips = (prandom_u32() % (int) bitflips) + 1;
+ while (flips--) {
+ int pos = prandom_u32() % (num * 8);
+ ns->buf.byte[pos / 8] ^= (1 << (pos % 8));
+ NS_WARN("read_page: flipping bit %d in page %d "
+ "reading from %d ecc: corrected=%u failed=%u\n",
+ pos, ns->regs.row, ns->regs.column + ns->regs.off,
+ nsmtd->ecc_stats.corrected, nsmtd->ecc_stats.failed);
+ }
+ }
+}
+
+/*
+ * Fill the NAND buffer with data read from the specified page.
+ */
+static void ns_read_page(struct nandsim *ns, int num)
+{
+ union ns_mem *mypage;
+
+ if (ns->cfile) {
+ if (!test_bit(ns->regs.row, ns->pages_written)) {
+ NS_DBG("read_page: page %d not written\n", ns->regs.row);
+ memset(ns->buf.byte, 0xFF, num);
+ } else {
+ loff_t pos;
+ ssize_t tx;
+
+ NS_DBG("read_page: page %d written, reading from %d\n",
+ ns->regs.row, ns->regs.column + ns->regs.off);
+ if (ns_do_read_error(ns, num))
+ return;
+ pos = (loff_t)NS_RAW_OFFSET(ns) + ns->regs.off;
+ tx = ns_read_file(ns, ns->cfile, ns->buf.byte, num,
+ pos);
+ if (tx != num) {
+ NS_ERR("read_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx);
+ return;
+ }
+ ns_do_bit_flips(ns, num);
+ }
+ return;
+ }
+
+ mypage = NS_GET_PAGE(ns);
+ if (mypage->byte == NULL) {
+ NS_DBG("read_page: page %d not allocated\n", ns->regs.row);
+ memset(ns->buf.byte, 0xFF, num);
+ } else {
+ NS_DBG("read_page: page %d allocated, reading from %d\n",
+ ns->regs.row, ns->regs.column + ns->regs.off);
+ if (ns_do_read_error(ns, num))
+ return;
+ memcpy(ns->buf.byte, NS_PAGE_BYTE_OFF(ns), num);
+ ns_do_bit_flips(ns, num);
+ }
+}
+
+/*
+ * Erase all pages in the specified sector.
+ */
+static void ns_erase_sector(struct nandsim *ns)
+{
+ union ns_mem *mypage;
+ int i;
+
+ if (ns->cfile) {
+ for (i = 0; i < ns->geom.pgsec; i++)
+ if (__test_and_clear_bit(ns->regs.row + i,
+ ns->pages_written)) {
+ NS_DBG("erase_sector: freeing page %d\n", ns->regs.row + i);
+ }
+ return;
+ }
+
+ mypage = NS_GET_PAGE(ns);
+ for (i = 0; i < ns->geom.pgsec; i++) {
+ if (mypage->byte != NULL) {
+ NS_DBG("erase_sector: freeing page %d\n", ns->regs.row+i);
+ kmem_cache_free(ns->nand_pages_slab, mypage->byte);
+ mypage->byte = NULL;
+ }
+ mypage++;
+ }
+}
+
+/*
+ * Program the specified page with the contents from the NAND buffer.
+ */
+static int ns_prog_page(struct nandsim *ns, int num)
+{
+ int i;
+ union ns_mem *mypage;
+ u_char *pg_off;
+
+ if (ns->cfile) {
+ loff_t off;
+ ssize_t tx;
+ int all;
+
+ NS_DBG("prog_page: writing page %d\n", ns->regs.row);
+ pg_off = ns->file_buf + ns->regs.column + ns->regs.off;
+ off = (loff_t)NS_RAW_OFFSET(ns) + ns->regs.off;
+ if (!test_bit(ns->regs.row, ns->pages_written)) {
+ all = 1;
+ memset(ns->file_buf, 0xff, ns->geom.pgszoob);
+ } else {
+ all = 0;
+ tx = ns_read_file(ns, ns->cfile, pg_off, num, off);
+ if (tx != num) {
+ NS_ERR("prog_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx);
+ return -1;
+ }
+ }
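+ /*
+ * NAND programming can only clear bits (1 -> 0), so the new
+ * data is ANDed over the old contents instead of being copied.
+ */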
+ for (i = 0; i < num; i++)
+ pg_off[i] &= ns->buf.byte[i];
+ if (all) {
+ loff_t pos = (loff_t)ns->regs.row * ns->geom.pgszoob;
+ tx = ns_write_file(ns, ns->cfile, ns->file_buf,
+ ns->geom.pgszoob, pos);
+ if (tx != ns->geom.pgszoob) {
+ NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx);
+ return -1;
+ }
+ __set_bit(ns->regs.row, ns->pages_written);
+ } else {
+ tx = ns_write_file(ns, ns->cfile, pg_off, num, off);
+ if (tx != num) {
+ NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx);
+ return -1;
+ }
+ }
+ return 0;
+ }
+
+ mypage = NS_GET_PAGE(ns);
+ if (mypage->byte == NULL) {
+ NS_DBG("prog_page: allocating page %d\n", ns->regs.row);
+ /*
+ * We allocate memory with GFP_NOFS because a flash FS may
+ * utilize this. If it is holding an FS lock, then gets here,
+ * then kernel memory alloc runs writeback which goes to the FS
+ * again and deadlocks. This was seen in practice.
+ */
+ mypage->byte = kmem_cache_alloc(ns->nand_pages_slab, GFP_NOFS);
+ if (mypage->byte == NULL) {
+ NS_ERR("prog_page: error allocating memory for page %d\n", ns->regs.row);
+ return -1;
+ }
+ memset(mypage->byte, 0xFF, ns->geom.pgszoob);
+ }
+
+ pg_off = NS_PAGE_BYTE_OFF(ns);
+ for (i = 0; i < num; i++)
+ pg_off[i] &= ns->buf.byte[i];
+
+ return 0;
+}
+
+/*
+ * If state has any action bit, perform this action.
+ *
+ * RETURNS: 0 if success, -1 if error.
+ */
+static int ns_do_state_action(struct nandsim *ns, uint32_t action)
+{
+ int num;
+ int busdiv = ns->busw == 8 ? 1 : 2;
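+ /* A 16-bit bus transfers two bytes per cycle, halving the simulated I/O delays below */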
+ unsigned int erase_block_no, page_no;
+
+ action &= ACTION_MASK;
+
+ /* Check that page address input is correct */
+ if (action != ACTION_SECERASE && ns->regs.row >= ns->geom.pgnum) {
+ NS_WARN("do_state_action: wrong page number (%#x)\n", ns->regs.row);
+ return -1;
+ }
+
+ switch (action) {
+
+ case ACTION_CPY:
+ /*
+ * Copy page data to the internal buffer.
+ */
+
+ /* The column must lie within the page plus OOB area */
+ if (ns->regs.column >= (ns->geom.pgszoob - ns->regs.off)) {
+ NS_ERR("do_state_action: column number is too large\n");
+ break;
+ }
+ num = ns->geom.pgszoob - ns->regs.off - ns->regs.column;
+ ns_read_page(ns, num);
+
+ NS_DBG("do_state_action: (ACTION_CPY:) copy %d bytes to int buf, raw offset %d\n",
+ num, NS_RAW_OFFSET(ns) + ns->regs.off);
+
+ if (ns->regs.off == 0)
+ NS_LOG("read page %d\n", ns->regs.row);
+ else if (ns->regs.off < ns->geom.pgsz)
+ NS_LOG("read page %d (second half)\n", ns->regs.row);
+ else
+ NS_LOG("read OOB of page %d\n", ns->regs.row);
+
+ NS_UDELAY(access_delay);
+ NS_UDELAY(input_cycle * ns->geom.pgsz / 1000 / busdiv);
+
+ break;
+
+ case ACTION_SECERASE:
+ /*
+ * Erase sector.
+ */
+
+ if (ns->lines.wp) {
+ NS_ERR("do_state_action: device is write-protected, ignore sector erase\n");
+ return -1;
+ }
+
+ if (ns->regs.row >= ns->geom.pgnum - ns->geom.pgsec
+ || (ns->regs.row & ~(ns->geom.secsz - 1))) {
+ NS_ERR("do_state_action: wrong sector address (%#x)\n", ns->regs.row);
+ return -1;
+ }
+
+ ns->regs.row = (ns->regs.row <<
+ 8 * (ns->geom.pgaddrbytes - ns->geom.secaddrbytes)) | ns->regs.column;
+ ns->regs.column = 0;
+
+ erase_block_no = ns->regs.row >> (ns->geom.secshift - ns->geom.pgshift);
+
+ NS_DBG("do_state_action: erase sector at address %#x, off = %d\n",
+ ns->regs.row, NS_RAW_OFFSET(ns));
+ NS_LOG("erase sector %u\n", erase_block_no);
+
+ ns_erase_sector(ns);
+
+ NS_MDELAY(erase_delay);
+
+ if (erase_block_wear)
+ ns_update_wear(erase_block_no);
+
+ if (ns_erase_error(erase_block_no)) {
+ NS_WARN("simulating erase failure in erase block %u\n", erase_block_no);
+ return -1;
+ }
+
+ break;
+
+ case ACTION_PRGPAGE:
+ /*
+ * Program page - move internal buffer data to the page.
+ */
+
+ if (ns->lines.wp) {
+ NS_WARN("do_state_action: device is write-protected, programm\n");
+ return -1;
+ }
+
+ num = ns->geom.pgszoob - ns->regs.off - ns->regs.column;
+ if (num != ns->regs.count) {
+ NS_ERR("do_state_action: too few bytes were input (%d instead of %d)\n",
+ ns->regs.count, num);
+ return -1;
+ }
+
+ if (ns_prog_page(ns, num) == -1)
+ return -1;
+
+ page_no = ns->regs.row;
+
+ NS_DBG("do_state_action: copy %d bytes from int buf to (%#x, %#x), raw off = %d\n",
+ num, ns->regs.row, ns->regs.column, NS_RAW_OFFSET(ns) + ns->regs.off);
+ NS_LOG("programm page %d\n", ns->regs.row);
+
+ NS_UDELAY(programm_delay);
+ NS_UDELAY(output_cycle * ns->geom.pgsz / 1000 / busdiv);
+
+ if (ns_write_error(page_no)) {
+ NS_WARN("simulating write failure in page %u\n", page_no);
+ return -1;
+ }
+
+ break;
+
+ case ACTION_ZEROOFF:
+ NS_DBG("do_state_action: set internal offset to 0\n");
+ ns->regs.off = 0;
+ break;
+
+ case ACTION_HALFOFF:
+ if (!(ns->options & OPT_PAGE512_8BIT)) {
+ NS_ERR("do_state_action: BUG! can't skip half of page for non-512"
+ "byte page size 8x chips\n");
+ return -1;
+ }
+ NS_DBG("do_state_action: set internal offset to %d\n", ns->geom.pgsz/2);
+ ns->regs.off = ns->geom.pgsz/2;
+ break;
+
+ case ACTION_OOBOFF:
+ NS_DBG("do_state_action: set internal offset to %d\n", ns->geom.pgsz);
+ ns->regs.off = ns->geom.pgsz;
+ break;
+
+ default:
+ NS_DBG("do_state_action: BUG! unknown action\n");
+ }
+
+ return 0;
+}
+
+/*
+ * Switch simulator's state.
+ */
+static void ns_switch_state(struct nandsim *ns)
+{
+ if (ns->op) {
+ /*
+ * The current operation has already been identified.
+ * Just follow the states chain.
+ */
+
+ ns->stateidx += 1;
+ ns->state = ns->nxstate;
+ ns->nxstate = ns->op[ns->stateidx + 1];
+
+ NS_DBG("switch_state: operation is known, switch to the next state, "
+ "state: %s, nxstate: %s\n",
+ ns_get_state_name(ns->state),
+ ns_get_state_name(ns->nxstate));
+
+ /* See whether we need to perform some action */
+ if ((ns->state & ACTION_MASK) &&
+ ns_do_state_action(ns, ns->state) < 0) {
+ ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ return;
+ }
+
+ } else {
+ /*
+ * We don't yet know which operation we perform.
+ * Try to identify it.
+ */
+
+ /*
+ * The only event causing the switch_state function to
+ * be called with yet unknown operation is new command.
+ */
+ ns->state = ns_get_state_by_command(ns->regs.command);
+
+ NS_DBG("switch_state: operation is unknown, try to find it\n");
+
+ if (ns_find_operation(ns, 0))
+ return;
+
+ if ((ns->state & ACTION_MASK) &&
+ ns_do_state_action(ns, ns->state) < 0) {
+ ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ return;
+ }
+ }
+
+ /* For 16x devices column means the page offset in words */
+ if ((ns->nxstate & STATE_ADDR_MASK) && ns->busw == 16) {
+ NS_DBG("switch_state: double the column number for 16x device\n");
+ ns->regs.column <<= 1;
+ }
+
+ if (NS_STATE(ns->nxstate) == STATE_READY) {
+ /*
+ * The current state is the last. Return to STATE_READY
+ */
+
+ u_char status = NS_STATUS_OK(ns);
+
+ /* In case of data states, see if all bytes were input/output */
+ if ((ns->state & (STATE_DATAIN_MASK | STATE_DATAOUT_MASK))
+ && ns->regs.count != ns->regs.num) {
+ NS_WARN("switch_state: not all bytes were processed, %d left\n",
+ ns->regs.num - ns->regs.count);
+ status = NS_STATUS_FAILED(ns);
+ }
+
+ NS_DBG("switch_state: operation complete, switch to STATE_READY state\n");
+
+ ns_switch_to_ready_state(ns, status);
+
+ return;
+ } else if (ns->nxstate & (STATE_DATAIN_MASK | STATE_DATAOUT_MASK)) {
+ /*
+ * If the next state is data input/output, switch to it now
+ */
+
+ ns->state = ns->nxstate;
+ ns->nxstate = ns->op[++ns->stateidx + 1];
+ ns->regs.num = ns->regs.count = 0;
+
+ NS_DBG("switch_state: the next state is data I/O, switch, "
+ "state: %s, nxstate: %s\n",
+ ns_get_state_name(ns->state),
+ ns_get_state_name(ns->nxstate));
+
+ /*
+ * Set the internal register to the count of bytes which
+ * are expected to be input or output
+ */
+ switch (NS_STATE(ns->state)) {
+ case STATE_DATAIN:
+ case STATE_DATAOUT:
+ ns->regs.num = ns->geom.pgszoob - ns->regs.off - ns->regs.column;
+ break;
+
+ case STATE_DATAOUT_ID:
+ ns->regs.num = ns->geom.idbytes;
+ break;
+
+ case STATE_DATAOUT_STATUS:
+ ns->regs.count = ns->regs.num = 0;
+ break;
+
+ default:
+ NS_ERR("switch_state: BUG! unknown data state\n");
+ }
+
+ } else if (ns->nxstate & STATE_ADDR_MASK) {
+ /*
+ * If the next state is address input, set the internal
+ * register to the number of expected address bytes
+ */
+
+ ns->regs.count = 0;
+
+ switch (NS_STATE(ns->nxstate)) {
+ case STATE_ADDR_PAGE:
+ ns->regs.num = ns->geom.pgaddrbytes;
+
+ break;
+ case STATE_ADDR_SEC:
+ ns->regs.num = ns->geom.secaddrbytes;
+ break;
+
+ case STATE_ADDR_ZERO:
+ ns->regs.num = 1;
+ break;
+
+ case STATE_ADDR_COLUMN:
+ /* The column address takes pgaddrbytes - secaddrbytes bytes (2 in practice) */
+ ns->regs.num = ns->geom.pgaddrbytes - ns->geom.secaddrbytes;
+ break;
+
+ default:
+ NS_ERR("switch_state: BUG! unknown address state\n");
+ }
+ } else {
+ /*
+ * Just reset internal counters.
+ */
+
+ ns->regs.num = 0;
+ ns->regs.count = 0;
+ }
+}
+
+static u_char ns_nand_read_byte(struct nand_chip *chip)
+{
+ struct nandsim *ns = nand_get_controller_data(chip);
+ u_char outb = 0x00;
+
+ /* Sanity and correctness checks */
+ if (!ns->lines.ce) {
+ NS_ERR("read_byte: chip is disabled, return %#x\n", (uint)outb);
+ return outb;
+ }
+ if (ns->lines.ale || ns->lines.cle) {
+ NS_ERR("read_byte: ALE or CLE pin is high, return %#x\n", (uint)outb);
+ return outb;
+ }
+ if (!(ns->state & STATE_DATAOUT_MASK)) {
+ NS_WARN("read_byte: unexpected data output cycle, state is %s return %#x\n",
+ ns_get_state_name(ns->state), (uint)outb);
+ return outb;
+ }
+
+ /* Status register may be read as many times as it is wanted */
+ if (NS_STATE(ns->state) == STATE_DATAOUT_STATUS) {
+ NS_DBG("read_byte: return %#x status\n", ns->regs.status);
+ return ns->regs.status;
+ }
+
+ /* Check if there is any data in the internal buffer which may be read */
+ if (ns->regs.count == ns->regs.num) {
+ NS_WARN("read_byte: no more data to output, return %#x\n", (uint)outb);
+ return outb;
+ }
+
+ switch (NS_STATE(ns->state)) {
+ case STATE_DATAOUT:
+ if (ns->busw == 8) {
+ outb = ns->buf.byte[ns->regs.count];
+ ns->regs.count += 1;
+ } else {
+ outb = (u_char)cpu_to_le16(ns->buf.word[ns->regs.count >> 1]);
+ ns->regs.count += 2;
+ }
+ break;
+ case STATE_DATAOUT_ID:
+ NS_DBG("read_byte: read ID byte %d, total = %d\n", ns->regs.count, ns->regs.num);
+ outb = ns->ids[ns->regs.count];
+ ns->regs.count += 1;
+ break;
+ default:
+ BUG();
+ }
+
+ if (ns->regs.count == ns->regs.num) {
+ NS_DBG("read_byte: all bytes were read\n");
+
+ if (NS_STATE(ns->nxstate) == STATE_READY)
+ ns_switch_state(ns);
+ }
+
+ return outb;
+}
+
+static void ns_nand_write_byte(struct nand_chip *chip, u_char byte)
+{
+ struct nandsim *ns = nand_get_controller_data(chip);
+
+ /* Sanity and correctness checks */
+ if (!ns->lines.ce) {
+ NS_ERR("write_byte: chip is disabled, ignore write\n");
+ return;
+ }
+ if (ns->lines.ale && ns->lines.cle) {
+ NS_ERR("write_byte: ALE and CLE pins are high simultaneously, ignore write\n");
+ return;
+ }
+
+ if (ns->lines.cle == 1) {
+ /*
+ * The byte written is a command.
+ */
+
+ if (byte == NAND_CMD_RESET) {
+ NS_LOG("reset chip\n");
+ ns_switch_to_ready_state(ns, NS_STATUS_OK(ns));
+ return;
+ }
+
+ /* Check that the command byte is correct */
+ if (ns_check_command(byte)) {
+ NS_ERR("write_byte: unknown command %#x\n", (uint)byte);
+ return;
+ }
+
+ if (NS_STATE(ns->state) == STATE_DATAOUT_STATUS
+ || NS_STATE(ns->state) == STATE_DATAOUT) {
+ int row = ns->regs.row;
+
+ ns_switch_state(ns);
+ if (byte == NAND_CMD_RNDOUT)
+ ns->regs.row = row;
+ }
+
+ /* Check if chip is expecting command */
+ if (NS_STATE(ns->nxstate) != STATE_UNKNOWN && !(ns->nxstate & STATE_CMD_MASK)) {
+ /* Do not warn if only 2 id bytes are read */
+ if (!(ns->regs.command == NAND_CMD_READID &&
+ NS_STATE(ns->state) == STATE_DATAOUT_ID && ns->regs.count == 2)) {
+ /*
+ * We are in a situation where something other than a
+ * command was expected, but a command was input. In this
+ * case ignore the previous command(s)/state(s) and accept
+ * the last one.
+ */
+ NS_WARN("write_byte: command (%#x) wasn't expected, expected state is %s, ignore previous states\n",
+ (uint)byte,
+ ns_get_state_name(ns->nxstate));
+ }
+ ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ }
+
+ NS_DBG("command byte corresponding to %s state accepted\n",
+ ns_get_state_name(ns_get_state_by_command(byte)));
+ ns->regs.command = byte;
+ ns_switch_state(ns);
+
+ } else if (ns->lines.ale == 1) {
+ /*
+ * The byte written is an address.
+ */
+
+ if (NS_STATE(ns->nxstate) == STATE_UNKNOWN) {
+
+ NS_DBG("write_byte: operation isn't known yet, identify it\n");
+
+ if (ns_find_operation(ns, 1) < 0)
+ return;
+
+ if ((ns->state & ACTION_MASK) &&
+ ns_do_state_action(ns, ns->state) < 0) {
+ ns_switch_to_ready_state(ns,
+ NS_STATUS_FAILED(ns));
+ return;
+ }
+
+ ns->regs.count = 0;
+ switch (NS_STATE(ns->nxstate)) {
+ case STATE_ADDR_PAGE:
+ ns->regs.num = ns->geom.pgaddrbytes;
+ break;
+ case STATE_ADDR_SEC:
+ ns->regs.num = ns->geom.secaddrbytes;
+ break;
+ case STATE_ADDR_ZERO:
+ ns->regs.num = 1;
+ break;
+ default:
+ BUG();
+ }
+ }
+
+ /* Check that chip is expecting address */
+ if (!(ns->nxstate & STATE_ADDR_MASK)) {
+ NS_ERR("write_byte: address (%#x) isn't expected, expected state is %s, switch to STATE_READY\n",
+ (uint)byte, ns_get_state_name(ns->nxstate));
+ ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ return;
+ }
+
+ /* Check if this is expected byte */
+ if (ns->regs.count == ns->regs.num) {
+ NS_ERR("write_byte: no more address bytes expected\n");
+ ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ return;
+ }
+
+ ns_accept_addr_byte(ns, byte);
+
+ ns->regs.count += 1;
+
+ NS_DBG("write_byte: address byte %#x was accepted (%d bytes input, %d expected)\n",
+ (uint)byte, ns->regs.count, ns->regs.num);
+
+ if (ns->regs.count == ns->regs.num) {
+ NS_DBG("address (%#x, %#x) is accepted\n", ns->regs.row, ns->regs.column);
+ ns_switch_state(ns);
+ }
+
+ } else {
+ /*
+ * The byte written is input data.
+ */
+
+ /* Check that chip is expecting data input */
+ if (!(ns->state & STATE_DATAIN_MASK)) {
+ NS_ERR("write_byte: data input (%#x) isn't expected, state is %s, switch to %s\n",
+ (uint)byte, ns_get_state_name(ns->state),
+ ns_get_state_name(STATE_READY));
+ ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ return;
+ }
+
+ /* Check if this is expected byte */
+ if (ns->regs.count == ns->regs.num) {
+ NS_WARN("write_byte: %u input bytes has already been accepted, ignore write\n",
+ ns->regs.num);
+ return;
+ }
+
+ if (ns->busw == 8) {
+ ns->buf.byte[ns->regs.count] = byte;
+ ns->regs.count += 1;
+ } else {
+ ns->buf.word[ns->regs.count >> 1] = cpu_to_le16((uint16_t)byte);
+ ns->regs.count += 2;
+ }
+ }
+}
+
+static void ns_nand_write_buf(struct nand_chip *chip, const u_char *buf,
+ int len)
+{
+ struct nandsim *ns = nand_get_controller_data(chip);
+
+ /* Check that chip is expecting data input */
+ if (!(ns->state & STATE_DATAIN_MASK)) {
+ NS_ERR("write_buf: data input isn't expected, state is %s, switch to STATE_READY\n",
+ ns_get_state_name(ns->state));
+ ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ return;
+ }
+
+ /* Check if these are expected bytes */
+ if (ns->regs.count + len > ns->regs.num) {
+ NS_ERR("write_buf: too many input bytes\n");
+ ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ return;
+ }
+
+ memcpy(ns->buf.byte + ns->regs.count, buf, len);
+ ns->regs.count += len;
+
+ if (ns->regs.count == ns->regs.num) {
+ NS_DBG("write_buf: %d bytes were written\n", ns->regs.count);
+ }
+}
+
+static void ns_nand_read_buf(struct nand_chip *chip, u_char *buf, int len)
+{
+ struct nandsim *ns = nand_get_controller_data(chip);
+
+ /* Sanity and correctness checks */
+ if (!ns->lines.ce) {
+ NS_ERR("read_buf: chip is disabled\n");
+ return;
+ }
+ if (ns->lines.ale || ns->lines.cle) {
+ NS_ERR("read_buf: ALE or CLE pin is high\n");
+ return;
+ }
+ if (!(ns->state & STATE_DATAOUT_MASK)) {
+ NS_WARN("read_buf: unexpected data output cycle, current state is %s\n",
+ ns_get_state_name(ns->state));
+ return;
+ }
+
+ if (NS_STATE(ns->state) != STATE_DATAOUT) {
+ int i;
+
+ for (i = 0; i < len; i++)
+ buf[i] = ns_nand_read_byte(chip);
+
+ return;
+ }
+
+ /* Check if these are expected bytes */
+ if (ns->regs.count + len > ns->regs.num) {
+ NS_ERR("read_buf: too many bytes to read\n");
+ ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ return;
+ }
+
+ memcpy(buf, ns->buf.byte + ns->regs.count, len);
+ ns->regs.count += len;
+
+ if (ns->regs.count == ns->regs.num) {
+ if (NS_STATE(ns->nxstate) == STATE_READY)
+ ns_switch_state(ns);
+ }
+}
+
+static int ns_exec_op(struct nand_chip *chip, const struct nand_operation *op,
+ bool check_only)
+{
+ int i;
+ unsigned int op_id;
+ const struct nand_op_instr *instr = NULL;
+ struct nandsim *ns = nand_get_controller_data(chip);
+
+ if (check_only)
+ return 0;
+
+ ns->lines.ce = 1;
+
+ for (op_id = 0; op_id < op->ninstrs; op_id++) {
+ instr = &op->instrs[op_id];
+ ns->lines.cle = 0;
+ ns->lines.ale = 0;
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ ns->lines.cle = 1;
+ ns_nand_write_byte(chip, instr->ctx.cmd.opcode);
+ break;
+ case NAND_OP_ADDR_INSTR:
+ ns->lines.ale = 1;
+ for (i = 0; i < instr->ctx.addr.naddrs; i++)
+ ns_nand_write_byte(chip, instr->ctx.addr.addrs[i]);
+ break;
+ case NAND_OP_DATA_IN_INSTR:
+ ns_nand_read_buf(chip, instr->ctx.data.buf.in, instr->ctx.data.len);
+ break;
+ case NAND_OP_DATA_OUT_INSTR:
+ ns_nand_write_buf(chip, instr->ctx.data.buf.out, instr->ctx.data.len);
+ break;
+ case NAND_OP_WAITRDY_INSTR:
+ /* we are always ready */
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int ns_attach_chip(struct nand_chip *chip)
+{
+ unsigned int eccsteps, eccbytes;
+
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+ chip->ecc.algo = bch ? NAND_ECC_ALGO_BCH : NAND_ECC_ALGO_HAMMING;
+
+ if (!bch)
+ return 0;
+
+ if (!mtd_nand_has_bch()) {
+ NS_ERR("BCH ECC support is disabled\n");
+ return -EINVAL;
+ }
+
+ /* Use 512-byte ecc blocks */
+ eccsteps = nsmtd->writesize / 512;
+ eccbytes = ((bch * 13) + 7) / 8;
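+ /*
+ * Each ECC step protects 512 bytes and needs 13 bits per bit of
+ * correction capability: e.g. bch = 4 gives 52 bits, rounded up
+ * to 7 ECC bytes per step, i.e. 28 ECC bytes on a 2048-byte page.
+ */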
+
+ /* Do not bother supporting small page devices */
+ if (nsmtd->oobsize < 64 || !eccsteps) {
+ NS_ERR("BCH not available on small page devices\n");
+ return -EINVAL;
+ }
+
+ if (((eccbytes * eccsteps) + 2) > nsmtd->oobsize) {
+ NS_ERR("Invalid BCH value %u\n", bch);
+ return -EINVAL;
+ }
+
+ chip->ecc.size = 512;
+ chip->ecc.strength = bch;
+ chip->ecc.bytes = eccbytes;
+
+ NS_INFO("Using %u-bit/%u bytes BCH ECC\n", bch, chip->ecc.size);
+
+ return 0;
+}
+
+static const struct nand_controller_ops ns_controller_ops = {
+ .attach_chip = ns_attach_chip,
+ .exec_op = ns_exec_op,
+};
+
+/*
+ * Module initialization function
+ */
+static int __init ns_init_module(void)
+{
+ struct list_head *pos, *n;
+ struct nand_chip *chip;
+ struct nandsim *ns;
+ int ret;
+
+ if (bus_width != 8 && bus_width != 16) {
+ NS_ERR("wrong bus width (%d), use only 8 or 16\n", bus_width);
+ return -EINVAL;
+ }
+
+ ns = kzalloc(sizeof(struct nandsim), GFP_KERNEL);
+ if (!ns) {
+ NS_ERR("unable to allocate core structures.\n");
+ return -ENOMEM;
+ }
+ chip = &ns->chip;
+ nsmtd = nand_to_mtd(chip);
+ nand_set_controller_data(chip, (void *)ns);
+
+ /*
+ * The NAND_SKIP_BBTSCAN option is necessary for the 'overridesize'
+ * and 'badblocks' parameters to work.
+ */
+ chip->options |= NAND_SKIP_BBTSCAN;
+
+ switch (bbt) {
+ case 2:
+ chip->bbt_options |= NAND_BBT_NO_OOB;
+ fallthrough;
+ case 1:
+ chip->bbt_options |= NAND_BBT_USE_FLASH;
+ fallthrough;
+ case 0:
+ break;
+ default:
+ NS_ERR("bbt has to be 0..2\n");
+ ret = -EINVAL;
+ goto free_ns_struct;
+ }
+ /*
+ * Perform minimum nandsim structure initialization to handle
+ * the initial ID read command correctly
+ */
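+ /*
+ * Trailing 0xFF entries in id_bytes are treated as unused: e.g. an
+ * ID with two meaningful bytes followed by six 0xFF bytes yields
+ * idbytes = 2, one with four meaningful bytes yields idbytes = 4.
+ */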
+ if (id_bytes[6] != 0xFF || id_bytes[7] != 0xFF)
+ ns->geom.idbytes = 8;
+ else if (id_bytes[4] != 0xFF || id_bytes[5] != 0xFF)
+ ns->geom.idbytes = 6;
+ else if (id_bytes[2] != 0xFF || id_bytes[3] != 0xFF)
+ ns->geom.idbytes = 4;
+ else
+ ns->geom.idbytes = 2;
+ ns->regs.status = NS_STATUS_OK(ns);
+ ns->nxstate = STATE_UNKNOWN;
+ ns->options |= OPT_PAGE512; /* temporary value */
+ memcpy(ns->ids, id_bytes, sizeof(ns->ids));
+ if (bus_width == 16) {
+ ns->busw = 16;
+ chip->options |= NAND_BUSWIDTH_16;
+ }
+
+ nsmtd->owner = THIS_MODULE;
+
+ ret = ns_parse_weakblocks();
+ if (ret)
+ goto free_ns_struct;
+
+ ret = ns_parse_weakpages();
+ if (ret)
+ goto free_wb_list;
+
+ ret = ns_parse_gravepages();
+ if (ret)
+ goto free_wp_list;
+
+ nand_controller_init(&ns->base);
+ ns->base.ops = &ns_controller_ops;
+ chip->controller = &ns->base;
+
+ ret = nand_scan(chip, 1);
+ if (ret) {
+ NS_ERR("Could not scan NAND Simulator device\n");
+ goto free_gp_list;
+ }
+
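+ /*
+ * overridesize is the log2 of the requested device size in erase
+ * blocks; e.g. overridesize = 4 sets the simulated device to
+ * 16 erase blocks.
+ */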
+ if (overridesize) {
+ uint64_t new_size = (uint64_t)nsmtd->erasesize << overridesize;
+ struct nand_memory_organization *memorg;
+ u64 targetsize;
+
+ memorg = nanddev_get_memorg(&chip->base);
+
+ if (new_size >> overridesize != nsmtd->erasesize) {
+ NS_ERR("overridesize is too big\n");
+ ret = -EINVAL;
+ goto cleanup_nand;
+ }
+
+ /* N.B. This relies on nand_scan not doing anything with the size before we change it */
+ nsmtd->size = new_size;
+ memorg->eraseblocks_per_lun = 1 << overridesize;
+ targetsize = nanddev_target_size(&chip->base);
+ chip->chip_shift = ffs(nsmtd->erasesize) + overridesize - 1;
+ chip->pagemask = (targetsize >> chip->page_shift) - 1;
+ }
+
+ ret = ns_setup_wear_reporting(nsmtd);
+ if (ret)
+ goto cleanup_nand;
+
+ ret = ns_init(nsmtd);
+ if (ret)
+ goto free_ebw;
+
+ ret = nand_create_bbt(chip);
+ if (ret)
+ goto free_ns_object;
+
+ ret = ns_parse_badblocks(ns, nsmtd);
+ if (ret)
+ goto free_ns_object;
+
+ /* Register NAND partitions */
+ ret = mtd_device_register(nsmtd, &ns->partitions[0], ns->nbparts);
+ if (ret)
+ goto free_ns_object;
+
+ ret = ns_debugfs_create(ns);
+ if (ret)
+ goto unregister_mtd;
+
+ return 0;
+
+unregister_mtd:
+ WARN_ON(mtd_device_unregister(nsmtd));
+free_ns_object:
+ ns_free(ns);
+free_ebw:
+ kfree(erase_block_wear);
+cleanup_nand:
+ nand_cleanup(chip);
+free_gp_list:
+ list_for_each_safe(pos, n, &grave_pages) {
+ list_del(pos);
+ kfree(list_entry(pos, struct grave_page, list));
+ }
+free_wp_list:
+ list_for_each_safe(pos, n, &weak_pages) {
+ list_del(pos);
+ kfree(list_entry(pos, struct weak_page, list));
+ }
+free_wb_list:
+ list_for_each_safe(pos, n, &weak_blocks) {
+ list_del(pos);
+ kfree(list_entry(pos, struct weak_block, list));
+ }
+free_ns_struct:
+ kfree(ns);
+
+ return ret;
+}
+
+module_init(ns_init_module);
+
+/*
+ * Module clean-up function
+ */
+static void __exit ns_cleanup_module(void)
+{
+ struct nand_chip *chip = mtd_to_nand(nsmtd);
+ struct nandsim *ns = nand_get_controller_data(chip);
+ struct list_head *pos, *n;
+
+ ns_debugfs_remove(ns);
+ WARN_ON(mtd_device_unregister(nsmtd));
+ ns_free(ns);
+ kfree(erase_block_wear);
+ nand_cleanup(chip);
+
+ list_for_each_safe(pos, n, &grave_pages) {
+ list_del(pos);
+ kfree(list_entry(pos, struct grave_page, list));
+ }
+
+ list_for_each_safe(pos, n, &weak_pages) {
+ list_del(pos);
+ kfree(list_entry(pos, struct weak_page, list));
+ }
+
+ list_for_each_safe(pos, n, &weak_blocks) {
+ list_del(pos);
+ kfree(list_entry(pos, struct weak_block, list));
+ }
+
+ kfree(ns);
+}
+
+module_exit(ns_cleanup_module);
+
+MODULE_LICENSE ("GPL");
+MODULE_AUTHOR ("Artem B. Bityuckiy");
+MODULE_DESCRIPTION ("The NAND flash simulator");
diff --git a/drivers/mtd/nand/raw/ndfc.c b/drivers/mtd/nand/raw/ndfc.c
new file mode 100644
index 000000000..0fb4ba93c
--- /dev/null
+++ b/drivers/mtd/nand/raw/ndfc.c
@@ -0,0 +1,278 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Overview:
+ * Platform independent driver for NDFC (NanD Flash Controller)
+ * integrated into EP440 cores
+ *
+ * Ported to an OF platform driver by Sean MacLennan
+ *
+ * The NDFC supports multiple chips, but this driver only supports a
+ * single chip since I do not have access to any boards with
+ * multiple chips.
+ *
+ * Author: Thomas Gleixner
+ *
+ * Copyright 2006 IBM
+ * Copyright 2008 PIKA Technologies
+ * Sean MacLennan <smaclennan@pikatech.com>
+ */
+#include <linux/module.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/ndfc.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <asm/io.h>
+
+#define NDFC_MAX_CS 4
+
+struct ndfc_controller {
+ struct platform_device *ofdev;
+ void __iomem *ndfcbase;
+ struct nand_chip chip;
+ int chip_select;
+ struct nand_controller ndfc_control;
+};
+
+static struct ndfc_controller ndfc_ctrl[NDFC_MAX_CS];
+
+static void ndfc_select_chip(struct nand_chip *nchip, int chip)
+{
+ uint32_t ccr;
+ struct ndfc_controller *ndfc = nand_get_controller_data(nchip);
+
+ ccr = in_be32(ndfc->ndfcbase + NDFC_CCR);
+ if (chip >= 0) {
+ ccr &= ~NDFC_CCR_BS_MASK;
+ ccr |= NDFC_CCR_BS(chip + ndfc->chip_select);
+ } else {
+ ccr |= NDFC_CCR_RESET_CE;
+ }
+ out_be32(ndfc->ndfcbase + NDFC_CCR, ccr);
+}
+
+static void ndfc_hwcontrol(struct nand_chip *chip, int cmd, unsigned int ctrl)
+{
+ struct ndfc_controller *ndfc = nand_get_controller_data(chip);
+
+ if (cmd == NAND_CMD_NONE)
+ return;
+
+ if (ctrl & NAND_CLE)
+ writel(cmd & 0xFF, ndfc->ndfcbase + NDFC_CMD);
+ else
+ writel(cmd & 0xFF, ndfc->ndfcbase + NDFC_ALE);
+}
+
+static int ndfc_ready(struct nand_chip *chip)
+{
+ struct ndfc_controller *ndfc = nand_get_controller_data(chip);
+
+ return in_be32(ndfc->ndfcbase + NDFC_STAT) & NDFC_STAT_IS_READY;
+}
+
+static void ndfc_enable_hwecc(struct nand_chip *chip, int mode)
+{
+ uint32_t ccr;
+ struct ndfc_controller *ndfc = nand_get_controller_data(chip);
+
+ ccr = in_be32(ndfc->ndfcbase + NDFC_CCR);
+ ccr |= NDFC_CCR_RESET_ECC;
+ out_be32(ndfc->ndfcbase + NDFC_CCR, ccr);
+ wmb();
+}
+
+static int ndfc_calculate_ecc(struct nand_chip *chip,
+ const u_char *dat, u_char *ecc_code)
+{
+ struct ndfc_controller *ndfc = nand_get_controller_data(chip);
+ uint32_t ecc;
+ uint8_t *p = (uint8_t *)&ecc;
+
+ wmb();
+ ecc = in_be32(ndfc->ndfcbase + NDFC_ECC);
+ /* The NDFC uses Smart Media (SMC) byte order */
+ ecc_code[0] = p[1];
+ ecc_code[1] = p[2];
+ ecc_code[2] = p[3];
+
+ return 0;
+}
+
+/*
+ * Speedups for buffer read/write/verify
+ *
+ * The NDFC allows 32-bit reads and writes of data, so we can speed up
+ * the buffer functions. No further checking is needed, as nand_base
+ * always reads/writes page aligned.
+ */
+static void ndfc_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
+{
+ struct ndfc_controller *ndfc = nand_get_controller_data(chip);
+ uint32_t *p = (uint32_t *) buf;
+
+ for (; len > 0; len -= 4)
+ *p++ = in_be32(ndfc->ndfcbase + NDFC_DATA);
+}
+
+static void ndfc_write_buf(struct nand_chip *chip, const uint8_t *buf, int len)
+{
+ struct ndfc_controller *ndfc = nand_get_controller_data(chip);
+ uint32_t *p = (uint32_t *) buf;
+
+ for (; len > 0; len -= 4)
+ out_be32(ndfc->ndfcbase + NDFC_DATA, *p++);
+}
+
+/*
+ * Initialize chip structure
+ */
+static int ndfc_chip_init(struct ndfc_controller *ndfc,
+ struct device_node *node)
+{
+ struct device_node *flash_np;
+ struct nand_chip *chip = &ndfc->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ chip->legacy.IO_ADDR_R = ndfc->ndfcbase + NDFC_DATA;
+ chip->legacy.IO_ADDR_W = ndfc->ndfcbase + NDFC_DATA;
+ chip->legacy.cmd_ctrl = ndfc_hwcontrol;
+ chip->legacy.dev_ready = ndfc_ready;
+ chip->legacy.select_chip = ndfc_select_chip;
+ chip->legacy.chip_delay = 50;
+ chip->controller = &ndfc->ndfc_control;
+ chip->legacy.read_buf = ndfc_read_buf;
+ chip->legacy.write_buf = ndfc_write_buf;
+ chip->ecc.correct = nand_correct_data;
+ chip->ecc.hwctl = ndfc_enable_hwecc;
+ chip->ecc.calculate = ndfc_calculate_ecc;
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+ chip->ecc.size = 256;
+ chip->ecc.bytes = 3;
+ chip->ecc.strength = 1;
+ nand_set_controller_data(chip, ndfc);
+
+ mtd->dev.parent = &ndfc->ofdev->dev;
+
+ flash_np = of_get_next_child(node, NULL);
+ if (!flash_np)
+ return -ENODEV;
+ nand_set_flash_node(chip, flash_np);
+
+ mtd->name = kasprintf(GFP_KERNEL, "%s.%pOFn", dev_name(&ndfc->ofdev->dev),
+ flash_np);
+ if (!mtd->name) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = nand_scan(chip, 1);
+ if (ret)
+ goto err;
+
+ ret = mtd_device_register(mtd, NULL, 0);
+
+err:
+ of_node_put(flash_np);
+ if (ret)
+ kfree(mtd->name);
+ return ret;
+}
+
+static int ndfc_probe(struct platform_device *ofdev)
+{
+ struct ndfc_controller *ndfc;
+ const __be32 *reg;
+ u32 ccr;
+ u32 cs;
+ int err, len;
+
+ /* Read the reg property to get the chip select */
+ reg = of_get_property(ofdev->dev.of_node, "reg", &len);
+ if (reg == NULL || len != 12) {
+ dev_err(&ofdev->dev, "unable read reg property (%d)\n", len);
+ return -ENOENT;
+ }
+
+ cs = be32_to_cpu(reg[0]);
+ if (cs >= NDFC_MAX_CS) {
+ dev_err(&ofdev->dev, "invalid CS number (%d)\n", cs);
+ return -EINVAL;
+ }
+
+ ndfc = &ndfc_ctrl[cs];
+ ndfc->chip_select = cs;
+
+ nand_controller_init(&ndfc->ndfc_control);
+ ndfc->ofdev = ofdev;
+ dev_set_drvdata(&ofdev->dev, ndfc);
+
+ ndfc->ndfcbase = of_iomap(ofdev->dev.of_node, 0);
+ if (!ndfc->ndfcbase) {
+ dev_err(&ofdev->dev, "failed to get memory\n");
+ return -EIO;
+ }
+
+ ccr = NDFC_CCR_BS(ndfc->chip_select);
+
+ /* It is ok if ccr does not exist - just default to 0 */
+ reg = of_get_property(ofdev->dev.of_node, "ccr", NULL);
+ if (reg)
+ ccr |= be32_to_cpup(reg);
+
+ out_be32(ndfc->ndfcbase + NDFC_CCR, ccr);
+
+ /* Set the bank settings if given */
+ reg = of_get_property(ofdev->dev.of_node, "bank-settings", NULL);
+ if (reg) {
+ int offset = NDFC_BCFG0 + (ndfc->chip_select << 2);
+ out_be32(ndfc->ndfcbase + offset, be32_to_cpup(reg));
+ }
+
+ err = ndfc_chip_init(ndfc, ofdev->dev.of_node);
+ if (err) {
+ iounmap(ndfc->ndfcbase);
+ return err;
+ }
+
+ return 0;
+}
+
+static int ndfc_remove(struct platform_device *ofdev)
+{
+ struct ndfc_controller *ndfc = dev_get_drvdata(&ofdev->dev);
+ struct nand_chip *chip = &ndfc->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ ret = mtd_device_unregister(mtd);
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ kfree(mtd->name);
+
+ return 0;
+}
+
+static const struct of_device_id ndfc_match[] = {
+ { .compatible = "ibm,ndfc", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ndfc_match);
+
+static struct platform_driver ndfc_driver = {
+ .driver = {
+ .name = "ndfc",
+ .of_match_table = ndfc_match,
+ },
+ .probe = ndfc_probe,
+ .remove = ndfc_remove,
+};
+
+module_platform_driver(ndfc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
+MODULE_DESCRIPTION("OF Platform driver for NDFC");
diff --git a/drivers/mtd/nand/raw/omap2.c b/drivers/mtd/nand/raw/omap2.c
new file mode 100644
index 000000000..512f60780
--- /dev/null
+++ b/drivers/mtd/nand/raw/omap2.c
@@ -0,0 +1,2319 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright © 2004 Texas Instruments, Jian Zhang <jzhang@ti.com>
+ * Copyright © 2004 Micron Technology Inc.
+ * Copyright © 2004 David Brownell
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/omap-dma.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include <linux/mtd/nand_bch.h>
+#include <linux/platform_data/elm.h>
+
+#include <linux/omap-gpmc.h>
+#include <linux/platform_data/mtd-nand-omap2.h>
+
+#define DRIVER_NAME "omap2-nand"
+#define OMAP_NAND_TIMEOUT_MS 5000
+
+#define NAND_Ecc_P1e (1 << 0)
+#define NAND_Ecc_P2e (1 << 1)
+#define NAND_Ecc_P4e (1 << 2)
+#define NAND_Ecc_P8e (1 << 3)
+#define NAND_Ecc_P16e (1 << 4)
+#define NAND_Ecc_P32e (1 << 5)
+#define NAND_Ecc_P64e (1 << 6)
+#define NAND_Ecc_P128e (1 << 7)
+#define NAND_Ecc_P256e (1 << 8)
+#define NAND_Ecc_P512e (1 << 9)
+#define NAND_Ecc_P1024e (1 << 10)
+#define NAND_Ecc_P2048e (1 << 11)
+
+#define NAND_Ecc_P1o (1 << 16)
+#define NAND_Ecc_P2o (1 << 17)
+#define NAND_Ecc_P4o (1 << 18)
+#define NAND_Ecc_P8o (1 << 19)
+#define NAND_Ecc_P16o (1 << 20)
+#define NAND_Ecc_P32o (1 << 21)
+#define NAND_Ecc_P64o (1 << 22)
+#define NAND_Ecc_P128o (1 << 23)
+#define NAND_Ecc_P256o (1 << 24)
+#define NAND_Ecc_P512o (1 << 25)
+#define NAND_Ecc_P1024o (1 << 26)
+#define NAND_Ecc_P2048o (1 << 27)
+
+#define TF(value) (value ? 1 : 0)
+
+#define P2048e(a) (TF(a & NAND_Ecc_P2048e) << 0)
+#define P2048o(a) (TF(a & NAND_Ecc_P2048o) << 1)
+#define P1e(a) (TF(a & NAND_Ecc_P1e) << 2)
+#define P1o(a) (TF(a & NAND_Ecc_P1o) << 3)
+#define P2e(a) (TF(a & NAND_Ecc_P2e) << 4)
+#define P2o(a) (TF(a & NAND_Ecc_P2o) << 5)
+#define P4e(a) (TF(a & NAND_Ecc_P4e) << 6)
+#define P4o(a) (TF(a & NAND_Ecc_P4o) << 7)
+
+#define P8e(a) (TF(a & NAND_Ecc_P8e) << 0)
+#define P8o(a) (TF(a & NAND_Ecc_P8o) << 1)
+#define P16e(a) (TF(a & NAND_Ecc_P16e) << 2)
+#define P16o(a) (TF(a & NAND_Ecc_P16o) << 3)
+#define P32e(a) (TF(a & NAND_Ecc_P32e) << 4)
+#define P32o(a) (TF(a & NAND_Ecc_P32o) << 5)
+#define P64e(a) (TF(a & NAND_Ecc_P64e) << 6)
+#define P64o(a) (TF(a & NAND_Ecc_P64o) << 7)
+
+#define P128e(a) (TF(a & NAND_Ecc_P128e) << 0)
+#define P128o(a) (TF(a & NAND_Ecc_P128o) << 1)
+#define P256e(a) (TF(a & NAND_Ecc_P256e) << 2)
+#define P256o(a) (TF(a & NAND_Ecc_P256o) << 3)
+#define P512e(a) (TF(a & NAND_Ecc_P512e) << 4)
+#define P512o(a) (TF(a & NAND_Ecc_P512o) << 5)
+#define P1024e(a) (TF(a & NAND_Ecc_P1024e) << 6)
+#define P1024o(a) (TF(a & NAND_Ecc_P1024o) << 7)
+
+#define P8e_s(a) (TF(a & NAND_Ecc_P8e) << 0)
+#define P8o_s(a) (TF(a & NAND_Ecc_P8o) << 1)
+#define P16e_s(a) (TF(a & NAND_Ecc_P16e) << 2)
+#define P16o_s(a) (TF(a & NAND_Ecc_P16o) << 3)
+#define P1e_s(a) (TF(a & NAND_Ecc_P1e) << 4)
+#define P1o_s(a) (TF(a & NAND_Ecc_P1o) << 5)
+#define P2e_s(a) (TF(a & NAND_Ecc_P2e) << 6)
+#define P2o_s(a) (TF(a & NAND_Ecc_P2o) << 7)
+
+#define P4e_s(a) (TF(a & NAND_Ecc_P4e) << 0)
+#define P4o_s(a) (TF(a & NAND_Ecc_P4o) << 1)
+
+#define PREFETCH_CONFIG1_CS_SHIFT 24
+#define ECC_CONFIG_CS_SHIFT 1
+#define CS_MASK 0x7
+#define ENABLE_PREFETCH (0x1 << 7)
+#define DMA_MPU_MODE_SHIFT 2
+#define ECCSIZE0_SHIFT 12
+#define ECCSIZE1_SHIFT 22
+#define ECC1RESULTSIZE 0x1
+#define ECCCLEAR 0x100
+#define ECC1 0x1
+#define PREFETCH_FIFOTHRESHOLD_MAX 0x40
+#define PREFETCH_FIFOTHRESHOLD(val) ((val) << 8)
+#define PREFETCH_STATUS_COUNT(val) (val & 0x00003fff)
+#define PREFETCH_STATUS_FIFO_CNT(val) ((val >> 24) & 0x7F)
+#define STATUS_BUFF_EMPTY 0x00000001
+
+#define SECTOR_BYTES 512
+/* 4 bit padding to make byte aligned, 56 = 52 + 4 */
+#define BCH4_BIT_PAD 4
+
+/* GPMC ecc engine settings for read */
+#define BCH_WRAPMODE_1 1 /* BCH wrap mode 1 */
+#define BCH8R_ECC_SIZE0 0x1a /* ecc_size0 = 26 */
+#define BCH8R_ECC_SIZE1 0x2 /* ecc_size1 = 2 */
+#define BCH4R_ECC_SIZE0 0xd /* ecc_size0 = 13 */
+#define BCH4R_ECC_SIZE1 0x3 /* ecc_size1 = 3 */
+
+/* GPMC ecc engine settings for write */
+#define BCH_WRAPMODE_6 6 /* BCH wrap mode 6 */
+#define BCH_ECC_SIZE0 0x0 /* ecc_size0 = 0, no oob protection */
+#define BCH_ECC_SIZE1 0x20 /* ecc_size1 = 32 */
+
+#define BADBLOCK_MARKER_LENGTH 2
+
+static u_char bch16_vector[] = {0xf5, 0x24, 0x1c, 0xd0, 0x61, 0xb3, 0xf1, 0x55,
+ 0x2e, 0x2c, 0x86, 0xa3, 0xed, 0x36, 0x1b, 0x78,
+ 0x48, 0x76, 0xa9, 0x3b, 0x97, 0xd1, 0x7a, 0x93,
+ 0x07, 0x0e};
+static u_char bch8_vector[] = {0xf3, 0xdb, 0x14, 0x16, 0x8b, 0xd2, 0xbe, 0xcc,
+ 0xac, 0x6b, 0xff, 0x99, 0x7b};
+static u_char bch4_vector[] = {0x00, 0x6b, 0x31, 0xdd, 0x41, 0xbc, 0x10};
+
+struct omap_nand_info {
+ struct nand_chip nand;
+ struct platform_device *pdev;
+
+ int gpmc_cs;
+ bool dev_ready;
+ enum nand_io xfer_type;
+ int devsize;
+ enum omap_ecc ecc_opt;
+ struct device_node *elm_of_node;
+
+ unsigned long phys_base;
+ struct completion comp;
+ struct dma_chan *dma;
+ int gpmc_irq_fifo;
+ int gpmc_irq_count;
+ enum {
+ OMAP_NAND_IO_READ = 0, /* read */
+ OMAP_NAND_IO_WRITE, /* write */
+ } iomode;
+ u_char *buf;
+ int buf_len;
+ /* Interface to GPMC */
+ struct gpmc_nand_regs reg;
+ struct gpmc_nand_ops *ops;
+ bool flash_bbt;
+ /* fields specific for BCHx_HW ECC scheme */
+ struct device *elm_dev;
+ /* NAND ready gpio */
+ struct gpio_desc *ready_gpiod;
+};
+
+static inline struct omap_nand_info *mtd_to_omap(struct mtd_info *mtd)
+{
+ return container_of(mtd_to_nand(mtd), struct omap_nand_info, nand);
+}
+
+/**
+ * omap_prefetch_enable - configures and starts prefetch transfer
+ * @cs: cs (chip select) number
+ * @fifo_th: fifo threshold to be used for read/ write
+ * @dma_mode: dma mode enable (1) or disable (0)
+ * @u32_count: number of bytes to be transferred
+ * @is_write: prefetch read (0) or post write (1) mode
+ * @info: NAND device structure
+ */
+static int omap_prefetch_enable(int cs, int fifo_th, int dma_mode,
+ unsigned int u32_count, int is_write, struct omap_nand_info *info)
+{
+ u32 val;
+
+ if (fifo_th > PREFETCH_FIFOTHRESHOLD_MAX)
+ return -1;
+
+ if (readl(info->reg.gpmc_prefetch_control))
+ return -EBUSY;
+
+ /* Set the amount of bytes to be prefetched */
+ writel(u32_count, info->reg.gpmc_prefetch_config2);
+
+ /*
+ * Set dma/mpu mode and the prefetch read/post write mode, and
+ * enable the engine. Also set which cs has requested it.
+ */
+ val = ((cs << PREFETCH_CONFIG1_CS_SHIFT) |
+ PREFETCH_FIFOTHRESHOLD(fifo_th) | ENABLE_PREFETCH |
+ (dma_mode << DMA_MPU_MODE_SHIFT) | (is_write & 0x1));
+ writel(val, info->reg.gpmc_prefetch_config1);
+
+ /* Start the prefetch engine */
+ writel(0x1, info->reg.gpmc_prefetch_control);
+
+ return 0;
+}
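+
+/*
+ * A typical CPU-driven transfer (see omap_read_buf_pref() below) enables
+ * the engine with dma_mode = 0, drains the FIFO with ioread32_rep() as
+ * PREFETCH_STATUS_FIFO_CNT() reports available data, and finally calls
+ * omap_prefetch_reset().
+ */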
+
+/**
+ * omap_prefetch_reset - disables and stops the prefetch engine
+ * @cs: chip select number
+ * @info: NAND device structure
+ */
+static int omap_prefetch_reset(int cs, struct omap_nand_info *info)
+{
+ u32 config1;
+
+ /* check if the same module/cs is trying to reset */
+ config1 = readl(info->reg.gpmc_prefetch_config1);
+ if (((config1 >> PREFETCH_CONFIG1_CS_SHIFT) & CS_MASK) != cs)
+ return -EINVAL;
+
+ /* Stop the PFPW engine */
+ writel(0x0, info->reg.gpmc_prefetch_control);
+
+ /* Reset/disable the PFPW engine */
+ writel(0x0, info->reg.gpmc_prefetch_config1);
+
+ return 0;
+}
+
+/**
+ * omap_hwcontrol - hardware specific access to control-lines
+ * @chip: NAND chip object
+ * @cmd: command to device
+ * @ctrl:
+ * NAND_NCE: bit 0 -> don't care
+ * NAND_CLE: bit 1 -> Command Latch
+ * NAND_ALE: bit 2 -> Address Latch
+ *
+ * NOTE: boards may use different bits for these!!
+ */
+static void omap_hwcontrol(struct nand_chip *chip, int cmd, unsigned int ctrl)
+{
+ struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
+
+ if (cmd != NAND_CMD_NONE) {
+ if (ctrl & NAND_CLE)
+ writeb(cmd, info->reg.gpmc_nand_command);
+
+ else if (ctrl & NAND_ALE)
+ writeb(cmd, info->reg.gpmc_nand_address);
+
+ else /* NAND_NCE */
+ writeb(cmd, info->reg.gpmc_nand_data);
+ }
+}
+
+/**
+ * omap_read_buf8 - read data from NAND controller into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ */
+static void omap_read_buf8(struct mtd_info *mtd, u_char *buf, int len)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+
+ ioread8_rep(nand->legacy.IO_ADDR_R, buf, len);
+}
+
+/**
+ * omap_write_buf8 - write buffer to NAND controller
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ */
+static void omap_write_buf8(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ struct omap_nand_info *info = mtd_to_omap(mtd);
+ u_char *p = (u_char *)buf;
+ bool status;
+
+ while (len--) {
+ iowrite8(*p++, info->nand.legacy.IO_ADDR_W);
+ /* wait until buffer is available for write */
+ do {
+ status = info->ops->nand_writebuffer_empty();
+ } while (!status);
+ }
+}
+
+/**
+ * omap_read_buf16 - read data from NAND controller into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ */
+static void omap_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+
+ ioread16_rep(nand->legacy.IO_ADDR_R, buf, len / 2);
+}
+
+/**
+ * omap_write_buf16 - write buffer to NAND controller
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ */
+static void omap_write_buf16(struct mtd_info *mtd, const u_char * buf, int len)
+{
+ struct omap_nand_info *info = mtd_to_omap(mtd);
+ u16 *p = (u16 *) buf;
+ bool status;
+ /* FIXME try bursts of writesw() or DMA ... */
+ len >>= 1;
+
+ while (len--) {
+ iowrite16(*p++, info->nand.legacy.IO_ADDR_W);
+ /* wait until buffer is available for write */
+ do {
+ status = info->ops->nand_writebuffer_empty();
+ } while (!status);
+ }
+}
+
+/**
+ * omap_read_buf_pref - read data from NAND controller into buffer
+ * @chip: NAND chip object
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ */
+static void omap_read_buf_pref(struct nand_chip *chip, u_char *buf, int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct omap_nand_info *info = mtd_to_omap(mtd);
+ uint32_t r_count = 0;
+ int ret = 0;
+ u32 *p = (u32 *)buf;
+
+ /* take care of subpage reads */
+ if (len % 4) {
+ if (info->nand.options & NAND_BUSWIDTH_16)
+ omap_read_buf16(mtd, buf, len % 4);
+ else
+ omap_read_buf8(mtd, buf, len % 4);
+ p = (u32 *) (buf + len % 4);
+ len -= len % 4;
+ }
+
+ /* configure and start prefetch transfer */
+ ret = omap_prefetch_enable(info->gpmc_cs,
+ PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x0, info);
+ if (ret) {
+ /* PFPW engine is busy, use cpu copy method */
+ if (info->nand.options & NAND_BUSWIDTH_16)
+ omap_read_buf16(mtd, (u_char *)p, len);
+ else
+ omap_read_buf8(mtd, (u_char *)p, len);
+ } else {
+ do {
+ r_count = readl(info->reg.gpmc_prefetch_status);
+ r_count = PREFETCH_STATUS_FIFO_CNT(r_count);
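+ /* the FIFO count is in bytes; convert it to 32-bit words */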
+ r_count = r_count >> 2;
+ ioread32_rep(info->nand.legacy.IO_ADDR_R, p, r_count);
+ p += r_count;
+ len -= r_count << 2;
+ } while (len);
+ /* disable and stop the PFPW engine */
+ omap_prefetch_reset(info->gpmc_cs, info);
+ }
+}
+
+/**
+ * omap_write_buf_pref - write buffer to NAND controller
+ * @chip: NAND chip object
+ * @buf: data buffer
+ * @len: number of bytes to write
+ */
+static void omap_write_buf_pref(struct nand_chip *chip, const u_char *buf,
+ int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct omap_nand_info *info = mtd_to_omap(mtd);
+ uint32_t w_count = 0;
+ int i = 0, ret = 0;
+ u16 *p = (u16 *)buf;
+ unsigned long tim, limit;
+ u32 val;
+
+ /* take care of subpage writes */
+ if (len % 2 != 0) {
+ writeb(*buf, info->nand.legacy.IO_ADDR_W);
+ p = (u16 *)(buf + 1);
+ len--;
+ }
+
+ /* configure and start prefetch transfer */
+ ret = omap_prefetch_enable(info->gpmc_cs,
+ PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1, info);
+ if (ret) {
+ /* PFPW engine is busy, use cpu copy method */
+ if (info->nand.options & NAND_BUSWIDTH_16)
+ omap_write_buf16(mtd, (u_char *)p, len);
+ else
+ omap_write_buf8(mtd, (u_char *)p, len);
+ } else {
+ while (len) {
+ w_count = readl(info->reg.gpmc_prefetch_status);
+ w_count = PREFETCH_STATUS_FIFO_CNT(w_count);
+ w_count = w_count >> 1;
+ for (i = 0; (i < w_count) && len; i++, len -= 2)
+ iowrite16(*p++, info->nand.legacy.IO_ADDR_W);
+ }
+ /* wait for the data to be flushed out before resetting the prefetch */
+ tim = 0;
+ limit = (loops_per_jiffy *
+ msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
+ do {
+ cpu_relax();
+ val = readl(info->reg.gpmc_prefetch_status);
+ val = PREFETCH_STATUS_COUNT(val);
+ } while (val && (tim++ < limit));
+
+ /* disable and stop the PFPW engine */
+ omap_prefetch_reset(info->gpmc_cs, info);
+ }
+}
+
+/*
+ * omap_nand_dma_callback: callback on the completion of dma transfer
+ * @data: pointer to completion data structure
+ */
+static void omap_nand_dma_callback(void *data)
+{
+ complete((struct completion *) data);
+}
+
+/*
+ * omap_nand_dma_transfer: configure and start dma transfer
+ * @mtd: MTD device structure
+ * @addr: virtual address in RAM of source/destination
+ * @len: number of data bytes to be transferred
+ * @is_write: flag for read/write operation
+ */
+static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
+ unsigned int len, int is_write)
+{
+ struct omap_nand_info *info = mtd_to_omap(mtd);
+ struct dma_async_tx_descriptor *tx;
+ enum dma_data_direction dir = is_write ? DMA_TO_DEVICE :
+ DMA_FROM_DEVICE;
+ struct scatterlist sg;
+ unsigned long tim, limit;
+ unsigned n;
+ int ret;
+ u32 val;
+
+ if (!virt_addr_valid(addr))
+ goto out_copy;
+
+ sg_init_one(&sg, addr, len);
+ n = dma_map_sg(info->dma->device->dev, &sg, 1, dir);
+ if (n == 0) {
+ dev_err(&info->pdev->dev,
+ "Couldn't DMA map a %d byte buffer\n", len);
+ goto out_copy;
+ }
+
+ tx = dmaengine_prep_slave_sg(info->dma, &sg, n,
+ is_write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!tx)
+ goto out_copy_unmap;
+
+ tx->callback = omap_nand_dma_callback;
+ tx->callback_param = &info->comp;
+ dmaengine_submit(tx);
+
+ init_completion(&info->comp);
+
+ /* setup and start DMA using dma_addr */
+ dma_async_issue_pending(info->dma);
+
+ /* configure and start prefetch transfer */
+ ret = omap_prefetch_enable(info->gpmc_cs,
+ PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write, info);
+ if (ret)
+ /* PFPW engine is busy, use cpu copy method */
+ goto out_copy_unmap;
+
+ wait_for_completion(&info->comp);
+ tim = 0;
+ limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
+
+ do {
+ cpu_relax();
+ val = readl(info->reg.gpmc_prefetch_status);
+ val = PREFETCH_STATUS_COUNT(val);
+ } while (val && (tim++ < limit));
+
+ /* disable and stop the PFPW engine */
+ omap_prefetch_reset(info->gpmc_cs, info);
+
+ dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
+ return 0;
+
+out_copy_unmap:
+ dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
+out_copy:
+ if (info->nand.options & NAND_BUSWIDTH_16) {
+ if (is_write == 0)
+ omap_read_buf16(mtd, (u_char *)addr, len);
+ else
+ omap_write_buf16(mtd, (u_char *)addr, len);
+ } else {
+ if (is_write == 0)
+ omap_read_buf8(mtd, (u_char *)addr, len);
+ else
+ omap_write_buf8(mtd, (u_char *)addr, len);
+ }
+ return 0;
+}
+
+/**
+ * omap_read_buf_dma_pref - read data from NAND controller into buffer
+ * @chip: NAND chip object
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ */
+static void omap_read_buf_dma_pref(struct nand_chip *chip, u_char *buf,
+ int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (len <= mtd->oobsize)
+ omap_read_buf_pref(chip, buf, len);
+ else
+ /* start transfer in DMA mode */
+ omap_nand_dma_transfer(mtd, buf, len, 0x0);
+}
+
+/**
+ * omap_write_buf_dma_pref - write buffer to NAND controller
+ * @chip: NAND chip object
+ * @buf: data buffer
+ * @len: number of bytes to write
+ */
+static void omap_write_buf_dma_pref(struct nand_chip *chip, const u_char *buf,
+ int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (len <= mtd->oobsize)
+ omap_write_buf_pref(chip, buf, len);
+ else
+ /* start transfer in DMA mode */
+ omap_nand_dma_transfer(mtd, (u_char *)buf, len, 0x1);
+}
+
+/*
+ * omap_nand_irq - GPMC irq handler
+ * @this_irq: gpmc irq number
+ * @dev: omap_nand_info structure pointer
+ */
+static irqreturn_t omap_nand_irq(int this_irq, void *dev)
+{
+ struct omap_nand_info *info = (struct omap_nand_info *) dev;
+ u32 bytes;
+
+ bytes = readl(info->reg.gpmc_prefetch_status);
+ bytes = PREFETCH_STATUS_FIFO_CNT(bytes);
+ bytes = bytes & 0xFFFC; /* io in multiple of 4 bytes */
+ if (info->iomode == OMAP_NAND_IO_WRITE) { /* checks for write io */
+ if (this_irq == info->gpmc_irq_count)
+ goto done;
+
+ if (info->buf_len && (info->buf_len < bytes))
+ bytes = info->buf_len;
+ else if (!info->buf_len)
+ bytes = 0;
+ iowrite32_rep(info->nand.legacy.IO_ADDR_W, (u32 *)info->buf,
+ bytes >> 2);
+ info->buf = info->buf + bytes;
+ info->buf_len -= bytes;
+
+ } else {
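+ /* read path: drain the prefetch FIFO into the buffer */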
+ ioread32_rep(info->nand.legacy.IO_ADDR_R, (u32 *)info->buf,
+ bytes >> 2);
+ info->buf = info->buf + bytes;
+
+ if (this_irq == info->gpmc_irq_count)
+ goto done;
+ }
+
+ return IRQ_HANDLED;
+
+done:
+ complete(&info->comp);
+
+ disable_irq_nosync(info->gpmc_irq_fifo);
+ disable_irq_nosync(info->gpmc_irq_count);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * omap_read_buf_irq_pref - read data from NAND controller into buffer
+ * @chip: NAND chip object
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ */
+static void omap_read_buf_irq_pref(struct nand_chip *chip, u_char *buf,
+ int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct omap_nand_info *info = mtd_to_omap(mtd);
+ int ret = 0;
+
+ if (len <= mtd->oobsize) {
+ omap_read_buf_pref(chip, buf, len);
+ return;
+ }
+
+ info->iomode = OMAP_NAND_IO_READ;
+ info->buf = buf;
+ init_completion(&info->comp);
+
+ /* configure and start prefetch transfer */
+ ret = omap_prefetch_enable(info->gpmc_cs,
+ PREFETCH_FIFOTHRESHOLD_MAX/2, 0x0, len, 0x0, info);
+ if (ret)
+ /* PFPW engine is busy, use cpu copy method */
+ goto out_copy;
+
+ info->buf_len = len;
+
+ enable_irq(info->gpmc_irq_count);
+ enable_irq(info->gpmc_irq_fifo);
+
+ /* waiting for read to complete */
+ wait_for_completion(&info->comp);
+
+ /* disable and stop the PFPW engine */
+ omap_prefetch_reset(info->gpmc_cs, info);
+ return;
+
+out_copy:
+ if (info->nand.options & NAND_BUSWIDTH_16)
+ omap_read_buf16(mtd, buf, len);
+ else
+ omap_read_buf8(mtd, buf, len);
+}
+
+/*
+ * omap_write_buf_irq_pref - write buffer to NAND controller
+ * @chip: NAND chip object
+ * @buf: data buffer
+ * @len: number of bytes to write
+ */
+static void omap_write_buf_irq_pref(struct nand_chip *chip, const u_char *buf,
+ int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct omap_nand_info *info = mtd_to_omap(mtd);
+ int ret = 0;
+ unsigned long tim, limit;
+ u32 val;
+
+ if (len <= mtd->oobsize) {
+ omap_write_buf_pref(chip, buf, len);
+ return;
+ }
+
+ info->iomode = OMAP_NAND_IO_WRITE;
+ info->buf = (u_char *) buf;
+ init_completion(&info->comp);
+
+ /* configure and start prefetch transfer, FIFO threshold = 24 (3/8 of max) */
+ ret = omap_prefetch_enable(info->gpmc_cs,
+ (PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8, 0x0, len, 0x1, info);
+ if (ret)
+ /* PFPW engine is busy, use cpu copy method */
+ goto out_copy;
+
+ info->buf_len = len;
+
+ enable_irq(info->gpmc_irq_count);
+ enable_irq(info->gpmc_irq_fifo);
+
+ /* waiting for write to complete */
+ wait_for_completion(&info->comp);
+
+ /* wait for data to be flushed out before resetting the prefetch */
+ tim = 0;
+ limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
+ do {
+ val = readl(info->reg.gpmc_prefetch_status);
+ val = PREFETCH_STATUS_COUNT(val);
+ cpu_relax();
+ } while (val && (tim++ < limit));
+
+ /* disable and stop the PFPW engine */
+ omap_prefetch_reset(info->gpmc_cs, info);
+ return;
+
+out_copy:
+ if (info->nand.options & NAND_BUSWIDTH_16)
+ omap_write_buf16(mtd, buf, len);
+ else
+ omap_write_buf8(mtd, buf, len);
+}
+
+/**
+ * gen_true_ecc - Generate the true ECC value
+ * @ecc_buf: buffer to store ecc code
+ *
+ * The generated true ECC value can be used when correcting
+ * data read from the NAND flash memory core.
+ */
+static void gen_true_ecc(u8 *ecc_buf)
+{
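+ /* pack the three ECC bytes into a single word laid out for the parity macros */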
+ u32 tmp = ecc_buf[0] | (ecc_buf[1] << 16) |
+ ((ecc_buf[2] & 0xF0) << 20) | ((ecc_buf[2] & 0x0F) << 8);
+
+ ecc_buf[0] = ~(P64o(tmp) | P64e(tmp) | P32o(tmp) | P32e(tmp) |
+ P16o(tmp) | P16e(tmp) | P8o(tmp) | P8e(tmp));
+ ecc_buf[1] = ~(P1024o(tmp) | P1024e(tmp) | P512o(tmp) | P512e(tmp) |
+ P256o(tmp) | P256e(tmp) | P128o(tmp) | P128e(tmp));
+ ecc_buf[2] = ~(P4o(tmp) | P4e(tmp) | P2o(tmp) | P2e(tmp) | P1o(tmp) |
+ P1e(tmp) | P2048o(tmp) | P2048e(tmp));
+}
+
+/**
+ * omap_compare_ecc - Detect (2 bits) and correct (1 bit) error in data
+ * @ecc_data1: ecc code from nand spare area
+ * @ecc_data2: ecc code from hardware register obtained from hardware ecc
+ * @page_data: page data
+ *
+ * This function compares two ECCs and indicates if there is an error.
+ * If the error can be corrected, it is corrected in the buffer.
+ * If there is no error, %0 is returned. If there is an error that
+ * was corrected, %1 is returned. Otherwise, %-EBADMSG is returned.
+ */
+static int omap_compare_ecc(u8 *ecc_data1, /* read from NAND memory */
+ u8 *ecc_data2, /* read from register */
+ u8 *page_data)
+{
+ uint i;
+ u8 tmp0_bit[8], tmp1_bit[8], tmp2_bit[8];
+ u8 comp0_bit[8], comp1_bit[8], comp2_bit[8];
+ u8 ecc_bit[24];
+ u8 ecc_sum = 0;
+ u8 find_bit = 0;
+ uint find_byte = 0;
+ int isEccFF;
+
+ isEccFF = ((*(u32 *)ecc_data1 & 0xFFFFFF) == 0xFFFFFF);
+
+ gen_true_ecc(ecc_data1);
+ gen_true_ecc(ecc_data2);
+
+ for (i = 0; i <= 2; i++) {
+ *(ecc_data1 + i) = ~(*(ecc_data1 + i));
+ *(ecc_data2 + i) = ~(*(ecc_data2 + i));
+ }
+
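+ /* decompose each ECC byte into individual bits for bitwise comparison */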
+ for (i = 0; i < 8; i++) {
+ tmp0_bit[i] = *ecc_data1 % 2;
+ *ecc_data1 = *ecc_data1 / 2;
+ }
+
+ for (i = 0; i < 8; i++) {
+ tmp1_bit[i] = *(ecc_data1 + 1) % 2;
+ *(ecc_data1 + 1) = *(ecc_data1 + 1) / 2;
+ }
+
+ for (i = 0; i < 8; i++) {
+ tmp2_bit[i] = *(ecc_data1 + 2) % 2;
+ *(ecc_data1 + 2) = *(ecc_data1 + 2) / 2;
+ }
+
+ for (i = 0; i < 8; i++) {
+ comp0_bit[i] = *ecc_data2 % 2;
+ *ecc_data2 = *ecc_data2 / 2;
+ }
+
+ for (i = 0; i < 8; i++) {
+ comp1_bit[i] = *(ecc_data2 + 1) % 2;
+ *(ecc_data2 + 1) = *(ecc_data2 + 1) / 2;
+ }
+
+ for (i = 0; i < 8; i++) {
+ comp2_bit[i] = *(ecc_data2 + 2) % 2;
+ *(ecc_data2 + 2) = *(ecc_data2 + 2) / 2;
+ }
+
+ for (i = 0; i < 6; i++)
+ ecc_bit[i] = tmp2_bit[i + 2] ^ comp2_bit[i + 2];
+
+ for (i = 0; i < 8; i++)
+ ecc_bit[i + 6] = tmp0_bit[i] ^ comp0_bit[i];
+
+ for (i = 0; i < 8; i++)
+ ecc_bit[i + 14] = tmp1_bit[i] ^ comp1_bit[i];
+
+ ecc_bit[22] = tmp2_bit[0] ^ comp2_bit[0];
+ ecc_bit[23] = tmp2_bit[1] ^ comp2_bit[1];
+
+ for (i = 0; i < 24; i++)
+ ecc_sum += ecc_bit[i];
+
+ switch (ecc_sum) {
+ case 0:
+ /* Not reached because this function is not called if
+ * ECC values are equal
+ */
+ return 0;
+
+ case 1:
+ /* Uncorrectable error */
+ pr_debug("ECC UNCORRECTED_ERROR 1\n");
+ return -EBADMSG;
+
+ case 11:
+ /* Uncorrectable error */
+ pr_debug("ECC UNCORRECTED_ERROR B\n");
+ return -EBADMSG;
+
+ case 12:
+ /* Correctable error */
+ find_byte = (ecc_bit[23] << 8) +
+ (ecc_bit[21] << 7) +
+ (ecc_bit[19] << 6) +
+ (ecc_bit[17] << 5) +
+ (ecc_bit[15] << 4) +
+ (ecc_bit[13] << 3) +
+ (ecc_bit[11] << 2) +
+ (ecc_bit[9] << 1) +
+ ecc_bit[7];
+
+ find_bit = (ecc_bit[5] << 2) + (ecc_bit[3] << 1) + ecc_bit[1];
+
+ pr_debug("Correcting single bit ECC error at offset: "
+ "%d, bit: %d\n", find_byte, find_bit);
+
+ page_data[find_byte] ^= (1 << find_bit);
+
+ return 1;
+ default:
+ if (isEccFF) {
+ if (ecc_data2[0] == 0 &&
+ ecc_data2[1] == 0 &&
+ ecc_data2[2] == 0)
+ return 0;
+ }
+ pr_debug("UNCORRECTED_ERROR default\n");
+ return -EBADMSG;
+ }
+}
+
+/**
+ * omap_correct_data - Compares the ECC read with HW generated ECC
+ * @chip: NAND chip object
+ * @dat: page data
+ * @read_ecc: ecc read from nand flash
+ * @calc_ecc: ecc read from HW ECC registers
+ *
+ * Compares the ECC read from the NAND spare area with the ECC
+ * register values and, if they mismatch, calls 'omap_compare_ecc' for
+ * error detection and correction. If there are no errors, %0 is
+ * returned. If there were errors and all of them were corrected, the
+ * number of corrected errors is returned. If uncorrectable errors
+ * exist, %-EBADMSG is returned.
+ */
+static int omap_correct_data(struct nand_chip *chip, u_char *dat,
+ u_char *read_ecc, u_char *calc_ecc)
+{
+ struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
+ int blockCnt = 0, i = 0, ret = 0;
+ int stat = 0;
+
+ /* e.g. NAND_ECC_HW12_2048: one 2048-byte ECC step handled as four 512-byte blocks */
+ if (info->nand.ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST &&
+ info->nand.ecc.size == 2048)
+ blockCnt = 4;
+ else
+ blockCnt = 1;
+
+ for (i = 0; i < blockCnt; i++) {
+ if (memcmp(read_ecc, calc_ecc, 3) != 0) {
+ ret = omap_compare_ecc(read_ecc, calc_ecc, dat);
+ if (ret < 0)
+ return ret;
+ /* keep track of the number of corrected errors */
+ stat += ret;
+ }
+ read_ecc += 3;
+ calc_ecc += 3;
+ dat += 512;
+ }
+ return stat;
+}
+
+/**
+ * omap_calculate_ecc - Generate non-inverted ECC bytes.
+ * @chip: NAND chip object
+ * @dat: The pointer to data on which ecc is computed
+ * @ecc_code: The ecc_code buffer
+ *
+ * Using non-inverted ECC can be considered ugly since writing a blank
+ * page (i.e. padding) will clear the ECC bytes. This is no problem as
+ * long as nobody is trying to write data on the seemingly unused page.
+ * Reading an erased page will produce an ECC mismatch between the
+ * generated and read ECC bytes that has to be dealt with separately.
+ */
+static int omap_calculate_ecc(struct nand_chip *chip, const u_char *dat,
+ u_char *ecc_code)
+{
+ struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
+ u32 val;
+
+ val = readl(info->reg.gpmc_ecc_config);
+ if (((val >> ECC_CONFIG_CS_SHIFT) & CS_MASK) != info->gpmc_cs)
+ return -EINVAL;
+
+ /* read ecc result */
+ val = readl(info->reg.gpmc_ecc1_result);
+ *ecc_code++ = val; /* P128e, ..., P1e */
+ *ecc_code++ = val >> 16; /* P128o, ..., P1o */
+ /* P2048o, P1024o, P512o, P256o, P2048e, P1024e, P512e, P256e */
+ *ecc_code++ = ((val >> 8) & 0x0f) | ((val >> 20) & 0xf0);
+
+ return 0;
+}
+
+/**
+ * omap_enable_hwecc - This function enables the hardware ecc functionality
+ * @chip: NAND chip object
+ * @mode: Read/Write mode
+ */
+static void omap_enable_hwecc(struct nand_chip *chip, int mode)
+{
+ struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
+ unsigned int dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
+ u32 val;
+
+ /* clear ecc and enable bits */
+ val = ECCCLEAR | ECC1;
+ writel(val, info->reg.gpmc_ecc_control);
+
+ /* program ecc and result sizes */
+ val = ((((info->nand.ecc.size >> 1) - 1) << ECCSIZE1_SHIFT) |
+ ECC1RESULTSIZE);
+ writel(val, info->reg.gpmc_ecc_size_config);
+
+ switch (mode) {
+ case NAND_ECC_READ:
+ case NAND_ECC_WRITE:
+ writel(ECCCLEAR | ECC1, info->reg.gpmc_ecc_control);
+ break;
+ case NAND_ECC_READSYN:
+ writel(ECCCLEAR, info->reg.gpmc_ecc_control);
+ break;
+ default:
+ dev_info(&info->pdev->dev,
+ "error: unrecognized Mode[%d]!\n", mode);
+ break;
+ }
+
+ /* (ECC 16 or 8 bit col) | ( CS ) | ECC Enable */
+ val = (dev_width << 7) | (info->gpmc_cs << 1) | (0x1);
+ writel(val, info->reg.gpmc_ecc_config);
+}
+
+/**
+ * omap_wait - wait until the command is done
+ * @this: NAND Chip structure
+ *
+ * The wait function is called during program and erase operations.
+ * Given the way it is called from the MTD layer, we should wait until
+ * the NAND chip is ready after the programming/erase operation has
+ * completed.
+ *
+ * Erase can take up to 400ms and program up to 20ms according to
+ * general NAND and SmartMedia specs.
+ */
+static int omap_wait(struct nand_chip *this)
+{
+ struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(this));
+ unsigned long timeo = jiffies;
+ int status;
+
+ timeo += msecs_to_jiffies(400);
+
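+ /* issue READ STATUS and poll the ready bit until the timeout expires */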
+ writeb(NAND_CMD_STATUS & 0xFF, info->reg.gpmc_nand_command);
+ while (time_before(jiffies, timeo)) {
+ status = readb(info->reg.gpmc_nand_data);
+ if (status & NAND_STATUS_READY)
+ break;
+ cond_resched();
+ }
+
+ status = readb(info->reg.gpmc_nand_data);
+ return status;
+}
+
+/**
+ * omap_dev_ready - checks the NAND Ready GPIO line
+ * @chip: NAND chip object
+ *
+ * Returns true if ready and false if busy.
+ */
+static int omap_dev_ready(struct nand_chip *chip)
+{
+ struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
+
+ return gpiod_get_value(info->ready_gpiod);
+}
+
+/**
+ * omap_enable_hwecc_bch - Program GPMC to perform BCH ECC calculation
+ * @chip: NAND chip object
+ * @mode: Read/Write mode
+ *
+ * When using BCH with SW correction (i.e. no ELM), sector size is set
+ * to 512 bytes and we use BCH_WRAPMODE_6 wrapping mode
+ * for both reading and writing with:
+ * eccsize0 = 0 (no additional protected byte in spare area)
+ * eccsize1 = 32 (skip 32 nibbles = 16 bytes per sector in spare area)
+ */
+static void __maybe_unused omap_enable_hwecc_bch(struct nand_chip *chip,
+ int mode)
+{
+ unsigned int bch_type;
+ unsigned int dev_width, nsectors;
+ struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
+ enum omap_ecc ecc_opt = info->ecc_opt;
+ u32 val, wr_mode;
+ unsigned int ecc_size1, ecc_size0;
+
+ /* GPMC configurations for calculating ECC */
+ switch (ecc_opt) {
+ case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
+ bch_type = 0;
+ nsectors = 1;
+ wr_mode = BCH_WRAPMODE_6;
+ ecc_size0 = BCH_ECC_SIZE0;
+ ecc_size1 = BCH_ECC_SIZE1;
+ break;
+ case OMAP_ECC_BCH4_CODE_HW:
+ bch_type = 0;
+ nsectors = chip->ecc.steps;
+ if (mode == NAND_ECC_READ) {
+ wr_mode = BCH_WRAPMODE_1;
+ ecc_size0 = BCH4R_ECC_SIZE0;
+ ecc_size1 = BCH4R_ECC_SIZE1;
+ } else {
+ wr_mode = BCH_WRAPMODE_6;
+ ecc_size0 = BCH_ECC_SIZE0;
+ ecc_size1 = BCH_ECC_SIZE1;
+ }
+ break;
+ case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
+ bch_type = 1;
+ nsectors = 1;
+ wr_mode = BCH_WRAPMODE_6;
+ ecc_size0 = BCH_ECC_SIZE0;
+ ecc_size1 = BCH_ECC_SIZE1;
+ break;
+ case OMAP_ECC_BCH8_CODE_HW:
+ bch_type = 1;
+ nsectors = chip->ecc.steps;
+ if (mode == NAND_ECC_READ) {
+ wr_mode = BCH_WRAPMODE_1;
+ ecc_size0 = BCH8R_ECC_SIZE0;
+ ecc_size1 = BCH8R_ECC_SIZE1;
+ } else {
+ wr_mode = BCH_WRAPMODE_6;
+ ecc_size0 = BCH_ECC_SIZE0;
+ ecc_size1 = BCH_ECC_SIZE1;
+ }
+ break;
+ case OMAP_ECC_BCH16_CODE_HW:
+ bch_type = 0x2;
+ nsectors = chip->ecc.steps;
+ if (mode == NAND_ECC_READ) {
+ wr_mode = 0x01;
+ ecc_size0 = 52; /* ECC bits in nibbles per sector */
+ ecc_size1 = 0; /* non-ECC bits in nibbles per sector */
+ } else {
+ wr_mode = 0x01;
+ ecc_size0 = 0; /* extra bits in nibbles per sector */
+ ecc_size1 = 52; /* OOB bits in nibbles per sector */
+ }
+ break;
+ default:
+ return;
+ }
+
+ writel(ECC1, info->reg.gpmc_ecc_control);
+
+ /* Configure ecc size for BCH */
+ val = (ecc_size1 << ECCSIZE1_SHIFT) | (ecc_size0 << ECCSIZE0_SHIFT);
+ writel(val, info->reg.gpmc_ecc_size_config);
+
+ dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
+
+ /* BCH configuration */
+ val = ((1 << 16) | /* enable BCH */
+ (bch_type << 12) | /* BCH4/BCH8/BCH16 */
+ (wr_mode << 8) | /* wrap mode */
+ (dev_width << 7) | /* bus width */
+ (((nsectors-1) & 0x7) << 4) | /* number of sectors */
+ (info->gpmc_cs << 1) | /* ECC CS */
+ (0x1)); /* enable ECC */
+
+ writel(val, info->reg.gpmc_ecc_config);
+
+ /* Clear ecc and enable bits */
+ writel(ECCCLEAR | ECC1, info->reg.gpmc_ecc_control);
+}
+
+static u8 bch4_polynomial[] = {0x28, 0x13, 0xcc, 0x39, 0x96, 0xac, 0x7f};
+static u8 bch8_polynomial[] = {0xef, 0x51, 0x2e, 0x09, 0xed, 0x93, 0x9a, 0xc2,
+ 0x97, 0x79, 0xe5, 0x24, 0xb5};
+
+/**
+ * _omap_calculate_ecc_bch - Generate ECC bytes for one sector
+ * @mtd: MTD device structure
+ * @dat: The pointer to data on which ecc is computed
+ * @ecc_calc: Buffer storing the calculated ECC bytes
+ * @i: The sector number (for a multi sector page)
+ *
+ * Supports calculating BCH4/8/16 ECC vectors for one sector
+ * within a page. The sector number is in @i.
+ */
+static int _omap_calculate_ecc_bch(struct mtd_info *mtd,
+ const u_char *dat, u_char *ecc_calc, int i)
+{
+ struct omap_nand_info *info = mtd_to_omap(mtd);
+ int eccbytes = info->nand.ecc.bytes;
+ struct gpmc_nand_regs *gpmc_regs = &info->reg;
+ u8 *ecc_code;
+ unsigned long bch_val1, bch_val2, bch_val3, bch_val4;
+ u32 val;
+ int j;
+
+ ecc_code = ecc_calc;
+ switch (info->ecc_opt) {
+ case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
+ case OMAP_ECC_BCH8_CODE_HW:
+ bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
+ bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
+ bch_val3 = readl(gpmc_regs->gpmc_bch_result2[i]);
+ bch_val4 = readl(gpmc_regs->gpmc_bch_result3[i]);
+ *ecc_code++ = (bch_val4 & 0xFF);
+ *ecc_code++ = ((bch_val3 >> 24) & 0xFF);
+ *ecc_code++ = ((bch_val3 >> 16) & 0xFF);
+ *ecc_code++ = ((bch_val3 >> 8) & 0xFF);
+ *ecc_code++ = (bch_val3 & 0xFF);
+ *ecc_code++ = ((bch_val2 >> 24) & 0xFF);
+ *ecc_code++ = ((bch_val2 >> 16) & 0xFF);
+ *ecc_code++ = ((bch_val2 >> 8) & 0xFF);
+ *ecc_code++ = (bch_val2 & 0xFF);
+ *ecc_code++ = ((bch_val1 >> 24) & 0xFF);
+ *ecc_code++ = ((bch_val1 >> 16) & 0xFF);
+ *ecc_code++ = ((bch_val1 >> 8) & 0xFF);
+ *ecc_code++ = (bch_val1 & 0xFF);
+ break;
+ case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
+ case OMAP_ECC_BCH4_CODE_HW:
+ bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
+ bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
+ *ecc_code++ = ((bch_val2 >> 12) & 0xFF);
+ *ecc_code++ = ((bch_val2 >> 4) & 0xFF);
+ *ecc_code++ = ((bch_val2 & 0xF) << 4) |
+ ((bch_val1 >> 28) & 0xF);
+ *ecc_code++ = ((bch_val1 >> 20) & 0xFF);
+ *ecc_code++ = ((bch_val1 >> 12) & 0xFF);
+ *ecc_code++ = ((bch_val1 >> 4) & 0xFF);
+ *ecc_code++ = ((bch_val1 & 0xF) << 4);
+ break;
+ case OMAP_ECC_BCH16_CODE_HW:
+ val = readl(gpmc_regs->gpmc_bch_result6[i]);
+ ecc_code[0] = ((val >> 8) & 0xFF);
+ ecc_code[1] = ((val >> 0) & 0xFF);
+ val = readl(gpmc_regs->gpmc_bch_result5[i]);
+ ecc_code[2] = ((val >> 24) & 0xFF);
+ ecc_code[3] = ((val >> 16) & 0xFF);
+ ecc_code[4] = ((val >> 8) & 0xFF);
+ ecc_code[5] = ((val >> 0) & 0xFF);
+ val = readl(gpmc_regs->gpmc_bch_result4[i]);
+ ecc_code[6] = ((val >> 24) & 0xFF);
+ ecc_code[7] = ((val >> 16) & 0xFF);
+ ecc_code[8] = ((val >> 8) & 0xFF);
+ ecc_code[9] = ((val >> 0) & 0xFF);
+ val = readl(gpmc_regs->gpmc_bch_result3[i]);
+ ecc_code[10] = ((val >> 24) & 0xFF);
+ ecc_code[11] = ((val >> 16) & 0xFF);
+ ecc_code[12] = ((val >> 8) & 0xFF);
+ ecc_code[13] = ((val >> 0) & 0xFF);
+ val = readl(gpmc_regs->gpmc_bch_result2[i]);
+ ecc_code[14] = ((val >> 24) & 0xFF);
+ ecc_code[15] = ((val >> 16) & 0xFF);
+ ecc_code[16] = ((val >> 8) & 0xFF);
+ ecc_code[17] = ((val >> 0) & 0xFF);
+ val = readl(gpmc_regs->gpmc_bch_result1[i]);
+ ecc_code[18] = ((val >> 24) & 0xFF);
+ ecc_code[19] = ((val >> 16) & 0xFF);
+ ecc_code[20] = ((val >> 8) & 0xFF);
+ ecc_code[21] = ((val >> 0) & 0xFF);
+ val = readl(gpmc_regs->gpmc_bch_result0[i]);
+ ecc_code[22] = ((val >> 24) & 0xFF);
+ ecc_code[23] = ((val >> 16) & 0xFF);
+ ecc_code[24] = ((val >> 8) & 0xFF);
+ ecc_code[25] = ((val >> 0) & 0xFF);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* ECC scheme specific syndrome customizations */
+ switch (info->ecc_opt) {
+ case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
+ /* Add constant polynomial to remainder, so that
+ * ECC of blank pages results in 0x0 on reading back
+ */
+ for (j = 0; j < eccbytes; j++)
+ ecc_calc[j] ^= bch4_polynomial[j];
+ break;
+ case OMAP_ECC_BCH4_CODE_HW:
+ /* Set 8th ECC byte as 0x0 for ROM compatibility */
+ ecc_calc[eccbytes - 1] = 0x0;
+ break;
+ case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
+ /* Add constant polynomial to remainder, so that
+ * ECC of blank pages results in 0x0 on reading back
+ */
+ for (j = 0; j < eccbytes; j++)
+ ecc_calc[j] ^= bch8_polynomial[j];
+ break;
+ case OMAP_ECC_BCH8_CODE_HW:
+ /* Set 14th ECC byte as 0x0 for ROM compatibility */
+ ecc_calc[eccbytes - 1] = 0x0;
+ break;
+ case OMAP_ECC_BCH16_CODE_HW:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * omap_calculate_ecc_bch_sw - ECC generator for sector for SW based correction
+ * @chip: NAND chip object
+ * @dat: The pointer to data on which ecc is computed
+ * @ecc_calc: Buffer storing the calculated ECC bytes
+ *
+ * Supports calculating BCH4/8/16 ECC vectors for one sector. This is
+ * used when SW-based correction is required, as the ECC is then needed
+ * for one sector at a time.
+ */
+static int omap_calculate_ecc_bch_sw(struct nand_chip *chip,
+ const u_char *dat, u_char *ecc_calc)
+{
+ return _omap_calculate_ecc_bch(nand_to_mtd(chip), dat, ecc_calc, 0);
+}
+
+/**
+ * omap_calculate_ecc_bch_multi - Generate ECC for multiple sectors
+ * @mtd: MTD device structure
+ * @dat: The pointer to data on which ecc is computed
+ * @ecc_calc: Buffer storing the calculated ECC bytes
+ *
+ * Supports calculating BCH4/8/16 ECC vectors for the entire page in one go.
+ */
+static int omap_calculate_ecc_bch_multi(struct mtd_info *mtd,
+ const u_char *dat, u_char *ecc_calc)
+{
+ struct omap_nand_info *info = mtd_to_omap(mtd);
+ int eccbytes = info->nand.ecc.bytes;
+ unsigned long nsectors;
+ int i, ret;
+
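+ /* the number of sectors was programmed into gpmc_ecc_config by the hwctl hook */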
+ nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1;
+ for (i = 0; i < nsectors; i++) {
+ ret = _omap_calculate_ecc_bch(mtd, dat, ecc_calc, i);
+ if (ret)
+ return ret;
+
+ ecc_calc += eccbytes;
+ }
+
+ return 0;
+}
+
+/**
+ * erased_sector_bitflips - count bit flips
+ * @data: data sector buffer
+ * @oob: oob buffer
+ * @info: omap_nand_info
+ *
+ * Check whether the number of bit flips in an erased page falls below
+ * the correctable level. If it does, report the page as erased with
+ * correctable bit flips; otherwise report it as uncorrectable.
+ */
+static int erased_sector_bitflips(u_char *data, u_char *oob,
+ struct omap_nand_info *info)
+{
+ int flip_bits = 0, i;
+
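+ /* count 0-bits in the data area, bailing out once past the ECC strength */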
+ for (i = 0; i < info->nand.ecc.size; i++) {
+ flip_bits += hweight8(~data[i]);
+ if (flip_bits > info->nand.ecc.strength)
+ return 0;
+ }
+
+ for (i = 0; i < info->nand.ecc.bytes - 1; i++) {
+ flip_bits += hweight8(~oob[i]);
+ if (flip_bits > info->nand.ecc.strength)
+ return 0;
+ }
+
+ /*
+ * The bit flips fall within the correctable level.
+ * Fill the data area with 0xFF.
+ */
+ if (flip_bits) {
+ memset(data, 0xFF, info->nand.ecc.size);
+ memset(oob, 0xFF, info->nand.ecc.bytes);
+ }
+
+ return flip_bits;
+}
+
+/**
+ * omap_elm_correct_data - corrects page data area in case error reported
+ * @chip: NAND chip object
+ * @data: page data
+ * @read_ecc: ecc read from nand flash
+ * @calc_ecc: ecc read from HW ECC registers
+ *
+ * The calculated ECC vector is reported as zero for error-free pages.
+ * For a non-zero ECC vector, first filter out erased pages, and then
+ * process the data via the ELM to detect bit-flips.
+ */
+static int omap_elm_correct_data(struct nand_chip *chip, u_char *data,
+ u_char *read_ecc, u_char *calc_ecc)
+{
+ struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
+ struct nand_ecc_ctrl *ecc = &info->nand.ecc;
+ int eccsteps = info->nand.ecc.steps;
+ int i, j, stat = 0;
+ int eccflag, actual_eccbytes;
+ struct elm_errorvec err_vec[ERROR_VECTOR_MAX];
+ u_char *ecc_vec = calc_ecc;
+ u_char *spare_ecc = read_ecc;
+ u_char *erased_ecc_vec;
+ u_char *buf;
+ int bitflip_count;
+ bool is_error_reported = false;
+ u32 bit_pos, byte_pos, error_max, pos;
+ int err;
+
+ switch (info->ecc_opt) {
+ case OMAP_ECC_BCH4_CODE_HW:
+ /* omit 8th ECC byte reserved for ROM code compatibility */
+ actual_eccbytes = ecc->bytes - 1;
+ erased_ecc_vec = bch4_vector;
+ break;
+ case OMAP_ECC_BCH8_CODE_HW:
+ /* omit 14th ECC byte reserved for ROM code compatibility */
+ actual_eccbytes = ecc->bytes - 1;
+ erased_ecc_vec = bch8_vector;
+ break;
+ case OMAP_ECC_BCH16_CODE_HW:
+ actual_eccbytes = ecc->bytes;
+ erased_ecc_vec = bch16_vector;
+ break;
+ default:
+ dev_err(&info->pdev->dev, "invalid driver configuration\n");
+ return -EINVAL;
+ }
+
+ /* Initialize elm error vector to zero */
+ memset(err_vec, 0, sizeof(err_vec));
+
+ for (i = 0; i < eccsteps; i++) {
+ eccflag = 0; /* initialize eccflag */
+
+ /*
+ * Check whether any error was reported;
+ * a non-zero ECC indicates an error.
+ */
+ for (j = 0; j < actual_eccbytes; j++) {
+ if (calc_ecc[j] != 0) {
+ eccflag = 1; /* non zero ecc, error present */
+ break;
+ }
+ }
+
+ if (eccflag == 1) {
+ if (memcmp(calc_ecc, erased_ecc_vec,
+ actual_eccbytes) == 0) {
+ /*
+ * calc_ecc[] matches the ECC pattern of an all-0xff
+ * page, so this is definitely an erased page.
+ */
+ } else {
+ buf = &data[info->nand.ecc.size * i];
+ /*
+ * Count the number of 0-bits in read_buf.
+ * This check can be removed once a similar
+ * check is introduced in the generic NAND driver.
+ */
+ bitflip_count = erased_sector_bitflips(
+ buf, read_ecc, info);
+ if (bitflip_count) {
+ /*
+ * The number of 0-bits is within ECC
+ * limits, so this may be an erased page.
+ */
+ stat += bitflip_count;
+ } else {
+ /*
+ * Too many 0-bits. It may be either
+ * - a programmed page, OR
+ * - an erased page with many bit-flips,
+ * so this page requires a check by the ELM.
+ */
+ err_vec[i].error_reported = true;
+ is_error_reported = true;
+ }
+ }
+ }
+
+ /* Update the ecc vector */
+ calc_ecc += ecc->bytes;
+ read_ecc += ecc->bytes;
+ }
+
+ /* Check if any error reported */
+ if (!is_error_reported)
+ return stat;
+
+ /* Decode BCH error using ELM module */
+ elm_decode_bch_error_page(info->elm_dev, ecc_vec, err_vec);
+
+ err = 0;
+ for (i = 0; i < eccsteps; i++) {
+ if (err_vec[i].error_uncorrectable) {
+ dev_err(&info->pdev->dev,
+ "uncorrectable bit-flips found\n");
+ err = -EBADMSG;
+ } else if (err_vec[i].error_reported) {
+ for (j = 0; j < err_vec[i].error_count; j++) {
+ switch (info->ecc_opt) {
+ case OMAP_ECC_BCH4_CODE_HW:
+ /* Add 4 bits to take care of padding */
+ pos = err_vec[i].error_loc[j] +
+ BCH4_BIT_PAD;
+ break;
+ case OMAP_ECC_BCH8_CODE_HW:
+ case OMAP_ECC_BCH16_CODE_HW:
+ pos = err_vec[i].error_loc[j];
+ break;
+ default:
+ return -EINVAL;
+ }
+ error_max = (ecc->size + actual_eccbytes) * 8;
+ /* Calculate bit position of error */
+ bit_pos = pos % 8;
+
+ /* Calculate byte position of error */
+ byte_pos = (error_max - pos - 1) / 8;
+
+ if (pos < error_max) {
+ if (byte_pos < 512) {
+ pr_debug("bitflip@dat[%d]=%x\n",
+ byte_pos, data[byte_pos]);
+ data[byte_pos] ^= 1 << bit_pos;
+ } else {
+ pr_debug("bitflip@oob[%d]=%x\n",
+ (byte_pos - 512),
+ spare_ecc[byte_pos - 512]);
+ spare_ecc[byte_pos - 512] ^=
+ 1 << bit_pos;
+ }
+ } else {
+ dev_err(&info->pdev->dev,
+ "invalid bit-flip @ %d:%d\n",
+ byte_pos, bit_pos);
+ err = -EBADMSG;
+ }
+ }
+ }
+
+ /* Update number of correctable errors */
+ stat = max_t(unsigned int, stat, err_vec[i].error_count);
+
+ /* Update page data with sector size */
+ data += ecc->size;
+ spare_ecc += ecc->bytes;
+ }
+
+ return (err) ? err : stat;
+}
+
+/**
+ * omap_write_page_bch - BCH ecc based write page function for entire page
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page
+ *
+ * Custom write page method developed to support multi-sector writing in one shot.
+ */
+static int omap_write_page_bch(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
+
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
+ /* Enable GPMC ecc engine */
+ chip->ecc.hwctl(chip, NAND_ECC_WRITE);
+
+ /* Write data */
+ chip->legacy.write_buf(chip, buf, mtd->writesize);
+
+ /* Update ecc vector from GPMC result registers */
+ omap_calculate_ecc_bch_multi(mtd, buf, &ecc_calc[0]);
+
+ ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
+ chip->ecc.total);
+ if (ret)
+ return ret;
+
+ /* Write ecc vector to OOB area */
+ chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);
+
+ return nand_prog_page_end_op(chip);
+}
+
+/**
+ * omap_write_subpage_bch - BCH hardware ECC based subpage write
+ * @chip: nand chip info structure
+ * @offset: column address of subpage within the page
+ * @data_len: data length
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ *
+ * OMAP optimized subpage write method.
+ */
+static int omap_write_subpage_bch(struct nand_chip *chip, u32 offset,
+ u32 data_len, const u8 *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u8 *ecc_calc = chip->ecc.calc_buf;
+ int ecc_size = chip->ecc.size;
+ int ecc_bytes = chip->ecc.bytes;
+ int ecc_steps = chip->ecc.steps;
+ u32 start_step = offset / ecc_size;
+ u32 end_step = (offset + data_len - 1) / ecc_size;
+ int step, ret = 0;
+
+ /*
+ * Write the entire page in one go, which is optimal
+ * since the ECC is calculated by hardware.
+ * ECC is calculated for all subpages, but we pick
+ * only the ones we want.
+ */
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
+ /* Enable GPMC ECC engine */
+ chip->ecc.hwctl(chip, NAND_ECC_WRITE);
+
+ /* Write data */
+ chip->legacy.write_buf(chip, buf, mtd->writesize);
+
+ for (step = 0; step < ecc_steps; step++) {
+ /* mask the ECC of untouched subpages by padding with 0xFF */
+ if (step < start_step || step > end_step)
+ memset(ecc_calc, 0xff, ecc_bytes);
+ else
+ ret = _omap_calculate_ecc_bch(mtd, buf, ecc_calc, step);
+
+ if (ret)
+ return ret;
+
+ buf += ecc_size;
+ ecc_calc += ecc_bytes;
+ }
+
+ /*
+ * Copy the calculated ECC for the whole page to chip->oob_poi;
+ * this includes the masked value (0xFF) for unwritten subpages.
+ */
+ ecc_calc = chip->ecc.calc_buf;
+ ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
+ chip->ecc.total);
+ if (ret)
+ return ret;
+
+ /* write OOB buffer to NAND device */
+ chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);
+
+ return nand_prog_page_end_op(chip);
+}
+
+/**
+ * omap_read_page_bch - BCH ecc based page read function for entire page
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * For the BCH ECC scheme, the GPMC is used for syndrome calculation
+ * and the ELM module for error correction.
+ * This custom method was developed to support ELM error correction and
+ * multi-sector reading. On read, the page data area is read along with
+ * the OOB data with the ECC engine enabled, and the ECC vector is
+ * updated after the OOB data has been read. For error-free pages the
+ * ECC vector is reported as zero.
+ */
+static int omap_read_page_bch(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
+ uint8_t *ecc_code = chip->ecc.code_buf;
+ int stat, ret;
+ unsigned int max_bitflips = 0;
+
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
+ /* Enable GPMC ecc engine */
+ chip->ecc.hwctl(chip, NAND_ECC_READ);
+
+ /* Read data */
+ chip->legacy.read_buf(chip, buf, mtd->writesize);
+
+ /* Read oob bytes */
+ nand_change_read_column_op(chip,
+ mtd->writesize + BADBLOCK_MARKER_LENGTH,
+ chip->oob_poi + BADBLOCK_MARKER_LENGTH,
+ chip->ecc.total, false);
+
+ /* Calculate ecc bytes */
+ omap_calculate_ecc_bch_multi(mtd, buf, ecc_calc);
+
+ ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
+ chip->ecc.total);
+ if (ret)
+ return ret;
+
+ stat = chip->ecc.correct(chip, buf, ecc_code, ecc_calc);
+
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+
+ return max_bitflips;
+}
+
+/**
+ * is_elm_present - checks for presence of ELM module by scanning DT nodes
+ * @info: NAND device structure containing platform data
+ * @elm_node: ELM device tree node
+ */
+static bool is_elm_present(struct omap_nand_info *info,
+ struct device_node *elm_node)
+{
+ struct platform_device *pdev;
+
+ /* check whether elm-id is passed via DT */
+ if (!elm_node) {
+ dev_err(&info->pdev->dev, "ELM devicetree node not found\n");
+ return false;
+ }
+ pdev = of_find_device_by_node(elm_node);
+ /* check whether ELM device is registered */
+ if (!pdev) {
+ dev_err(&info->pdev->dev, "ELM device not found\n");
+ return false;
+ }
+ /* ELM module available, now configure it */
+ info->elm_dev = &pdev->dev;
+ return true;
+}
+
+static bool omap2_nand_ecc_check(struct omap_nand_info *info)
+{
+ bool ecc_needs_bch, ecc_needs_omap_bch, ecc_needs_elm;
+
+ switch (info->ecc_opt) {
+ case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
+ case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
+ ecc_needs_omap_bch = false;
+ ecc_needs_bch = true;
+ ecc_needs_elm = false;
+ break;
+ case OMAP_ECC_BCH4_CODE_HW:
+ case OMAP_ECC_BCH8_CODE_HW:
+ case OMAP_ECC_BCH16_CODE_HW:
+ ecc_needs_omap_bch = true;
+ ecc_needs_bch = false;
+ ecc_needs_elm = true;
+ break;
+ default:
+ ecc_needs_omap_bch = false;
+ ecc_needs_bch = false;
+ ecc_needs_elm = false;
+ break;
+ }
+
+ if (ecc_needs_bch && !IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_BCH)) {
+ dev_err(&info->pdev->dev,
+ "CONFIG_MTD_NAND_ECC_SW_BCH not enabled\n");
+ return false;
+ }
+ if (ecc_needs_omap_bch && !IS_ENABLED(CONFIG_MTD_NAND_OMAP_BCH)) {
+ dev_err(&info->pdev->dev,
+ "CONFIG_MTD_NAND_OMAP_BCH not enabled\n");
+ return false;
+ }
+ if (ecc_needs_elm && !is_elm_present(info, info->elm_of_node)) {
+ dev_err(&info->pdev->dev, "ELM not available\n");
+ return false;
+ }
+
+ return true;
+}
+
+static const char * const nand_xfer_types[] = {
+ [NAND_OMAP_PREFETCH_POLLED] = "prefetch-polled",
+ [NAND_OMAP_POLLED] = "polled",
+ [NAND_OMAP_PREFETCH_DMA] = "prefetch-dma",
+ [NAND_OMAP_PREFETCH_IRQ] = "prefetch-irq",
+};
+
+static int omap_get_dt_info(struct device *dev, struct omap_nand_info *info)
+{
+ struct device_node *child = dev->of_node;
+ int i;
+ const char *s;
+ u32 cs;
+
+ if (of_property_read_u32(child, "reg", &cs) < 0) {
+ dev_err(dev, "reg not found in DT\n");
+ return -EINVAL;
+ }
+
+ info->gpmc_cs = cs;
+
+ /* detect availability of ELM module. Won't be present pre-OMAP4 */
+ info->elm_of_node = of_parse_phandle(child, "ti,elm-id", 0);
+ if (!info->elm_of_node) {
+ info->elm_of_node = of_parse_phandle(child, "elm_id", 0);
+ if (!info->elm_of_node)
+ dev_dbg(dev, "ti,elm-id not in DT\n");
+ }
+
+ /* select ecc-scheme for NAND */
+ if (of_property_read_string(child, "ti,nand-ecc-opt", &s)) {
+ dev_err(dev, "ti,nand-ecc-opt not found\n");
+ return -EINVAL;
+ }
+
+ if (!strcmp(s, "sw")) {
+ info->ecc_opt = OMAP_ECC_HAM1_CODE_SW;
+ } else if (!strcmp(s, "ham1") ||
+ !strcmp(s, "hw") || !strcmp(s, "hw-romcode")) {
+ info->ecc_opt = OMAP_ECC_HAM1_CODE_HW;
+ } else if (!strcmp(s, "bch4")) {
+ if (info->elm_of_node)
+ info->ecc_opt = OMAP_ECC_BCH4_CODE_HW;
+ else
+ info->ecc_opt = OMAP_ECC_BCH4_CODE_HW_DETECTION_SW;
+ } else if (!strcmp(s, "bch8")) {
+ if (info->elm_of_node)
+ info->ecc_opt = OMAP_ECC_BCH8_CODE_HW;
+ else
+ info->ecc_opt = OMAP_ECC_BCH8_CODE_HW_DETECTION_SW;
+ } else if (!strcmp(s, "bch16")) {
+ info->ecc_opt = OMAP_ECC_BCH16_CODE_HW;
+ } else {
+ dev_err(dev, "unrecognized value for ti,nand-ecc-opt\n");
+ return -EINVAL;
+ }
+
+ /* select data transfer mode */
+ if (!of_property_read_string(child, "ti,nand-xfer-type", &s)) {
+ for (i = 0; i < ARRAY_SIZE(nand_xfer_types); i++) {
+ if (!strcasecmp(s, nand_xfer_types[i])) {
+ info->xfer_type = i;
+ return 0;
+ }
+ }
+
+ dev_err(dev, "unrecognized value for ti,nand-xfer-type\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
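+
+/*
+ * Illustrative DT fragment (the property names match the parsing above;
+ * the chip-select, ECC scheme and transfer-mode values are only examples):
+ *
+ * nand@0,0 {
+ * compatible = "ti,omap2-nand";
+ * reg = <0 0 4>; (only the first cell, the CS number, is read here)
+ * ti,nand-ecc-opt = "bch8";
+ * ti,nand-xfer-type = "prefetch-dma";
+ * ti,elm-id = <&elm>;
+ * };
+ */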
+
+static int omap_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct omap_nand_info *info = mtd_to_omap(mtd);
+ struct nand_chip *chip = &info->nand;
+ int off = BADBLOCK_MARKER_LENGTH;
+
+ if (info->ecc_opt == OMAP_ECC_HAM1_CODE_HW &&
+ !(chip->options & NAND_BUSWIDTH_16))
+ off = 1;
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = off;
+ oobregion->length = chip->ecc.total;
+
+ return 0;
+}
+
+static int omap_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct omap_nand_info *info = mtd_to_omap(mtd);
+ struct nand_chip *chip = &info->nand;
+ int off = BADBLOCK_MARKER_LENGTH;
+
+ if (info->ecc_opt == OMAP_ECC_HAM1_CODE_HW &&
+ !(chip->options & NAND_BUSWIDTH_16))
+ off = 1;
+
+ if (section)
+ return -ERANGE;
+
+ off += chip->ecc.total;
+ if (off >= mtd->oobsize)
+ return -ERANGE;
+
+ oobregion->offset = off;
+ oobregion->length = mtd->oobsize - off;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops omap_ooblayout_ops = {
+ .ecc = omap_ooblayout_ecc,
+ .free = omap_ooblayout_free,
+};
+
+static int omap_sw_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ int off = BADBLOCK_MARKER_LENGTH;
+
+ if (section >= chip->ecc.steps)
+ return -ERANGE;
+
+ /*
+ * When SW correction is employed, one OMAP specific marker byte is
+ * reserved after each ECC step.
+ */
+ oobregion->offset = off + (section * (chip->ecc.bytes + 1));
+ oobregion->length = chip->ecc.bytes;
+
+ return 0;
+}
+
+static int omap_sw_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ int off = BADBLOCK_MARKER_LENGTH;
+
+ if (section)
+ return -ERANGE;
+
+ /*
+ * When SW correction is employed, one OMAP specific marker byte is
+ * reserved after each ECC step.
+ */
+ off += ((chip->ecc.bytes + 1) * chip->ecc.steps);
+ if (off >= mtd->oobsize)
+ return -ERANGE;
+
+ oobregion->offset = off;
+ oobregion->length = mtd->oobsize - off;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops omap_sw_ooblayout_ops = {
+ .ecc = omap_sw_ooblayout_ecc,
+ .free = omap_sw_ooblayout_free,
+};
+
+static int omap_nand_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct omap_nand_info *info = mtd_to_omap(mtd);
+ struct device *dev = &info->pdev->dev;
+ int min_oobbytes = BADBLOCK_MARKER_LENGTH;
+ int oobbytes_per_step;
+ dma_cap_mask_t mask;
+ int err;
+
+ if (chip->bbt_options & NAND_BBT_USE_FLASH)
+ chip->bbt_options |= NAND_BBT_NO_OOB;
+ else
+ chip->options |= NAND_SKIP_BBTSCAN;
+
+ /* Re-populate low-level callbacks based on xfer modes */
+ switch (info->xfer_type) {
+ case NAND_OMAP_PREFETCH_POLLED:
+ chip->legacy.read_buf = omap_read_buf_pref;
+ chip->legacy.write_buf = omap_write_buf_pref;
+ break;
+
+ case NAND_OMAP_POLLED:
+ /* Use nand_base defaults for {read,write}_buf */
+ break;
+
+ case NAND_OMAP_PREFETCH_DMA:
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ info->dma = dma_request_chan(dev->parent, "rxtx");
+
+ if (IS_ERR(info->dma)) {
+ dev_err(dev, "DMA engine request failed\n");
+ return PTR_ERR(info->dma);
+ } else {
+ struct dma_slave_config cfg;
+
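+ /* 32-bit FIFO accesses with bursts of 16 words in each direction */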
+ memset(&cfg, 0, sizeof(cfg));
+ cfg.src_addr = info->phys_base;
+ cfg.dst_addr = info->phys_base;
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.src_maxburst = 16;
+ cfg.dst_maxburst = 16;
+ err = dmaengine_slave_config(info->dma, &cfg);
+ if (err) {
+ dev_err(dev,
+ "DMA engine slave config failed: %d\n",
+ err);
+ return err;
+ }
+ chip->legacy.read_buf = omap_read_buf_dma_pref;
+ chip->legacy.write_buf = omap_write_buf_dma_pref;
+ }
+ break;
+
+ case NAND_OMAP_PREFETCH_IRQ:
+ info->gpmc_irq_fifo = platform_get_irq(info->pdev, 0);
+ if (info->gpmc_irq_fifo <= 0)
+ return -ENODEV;
+ err = devm_request_irq(dev, info->gpmc_irq_fifo,
+ omap_nand_irq, IRQF_SHARED,
+ "gpmc-nand-fifo", info);
+ if (err) {
+ dev_err(dev, "Requesting IRQ %d, error %d\n",
+ info->gpmc_irq_fifo, err);
+ info->gpmc_irq_fifo = 0;
+ return err;
+ }
+
+ info->gpmc_irq_count = platform_get_irq(info->pdev, 1);
+ if (info->gpmc_irq_count <= 0)
+ return -ENODEV;
+ err = devm_request_irq(dev, info->gpmc_irq_count,
+ omap_nand_irq, IRQF_SHARED,
+ "gpmc-nand-count", info);
+ if (err) {
+ dev_err(dev, "Requesting IRQ %d, error %d\n",
+ info->gpmc_irq_count, err);
+ info->gpmc_irq_count = 0;
+ return err;
+ }
+
+ chip->legacy.read_buf = omap_read_buf_irq_pref;
+ chip->legacy.write_buf = omap_write_buf_irq_pref;
+
+ break;
+
+ default:
+ dev_err(dev, "xfer_type %d not supported!\n", info->xfer_type);
+ return -EINVAL;
+ }
+
+ if (!omap2_nand_ecc_check(info))
+ return -EINVAL;
+
+ /*
+ * Bail out earlier to let NAND_ECC_ENGINE_TYPE_SOFT code create its own
+ * ooblayout instead of using ours.
+ */
+ if (info->ecc_opt == OMAP_ECC_HAM1_CODE_SW) {
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+ return 0;
+ }
+
+ /* Populate MTD interface based on ECC scheme */
+ switch (info->ecc_opt) {
+ case OMAP_ECC_HAM1_CODE_HW:
+ dev_info(dev, "nand: using OMAP_ECC_HAM1_CODE_HW\n");
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+ chip->ecc.bytes = 3;
+ chip->ecc.size = 512;
+ chip->ecc.strength = 1;
+ chip->ecc.calculate = omap_calculate_ecc;
+ chip->ecc.hwctl = omap_enable_hwecc;
+ chip->ecc.correct = omap_correct_data;
+ mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
+ oobbytes_per_step = chip->ecc.bytes;
+
+ if (!(chip->options & NAND_BUSWIDTH_16))
+ min_oobbytes = 1;
+
+ break;
+
+ case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
+ pr_info("nand: using OMAP_ECC_BCH4_CODE_HW_DETECTION_SW\n");
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 7;
+ chip->ecc.strength = 4;
+ chip->ecc.hwctl = omap_enable_hwecc_bch;
+ chip->ecc.correct = nand_bch_correct_data;
+ chip->ecc.calculate = omap_calculate_ecc_bch_sw;
+ mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
+ /* Reserve one byte for the OMAP marker */
+ oobbytes_per_step = chip->ecc.bytes + 1;
+ /* Software BCH library is used for locating errors */
+ chip->ecc.priv = nand_bch_init(mtd);
+ if (!chip->ecc.priv) {
+ dev_err(dev, "Unable to use BCH library\n");
+ return -EINVAL;
+ }
+ break;
+
+ case OMAP_ECC_BCH4_CODE_HW:
+ pr_info("nand: using OMAP_ECC_BCH4_CODE_HW ECC scheme\n");
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+ chip->ecc.size = 512;
+ /* the 8th ECC byte is kept reserved for ROM-code compatibility */
+ chip->ecc.bytes = 7 + 1;
+ chip->ecc.strength = 4;
+ chip->ecc.hwctl = omap_enable_hwecc_bch;
+ chip->ecc.correct = omap_elm_correct_data;
+ chip->ecc.read_page = omap_read_page_bch;
+ chip->ecc.write_page = omap_write_page_bch;
+ chip->ecc.write_subpage = omap_write_subpage_bch;
+ mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
+ oobbytes_per_step = chip->ecc.bytes;
+
+ err = elm_config(info->elm_dev, BCH4_ECC,
+ mtd->writesize / chip->ecc.size,
+ chip->ecc.size, chip->ecc.bytes);
+ if (err < 0)
+ return err;
+ break;
+
+ case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
+ pr_info("nand: using OMAP_ECC_BCH8_CODE_HW_DETECTION_SW\n");
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 13;
+ chip->ecc.strength = 8;
+ chip->ecc.hwctl = omap_enable_hwecc_bch;
+ chip->ecc.correct = nand_bch_correct_data;
+ chip->ecc.calculate = omap_calculate_ecc_bch_sw;
+ mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
+ /* Reserve one byte for the OMAP marker */
+ oobbytes_per_step = chip->ecc.bytes + 1;
+ /* Software BCH library is used for locating errors */
+ chip->ecc.priv = nand_bch_init(mtd);
+ if (!chip->ecc.priv) {
+ dev_err(dev, "unable to use BCH library\n");
+ return -EINVAL;
+ }
+ break;
+
+ case OMAP_ECC_BCH8_CODE_HW:
+ pr_info("nand: using OMAP_ECC_BCH8_CODE_HW ECC scheme\n");
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+ chip->ecc.size = 512;
+ /* the 14th ECC byte is kept reserved for ROM-code compatibility */
+ chip->ecc.bytes = 13 + 1;
+ chip->ecc.strength = 8;
+ chip->ecc.hwctl = omap_enable_hwecc_bch;
+ chip->ecc.correct = omap_elm_correct_data;
+ chip->ecc.read_page = omap_read_page_bch;
+ chip->ecc.write_page = omap_write_page_bch;
+ chip->ecc.write_subpage = omap_write_subpage_bch;
+ mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
+ oobbytes_per_step = chip->ecc.bytes;
+
+ err = elm_config(info->elm_dev, BCH8_ECC,
+ mtd->writesize / chip->ecc.size,
+ chip->ecc.size, chip->ecc.bytes);
+ if (err < 0)
+ return err;
+
+ break;
+
+ case OMAP_ECC_BCH16_CODE_HW:
+ pr_info("Using OMAP_ECC_BCH16_CODE_HW ECC scheme\n");
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 26;
+ chip->ecc.strength = 16;
+ chip->ecc.hwctl = omap_enable_hwecc_bch;
+ chip->ecc.correct = omap_elm_correct_data;
+ chip->ecc.read_page = omap_read_page_bch;
+ chip->ecc.write_page = omap_write_page_bch;
+ chip->ecc.write_subpage = omap_write_subpage_bch;
+ mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
+ oobbytes_per_step = chip->ecc.bytes;
+
+ err = elm_config(info->elm_dev, BCH16_ECC,
+ mtd->writesize / chip->ecc.size,
+ chip->ecc.size, chip->ecc.bytes);
+ if (err < 0)
+ return err;
+
+ break;
+ default:
+ dev_err(dev, "Invalid or unsupported ECC scheme\n");
+ return -EINVAL;
+ }
+
+ /* Check if NAND device's OOB is enough to store ECC signatures */
+ min_oobbytes += (oobbytes_per_step *
+ (mtd->writesize / chip->ecc.size));
+ if (mtd->oobsize < min_oobbytes) {
+ dev_err(dev,
+ "Not enough OOB bytes: required = %d, available=%d\n",
+ min_oobbytes, mtd->oobsize);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct nand_controller_ops omap_nand_controller_ops = {
+ .attach_chip = omap_nand_attach_chip,
+};
+
+/* Shared among all NAND instances to synchronize access to the ECC Engine */
+static struct nand_controller omap_gpmc_controller;
+static bool omap_gpmc_controller_initialized;
+
+static int omap_nand_probe(struct platform_device *pdev)
+{
+ struct omap_nand_info *info;
+ struct mtd_info *mtd;
+ struct nand_chip *nand_chip;
+ int err;
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+
+ info = devm_kzalloc(&pdev->dev, sizeof(struct omap_nand_info),
+ GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->pdev = pdev;
+
+ err = omap_get_dt_info(dev, info);
+ if (err)
+ return err;
+
+ info->ops = gpmc_omap_get_nand_ops(&info->reg, info->gpmc_cs);
+ if (!info->ops) {
+ dev_err(&pdev->dev, "Failed to get GPMC->NAND interface\n");
+ return -ENODEV;
+ }
+
+ nand_chip = &info->nand;
+ mtd = nand_to_mtd(nand_chip);
+ mtd->dev.parent = &pdev->dev;
+ nand_chip->ecc.priv = NULL;
+ nand_set_flash_node(nand_chip, dev->of_node);
+
+ if (!mtd->name) {
+ mtd->name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+ "omap2-nand.%d", info->gpmc_cs);
+ if (!mtd->name) {
+ dev_err(&pdev->dev, "Failed to set MTD name\n");
+ return -ENOMEM;
+ }
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ nand_chip->legacy.IO_ADDR_R = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(nand_chip->legacy.IO_ADDR_R))
+ return PTR_ERR(nand_chip->legacy.IO_ADDR_R);
+
+ info->phys_base = res->start;
+
+ if (!omap_gpmc_controller_initialized) {
+ omap_gpmc_controller.ops = &omap_nand_controller_ops;
+ nand_controller_init(&omap_gpmc_controller);
+ omap_gpmc_controller_initialized = true;
+ }
+
+ nand_chip->controller = &omap_gpmc_controller;
+
+ nand_chip->legacy.IO_ADDR_W = nand_chip->legacy.IO_ADDR_R;
+ nand_chip->legacy.cmd_ctrl = omap_hwcontrol;
+
+ info->ready_gpiod = devm_gpiod_get_optional(&pdev->dev, "rb",
+ GPIOD_IN);
+ if (IS_ERR(info->ready_gpiod)) {
+ dev_err(dev, "failed to get ready gpio\n");
+ return PTR_ERR(info->ready_gpiod);
+ }
+
+ /*
+ * If the RDY/BSY line is connected to the OMAP, use the omap ready
+ * function and the generic nand_wait function, which reads the status
+ * register after monitoring the RDY/BSY line. Otherwise use a standard
+ * chip delay, which is slightly more than tR (AC Timing) of the NAND
+ * device, and read the status register until failure or success.
+ */
+ if (info->ready_gpiod) {
+ nand_chip->legacy.dev_ready = omap_dev_ready;
+ nand_chip->legacy.chip_delay = 0;
+ } else {
+ nand_chip->legacy.waitfunc = omap_wait;
+ nand_chip->legacy.chip_delay = 50;
+ }
+
+ if (info->flash_bbt)
+ nand_chip->bbt_options |= NAND_BBT_USE_FLASH;
+
+ /* scan NAND device connected to chip controller */
+ nand_chip->options |= info->devsize & NAND_BUSWIDTH_16;
+
+ err = nand_scan(nand_chip, 1);
+ if (err)
+ goto return_error;
+
+ err = mtd_device_register(mtd, NULL, 0);
+ if (err)
+ goto cleanup_nand;
+
+ platform_set_drvdata(pdev, mtd);
+
+ return 0;
+
+cleanup_nand:
+ nand_cleanup(nand_chip);
+
+return_error:
+ if (!IS_ERR_OR_NULL(info->dma))
+ dma_release_channel(info->dma);
+ if (nand_chip->ecc.priv) {
+ nand_bch_free(nand_chip->ecc.priv);
+ nand_chip->ecc.priv = NULL;
+ }
+ return err;
+}
+
+static int omap_nand_remove(struct platform_device *pdev)
+{
+ struct mtd_info *mtd = platform_get_drvdata(pdev);
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+ struct omap_nand_info *info = mtd_to_omap(mtd);
+ int ret;
+
+ if (nand_chip->ecc.priv) {
+ nand_bch_free(nand_chip->ecc.priv);
+ nand_chip->ecc.priv = NULL;
+ }
+ if (info->dma)
+ dma_release_channel(info->dma);
+ ret = mtd_device_unregister(mtd);
+ WARN_ON(ret);
+ nand_cleanup(nand_chip);
+ return ret;
+}
+
+static const struct of_device_id omap_nand_ids[] = {
+ { .compatible = "ti,omap2-nand", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, omap_nand_ids);
+
+static struct platform_driver omap_nand_driver = {
+ .probe = omap_nand_probe,
+ .remove = omap_nand_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = of_match_ptr(omap_nand_ids),
+ },
+};
+
+module_platform_driver(omap_nand_driver);
+
+MODULE_ALIAS("platform:" DRIVER_NAME);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Glue layer for NAND flash on TI OMAP boards");
diff --git a/drivers/mtd/nand/raw/omap_elm.c b/drivers/mtd/nand/raw/omap_elm.c
new file mode 100644
index 000000000..dad17fa0b
--- /dev/null
+++ b/drivers/mtd/nand/raw/omap_elm.c
@@ -0,0 +1,573 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Error Location Module
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/
+ */
+
+#define DRIVER_NAME "omap-elm"
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/sched.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_data/elm.h>
+
+#define ELM_SYSCONFIG 0x010
+#define ELM_IRQSTATUS 0x018
+#define ELM_IRQENABLE 0x01c
+#define ELM_LOCATION_CONFIG 0x020
+#define ELM_PAGE_CTRL 0x080
+#define ELM_SYNDROME_FRAGMENT_0 0x400
+#define ELM_SYNDROME_FRAGMENT_1 0x404
+#define ELM_SYNDROME_FRAGMENT_2 0x408
+#define ELM_SYNDROME_FRAGMENT_3 0x40c
+#define ELM_SYNDROME_FRAGMENT_4 0x410
+#define ELM_SYNDROME_FRAGMENT_5 0x414
+#define ELM_SYNDROME_FRAGMENT_6 0x418
+#define ELM_LOCATION_STATUS 0x800
+#define ELM_ERROR_LOCATION_0 0x880
+
+/* ELM Interrupt Status Register */
+#define INTR_STATUS_PAGE_VALID BIT(8)
+
+/* ELM Interrupt Enable Register */
+#define INTR_EN_PAGE_MASK BIT(8)
+
+/* ELM Location Configuration Register */
+#define ECC_BCH_LEVEL_MASK 0x3
+
+/* ELM syndrome */
+#define ELM_SYNDROME_VALID BIT(16)
+
+/* ELM_LOCATION_STATUS Register */
+#define ECC_CORRECTABLE_MASK BIT(8)
+#define ECC_NB_ERRORS_MASK 0x1f
+
+/* ELM_ERROR_LOCATION_0-15 Registers */
+#define ECC_ERROR_LOCATION_MASK 0x1fff
+
+#define ELM_ECC_SIZE 0x7ff
+
+#define SYNDROME_FRAGMENT_REG_SIZE 0x40
+#define ERROR_LOCATION_SIZE 0x100
+
+struct elm_registers {
+ u32 elm_irqenable;
+ u32 elm_sysconfig;
+ u32 elm_location_config;
+ u32 elm_page_ctrl;
+ u32 elm_syndrome_fragment_6[ERROR_VECTOR_MAX];
+ u32 elm_syndrome_fragment_5[ERROR_VECTOR_MAX];
+ u32 elm_syndrome_fragment_4[ERROR_VECTOR_MAX];
+ u32 elm_syndrome_fragment_3[ERROR_VECTOR_MAX];
+ u32 elm_syndrome_fragment_2[ERROR_VECTOR_MAX];
+ u32 elm_syndrome_fragment_1[ERROR_VECTOR_MAX];
+ u32 elm_syndrome_fragment_0[ERROR_VECTOR_MAX];
+};
+
+struct elm_info {
+ struct device *dev;
+ void __iomem *elm_base;
+ struct completion elm_completion;
+ struct list_head list;
+ enum bch_ecc bch_type;
+ struct elm_registers elm_regs;
+ int ecc_steps;
+ int ecc_syndrome_size;
+};
+
+static LIST_HEAD(elm_devices);
+
+static void elm_write_reg(struct elm_info *info, int offset, u32 val)
+{
+ writel(val, info->elm_base + offset);
+}
+
+static u32 elm_read_reg(struct elm_info *info, int offset)
+{
+ return readl(info->elm_base + offset);
+}
+
+/**
+ * elm_config - Configure ELM module
+ * @dev: ELM device
+ * @bch_type: Type of BCH ecc
+ * @ecc_steps: ECC steps to assign to the configuration
+ * @ecc_step_size: ECC step size to assign to the configuration
+ * @ecc_syndrome_size: ECC syndrome size to assign to the configuration
+ */
+int elm_config(struct device *dev, enum bch_ecc bch_type,
+ int ecc_steps, int ecc_step_size, int ecc_syndrome_size)
+{
+ u32 reg_val;
+ struct elm_info *info = dev_get_drvdata(dev);
+
+ if (!info) {
+ dev_err(dev, "Unable to configure elm - device not probed?\n");
+ return -EPROBE_DEFER;
+ }
+ /* ELM cannot detect ECC errors for chunks > 1KB */
+ if (ecc_step_size > ((ELM_ECC_SIZE + 1) / 2)) {
+ dev_err(dev, "unsupported config ecc-size=%d\n", ecc_step_size);
+ return -EINVAL;
+ }
+ /* The ELM supports processing at most 8 error syndromes */
+ if (ecc_steps > ERROR_VECTOR_MAX) {
+ dev_err(dev, "unsupported config ecc-step=%d\n", ecc_steps);
+ return -EINVAL;
+ }
+
+ reg_val = (bch_type & ECC_BCH_LEVEL_MASK) | (ELM_ECC_SIZE << 16);
+ elm_write_reg(info, ELM_LOCATION_CONFIG, reg_val);
+ info->bch_type = bch_type;
+ info->ecc_steps = ecc_steps;
+ info->ecc_syndrome_size = ecc_syndrome_size;
+
+ return 0;
+}
+EXPORT_SYMBOL(elm_config);
+
+/**
+ * elm_configure_page_mode - Enable/Disable page mode
+ * @info: elm info
+ * @index: index number of syndrome fragment vector
+ * @enable: enable/disable flag for page mode
+ *
+ * Enable or disable page mode for the given syndrome fragment index.
+ */
+static void elm_configure_page_mode(struct elm_info *info, int index,
+ bool enable)
+{
+ u32 reg_val;
+
+ reg_val = elm_read_reg(info, ELM_PAGE_CTRL);
+ if (enable)
+ reg_val |= BIT(index); /* enable page mode */
+ else
+ reg_val &= ~BIT(index); /* disable page mode */
+
+ elm_write_reg(info, ELM_PAGE_CTRL, reg_val);
+}
+
+/**
+ * elm_load_syndrome - Load ELM syndrome reg
+ * @info: elm info
+ * @err_vec: elm error vectors
+ * @ecc: buffer with calculated ecc
+ *
+ * Load syndrome fragment registers with calculated ecc in reverse order.
+ */
+static void elm_load_syndrome(struct elm_info *info,
+ struct elm_errorvec *err_vec, u8 *ecc)
+{
+ int i, offset;
+ u32 val;
+
+ for (i = 0; i < info->ecc_steps; i++) {
+
+ /* Check error reported */
+ if (err_vec[i].error_reported) {
+ elm_configure_page_mode(info, i, true);
+ offset = ELM_SYNDROME_FRAGMENT_0 +
+ SYNDROME_FRAGMENT_REG_SIZE * i;
+ switch (info->bch_type) {
+ case BCH8_ECC:
+ /* syndrome fragment 0 = ecc[9-12B] */
+ val = (__force u32)cpu_to_be32(*(u32 *)&ecc[9]);
+ elm_write_reg(info, offset, val);
+
+ /* syndrome fragment 1 = ecc[5-8B] */
+ offset += 4;
+ val = (__force u32)cpu_to_be32(*(u32 *)&ecc[5]);
+ elm_write_reg(info, offset, val);
+
+ /* syndrome fragment 2 = ecc[1-4B] */
+ offset += 4;
+ val = (__force u32)cpu_to_be32(*(u32 *)&ecc[1]);
+ elm_write_reg(info, offset, val);
+
+ /* syndrome fragment 3 = ecc[0B] */
+ offset += 4;
+ val = ecc[0];
+ elm_write_reg(info, offset, val);
+ break;
+ case BCH4_ECC:
+ /* syndrome fragment 0 = ecc[20-52b] bits */
+ val = ((__force u32)cpu_to_be32(*(u32 *)&ecc[3]) >> 4) |
+ ((ecc[2] & 0xf) << 28);
+ elm_write_reg(info, offset, val);
+
+ /* syndrome fragment 1 = ecc[0-20b] bits */
+ offset += 4;
+ val = (__force u32)cpu_to_be32(*(u32 *)&ecc[0]) >> 12;
+ elm_write_reg(info, offset, val);
+ break;
+ case BCH16_ECC:
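+ /* all 26 BCH16 ECC bytes are loaded as seven 32-bit syndrome fragments */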
+ val = (__force u32)cpu_to_be32(*(u32 *)&ecc[22]);
+ elm_write_reg(info, offset, val);
+ offset += 4;
+ val = (__force u32)cpu_to_be32(*(u32 *)&ecc[18]);
+ elm_write_reg(info, offset, val);
+ offset += 4;
+ val = (__force u32)cpu_to_be32(*(u32 *)&ecc[14]);
+ elm_write_reg(info, offset, val);
+ offset += 4;
+ val = (__force u32)cpu_to_be32(*(u32 *)&ecc[10]);
+ elm_write_reg(info, offset, val);
+ offset += 4;
+ val = (__force u32)cpu_to_be32(*(u32 *)&ecc[6]);
+ elm_write_reg(info, offset, val);
+ offset += 4;
+ val = (__force u32)cpu_to_be32(*(u32 *)&ecc[2]);
+ elm_write_reg(info, offset, val);
+ offset += 4;
+ val = (__force u32)cpu_to_be32(*(u32 *)&ecc[0]) >> 16;
+ elm_write_reg(info, offset, val);
+ break;
+ default:
+ pr_err("invalid config bch_type\n");
+ }
+ }
+
+ /* Update ecc pointer with ecc byte size */
+ ecc += info->ecc_syndrome_size;
+ }
+}
+
+/**
+ * elm_start_processing - start elm syndrome processing
+ * @info: elm info
+ * @err_vec: elm error vectors
+ *
+ * Set the SYNDROME_VALID bit for each syndrome fragment register that
+ * has been loaded. This tells the ELM module to start processing the
+ * corresponding syndrome vectors.
+ */
+static void elm_start_processing(struct elm_info *info,
+ struct elm_errorvec *err_vec)
+{
+ int i, offset;
+ u32 reg_val;
+
+ /*
+ * Mark the syndrome vectors valid so that the ELM module
+ * processes those for which an error was reported.
+ */
+ for (i = 0; i < info->ecc_steps; i++) {
+ if (err_vec[i].error_reported) {
+ offset = ELM_SYNDROME_FRAGMENT_6 +
+ SYNDROME_FRAGMENT_REG_SIZE * i;
+ reg_val = elm_read_reg(info, offset);
+ reg_val |= ELM_SYNDROME_VALID;
+ elm_write_reg(info, offset, reg_val);
+ }
+ }
+}
+
+/**
+ * elm_error_correction - locate correctable error position
+ * @info: elm info
+ * @err_vec: elm error vectors
+ *
+ * Once the ELM module finishes processing, the error location status
+ * register is updated with correctable/uncorrectable error information.
+ * For correctable errors, the error count is read from the ELM location
+ * status register and the error positions from the ELM error location
+ * registers.
+ */
+static void elm_error_correction(struct elm_info *info,
+ struct elm_errorvec *err_vec)
+{
+ int i, j, errors = 0;
+ int offset;
+ u32 reg_val;
+
+ for (i = 0; i < info->ecc_steps; i++) {
+
+ /* Check error reported */
+ if (err_vec[i].error_reported) {
+ offset = ELM_LOCATION_STATUS + ERROR_LOCATION_SIZE * i;
+ reg_val = elm_read_reg(info, offset);
+
+ /* Check correctable error or not */
+ if (reg_val & ECC_CORRECTABLE_MASK) {
+ offset = ELM_ERROR_LOCATION_0 +
+ ERROR_LOCATION_SIZE * i;
+
+ /* Read count of correctable errors */
+ err_vec[i].error_count = reg_val &
+ ECC_NB_ERRORS_MASK;
+
+ /* Update the error locations in error vector */
+ for (j = 0; j < err_vec[i].error_count; j++) {
+
+ reg_val = elm_read_reg(info, offset);
+ err_vec[i].error_loc[j] = reg_val &
+ ECC_ERROR_LOCATION_MASK;
+
+ /* Update error location register */
+ offset += 4;
+ }
+
+ errors += err_vec[i].error_count;
+ } else {
+ err_vec[i].error_uncorrectable = true;
+ }
+
+ /* Clearing interrupts for processed error vectors */
+ elm_write_reg(info, ELM_IRQSTATUS, BIT(i));
+
+ /* Disable page mode */
+ elm_configure_page_mode(info, i, false);
+ }
+ }
+}
+
+/**
+ * elm_decode_bch_error_page - Locate error position
+ * @dev: device pointer
+ * @ecc_calc: calculated ECC bytes from GPMC
+ * @err_vec: elm error vectors
+ *
+ * Called with one or more vectors that have errors reported; the vectors
+ * with reported errors are flagged in err_vec[].error_reported.
+ */
+void elm_decode_bch_error_page(struct device *dev, u8 *ecc_calc,
+ struct elm_errorvec *err_vec)
+{
+ struct elm_info *info = dev_get_drvdata(dev);
+ u32 reg_val;
+
+ /* Enable page mode interrupt */
+ reg_val = elm_read_reg(info, ELM_IRQSTATUS);
+ elm_write_reg(info, ELM_IRQSTATUS, reg_val & INTR_STATUS_PAGE_VALID);
+ elm_write_reg(info, ELM_IRQENABLE, INTR_EN_PAGE_MASK);
+
+ /* Load valid ecc byte to syndrome fragment register */
+ elm_load_syndrome(info, err_vec, ecc_calc);
+
+ /* Enable syndrome processing for which syndrome fragment is updated */
+ elm_start_processing(info, err_vec);
+
+ /* Wait for ELM module to finish locating error correction */
+ wait_for_completion(&info->elm_completion);
+
+ /* Disable page mode interrupt */
+ reg_val = elm_read_reg(info, ELM_IRQENABLE);
+ elm_write_reg(info, ELM_IRQENABLE, reg_val & ~INTR_EN_PAGE_MASK);
+ elm_error_correction(info, err_vec);
+}
+EXPORT_SYMBOL(elm_decode_bch_error_page);
+
+static irqreturn_t elm_isr(int this_irq, void *dev_id)
+{
+ u32 reg_val;
+ struct elm_info *info = dev_id;
+
+ reg_val = elm_read_reg(info, ELM_IRQSTATUS);
+
+ /* All error vectors processed */
+ if (reg_val & INTR_STATUS_PAGE_VALID) {
+ elm_write_reg(info, ELM_IRQSTATUS,
+ reg_val & INTR_STATUS_PAGE_VALID);
+ complete(&info->elm_completion);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int elm_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct resource *res, *irq;
+ struct elm_info *info;
+
+ info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->dev = &pdev->dev;
+
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!irq) {
+ dev_err(&pdev->dev, "no irq resource defined\n");
+ return -ENODEV;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ info->elm_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(info->elm_base))
+ return PTR_ERR(info->elm_base);
+
+ ret = devm_request_irq(&pdev->dev, irq->start, elm_isr, 0,
+ pdev->name, info);
+ if (ret) {
+ dev_err(&pdev->dev, "failure requesting %pr\n", irq);
+ return ret;
+ }
+
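+ /*
+ * Runtime PM also gates the ELM functional clock, hence the
+ * "can't enable clock" error on failure below.
+ */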
+ pm_runtime_enable(&pdev->dev);
+ if (pm_runtime_get_sync(&pdev->dev) < 0) {
+ ret = -EINVAL;
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ dev_err(&pdev->dev, "can't enable clock\n");
+ return ret;
+ }
+
+ init_completion(&info->elm_completion);
+ INIT_LIST_HEAD(&info->list);
+ list_add(&info->list, &elm_devices);
+ platform_set_drvdata(pdev, info);
+ return ret;
+}
+
+static int elm_remove(struct platform_device *pdev)
+{
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/**
+ * elm_context_save
+ * saves the ELM configuration so it can be restored after the hardware is powered down
+ */
+static int elm_context_save(struct elm_info *info)
+{
+ struct elm_registers *regs = &info->elm_regs;
+ enum bch_ecc bch_type = info->bch_type;
+ u32 offset = 0, i;
+
+ regs->elm_irqenable = elm_read_reg(info, ELM_IRQENABLE);
+ regs->elm_sysconfig = elm_read_reg(info, ELM_SYSCONFIG);
+ regs->elm_location_config = elm_read_reg(info, ELM_LOCATION_CONFIG);
+ regs->elm_page_ctrl = elm_read_reg(info, ELM_PAGE_CTRL);
+ for (i = 0; i < ERROR_VECTOR_MAX; i++) {
+ offset = i * SYNDROME_FRAGMENT_REG_SIZE;
+ switch (bch_type) {
+ case BCH16_ECC:
+ regs->elm_syndrome_fragment_6[i] = elm_read_reg(info,
+ ELM_SYNDROME_FRAGMENT_6 + offset);
+ regs->elm_syndrome_fragment_5[i] = elm_read_reg(info,
+ ELM_SYNDROME_FRAGMENT_5 + offset);
+ regs->elm_syndrome_fragment_4[i] = elm_read_reg(info,
+ ELM_SYNDROME_FRAGMENT_4 + offset);
+ fallthrough;
+ case BCH8_ECC:
+ regs->elm_syndrome_fragment_3[i] = elm_read_reg(info,
+ ELM_SYNDROME_FRAGMENT_3 + offset);
+ regs->elm_syndrome_fragment_2[i] = elm_read_reg(info,
+ ELM_SYNDROME_FRAGMENT_2 + offset);
+ fallthrough;
+ case BCH4_ECC:
+ regs->elm_syndrome_fragment_1[i] = elm_read_reg(info,
+ ELM_SYNDROME_FRAGMENT_1 + offset);
+ regs->elm_syndrome_fragment_0[i] = elm_read_reg(info,
+ ELM_SYNDROME_FRAGMENT_0 + offset);
+ break;
+ default:
+ return -EINVAL;
+ }
+ /*
+ * The ELM SYNDROME_VALID bit in SYNDROME_FRAGMENT_6[] needs
+ * to be saved for all BCH schemes.
+ */
+ regs->elm_syndrome_fragment_6[i] = elm_read_reg(info,
+ ELM_SYNDROME_FRAGMENT_6 + offset);
+ }
+ return 0;
+}
+
+/**
+ * elm_context_restore
+ * writes the configuration saved during power-down back into the ELM registers
+ */
+static int elm_context_restore(struct elm_info *info)
+{
+ struct elm_registers *regs = &info->elm_regs;
+ enum bch_ecc bch_type = info->bch_type;
+ u32 offset = 0, i;
+
+ elm_write_reg(info, ELM_IRQENABLE, regs->elm_irqenable);
+ elm_write_reg(info, ELM_SYSCONFIG, regs->elm_sysconfig);
+ elm_write_reg(info, ELM_LOCATION_CONFIG, regs->elm_location_config);
+ elm_write_reg(info, ELM_PAGE_CTRL, regs->elm_page_ctrl);
+ for (i = 0; i < ERROR_VECTOR_MAX; i++) {
+ offset = i * SYNDROME_FRAGMENT_REG_SIZE;
+ switch (bch_type) {
+ case BCH16_ECC:
+ elm_write_reg(info, ELM_SYNDROME_FRAGMENT_6 + offset,
+ regs->elm_syndrome_fragment_6[i]);
+ elm_write_reg(info, ELM_SYNDROME_FRAGMENT_5 + offset,
+ regs->elm_syndrome_fragment_5[i]);
+ elm_write_reg(info, ELM_SYNDROME_FRAGMENT_4 + offset,
+ regs->elm_syndrome_fragment_4[i]);
+ fallthrough;
+ case BCH8_ECC:
+ elm_write_reg(info, ELM_SYNDROME_FRAGMENT_3 + offset,
+ regs->elm_syndrome_fragment_3[i]);
+ elm_write_reg(info, ELM_SYNDROME_FRAGMENT_2 + offset,
+ regs->elm_syndrome_fragment_2[i]);
+ fallthrough;
+ case BCH4_ECC:
+ elm_write_reg(info, ELM_SYNDROME_FRAGMENT_1 + offset,
+ regs->elm_syndrome_fragment_1[i]);
+ elm_write_reg(info, ELM_SYNDROME_FRAGMENT_0 + offset,
+ regs->elm_syndrome_fragment_0[i]);
+ break;
+ default:
+ return -EINVAL;
+ }
+ /* The ELM_SYNDROME_VALID bit must be set last to trigger the FSM */
+ elm_write_reg(info, ELM_SYNDROME_FRAGMENT_6 + offset,
+ regs->elm_syndrome_fragment_6[i] &
+ ELM_SYNDROME_VALID);
+ }
+ return 0;
+}
+
+static int elm_suspend(struct device *dev)
+{
+ struct elm_info *info = dev_get_drvdata(dev);
+ elm_context_save(info);
+ pm_runtime_put_sync(dev);
+ return 0;
+}
+
+static int elm_resume(struct device *dev)
+{
+ struct elm_info *info = dev_get_drvdata(dev);
+ pm_runtime_get_sync(dev);
+ elm_context_restore(info);
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(elm_pm_ops, elm_suspend, elm_resume);
+
+#ifdef CONFIG_OF
+static const struct of_device_id elm_of_match[] = {
+ { .compatible = "ti,am3352-elm" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, elm_of_match);
+#endif
+
+static struct platform_driver elm_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = of_match_ptr(elm_of_match),
+ .pm = &elm_pm_ops,
+ },
+ .probe = elm_probe,
+ .remove = elm_remove,
+};
+
+module_platform_driver(elm_driver);
+
+MODULE_DESCRIPTION("ELM driver for BCH error correction");
+MODULE_AUTHOR("Texas Instruments");
+MODULE_ALIAS("platform:" DRIVER_NAME);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/raw/orion_nand.c b/drivers/mtd/nand/raw/orion_nand.c
new file mode 100644
index 000000000..2c87c7d89
--- /dev/null
+++ b/drivers/mtd/nand/raw/orion_nand.c
@@ -0,0 +1,254 @@
+/*
+ * NAND support for Marvell Orion SoC platforms
+ *
+ * Tzachi Perelstein <tzachi@marvell.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/sizes.h>
+#include <linux/platform_data/mtd-orion_nand.h>
+
+struct orion_nand_info {
+ struct nand_controller controller;
+ struct nand_chip chip;
+ struct clk *clk;
+};
+
+static void orion_nand_cmd_ctrl(struct nand_chip *nc, int cmd,
+ unsigned int ctrl)
+{
+ struct orion_nand_data *board = nand_get_controller_data(nc);
+ u32 offs;
+
+ if (cmd == NAND_CMD_NONE)
+ return;
+
+ if (ctrl & NAND_CLE)
+ offs = (1 << board->cle);
+ else if (ctrl & NAND_ALE)
+ offs = (1 << board->ale);
+ else
+ return;
+
+ if (nc->options & NAND_BUSWIDTH_16)
+ offs <<= 1;
+
+ writeb(cmd, nc->legacy.IO_ADDR_W + offs);
+}
+
+static void orion_nand_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
+{
+ void __iomem *io_base = chip->legacy.IO_ADDR_R;
+#if defined(__LINUX_ARM_ARCH__) && __LINUX_ARM_ARCH__ >= 5
+ uint64_t *buf64;
+#endif
+ int i = 0;
+
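+ /* Byte-copy until the destination pointer is 8-byte aligned */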
+ while (len && (unsigned long)buf & 7) {
+ *buf++ = readb(io_base);
+ len--;
+ }
+#if defined(__LINUX_ARM_ARCH__) && __LINUX_ARM_ARCH__ >= 5
+ buf64 = (uint64_t *)buf;
+ while (i < len/8) {
+ /*
+ * Since GCC has no proper constraint (PR 43518),
+ * force the x variable to the r2/r3 register pair, as the
+ * ldrd instruction requires its first register to be even.
+ */
+ register uint64_t x asm ("r2");
+
+ asm volatile ("ldrd\t%0, [%1]" : "=&r" (x) : "r" (io_base));
+ buf64[i++] = x;
+ }
+ i *= 8;
+#else
+ readsl(io_base, buf, len/4);
+ i = len / 4 * 4;
+#endif
+ while (i < len)
+ buf[i++] = readb(io_base);
+}
+
+static int orion_nand_attach_chip(struct nand_chip *chip)
+{
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
+ chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN)
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+
+ return 0;
+}
+
+static const struct nand_controller_ops orion_nand_ops = {
+ .attach_chip = orion_nand_attach_chip,
+};
+
+static int __init orion_nand_probe(struct platform_device *pdev)
+{
+ struct orion_nand_info *info;
+ struct mtd_info *mtd;
+ struct nand_chip *nc;
+ struct orion_nand_data *board;
+ struct resource *res;
+ void __iomem *io_base;
+ int ret = 0;
+ u32 val = 0;
+
+ info = devm_kzalloc(&pdev->dev,
+ sizeof(struct orion_nand_info),
+ GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+ nc = &info->chip;
+ mtd = nand_to_mtd(nc);
+
+ nand_controller_init(&info->controller);
+ info->controller.ops = &orion_nand_ops;
+ nc->controller = &info->controller;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ io_base = devm_ioremap_resource(&pdev->dev, res);
+
+ if (IS_ERR(io_base))
+ return PTR_ERR(io_base);
+
+ if (pdev->dev.of_node) {
+ board = devm_kzalloc(&pdev->dev, sizeof(struct orion_nand_data),
+ GFP_KERNEL);
+ if (!board)
+ return -ENOMEM;
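+ /* Default to address line 0 for CLE and line 1 for ALE */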
+ if (!of_property_read_u32(pdev->dev.of_node, "cle", &val))
+ board->cle = (u8)val;
+ else
+ board->cle = 0;
+ if (!of_property_read_u32(pdev->dev.of_node, "ale", &val))
+ board->ale = (u8)val;
+ else
+ board->ale = 1;
+ if (!of_property_read_u32(pdev->dev.of_node,
+ "bank-width", &val))
+ board->width = (u8)val * 8;
+ else
+ board->width = 8;
+ if (!of_property_read_u32(pdev->dev.of_node,
+ "chip-delay", &val))
+ board->chip_delay = (u8)val;
+ } else {
+ board = dev_get_platdata(&pdev->dev);
+ }
+
+ mtd->dev.parent = &pdev->dev;
+
+ nand_set_controller_data(nc, board);
+ nand_set_flash_node(nc, pdev->dev.of_node);
+ nc->legacy.IO_ADDR_R = nc->legacy.IO_ADDR_W = io_base;
+ nc->legacy.cmd_ctrl = orion_nand_cmd_ctrl;
+ nc->legacy.read_buf = orion_nand_read_buf;
+
+ if (board->chip_delay)
+ nc->legacy.chip_delay = board->chip_delay;
+
+ WARN(board->width > 16,
+ "%d bit bus width out of range",
+ board->width);
+
+ if (board->width == 16)
+ nc->options |= NAND_BUSWIDTH_16;
+
+ platform_set_drvdata(pdev, info);
+
+ /*
+ * Not all platforms can gate the clock, so it is not
+ * an error if the clock does not exist.
+ */
+ info->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(info->clk)) {
+ ret = PTR_ERR(info->clk);
+ if (ret == -ENOENT) {
+ info->clk = NULL;
+ } else {
+ dev_err(&pdev->dev, "failed to get clock!\n");
+ return ret;
+ }
+ }
+
+ ret = clk_prepare_enable(info->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to prepare clock!\n");
+ return ret;
+ }
+
+ /*
+ * This driver assumes that the default ECC engine should be TYPE_SOFT.
+ * Set ->engine_type before registering the NAND devices in order to
+ * provide a driver specific default value.
+ */
+ nc->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+
+ ret = nand_scan(nc, 1);
+ if (ret)
+ goto no_dev;
+
+ mtd->name = "orion_nand";
+ ret = mtd_device_register(mtd, board->parts, board->nr_parts);
+ if (ret) {
+ nand_cleanup(nc);
+ goto no_dev;
+ }
+
+ return 0;
+
+no_dev:
+ clk_disable_unprepare(info->clk);
+ return ret;
+}
+
+static int orion_nand_remove(struct platform_device *pdev)
+{
+ struct orion_nand_info *info = platform_get_drvdata(pdev);
+ struct nand_chip *chip = &info->chip;
+ int ret;
+
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+
+ nand_cleanup(chip);
+
+ clk_disable_unprepare(info->clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id orion_nand_of_match_table[] = {
+ { .compatible = "marvell,orion-nand", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, orion_nand_of_match_table);
+#endif
+
+static struct platform_driver orion_nand_driver = {
+ .remove = orion_nand_remove,
+ .driver = {
+ .name = "orion_nand",
+ .of_match_table = of_match_ptr(orion_nand_of_match_table),
+ },
+};
+
+module_platform_driver_probe(orion_nand_driver, orion_nand_probe);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Tzachi Perelstein");
+MODULE_DESCRIPTION("NAND glue for Orion platforms");
+MODULE_ALIAS("platform:orion_nand");
diff --git a/drivers/mtd/nand/raw/oxnas_nand.c b/drivers/mtd/nand/raw/oxnas_nand.c
new file mode 100644
index 000000000..f44947043
--- /dev/null
+++ b/drivers/mtd/nand/raw/oxnas_nand.c
@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Oxford Semiconductor OXNAS NAND driver
+ *
+ * Copyright (C) 2016 Neil Armstrong <narmstrong@baylibre.com>
+ * Heavily based on plat_nand.c :
+ * Author: Vitaly Wool <vitalywool@gmail.com>
+ * Copyright (C) 2013 Ma Haijun <mahaijuns@gmail.com>
+ * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/reset.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/of.h>
+
+/* Nand commands */
+#define OXNAS_NAND_CMD_ALE BIT(18)
+#define OXNAS_NAND_CMD_CLE BIT(19)
+
+#define OXNAS_NAND_MAX_CHIPS 1
+
+struct oxnas_nand_ctrl {
+ struct nand_controller base;
+ void __iomem *io_base;
+ struct clk *clk;
+ struct nand_chip *chips[OXNAS_NAND_MAX_CHIPS];
+ unsigned int nchips;
+};
+
+static uint8_t oxnas_nand_read_byte(struct nand_chip *chip)
+{
+ struct oxnas_nand_ctrl *oxnas = nand_get_controller_data(chip);
+
+ return readb(oxnas->io_base);
+}
+
+static void oxnas_nand_read_buf(struct nand_chip *chip, u8 *buf, int len)
+{
+ struct oxnas_nand_ctrl *oxnas = nand_get_controller_data(chip);
+
+ ioread8_rep(oxnas->io_base, buf, len);
+}
+
+static void oxnas_nand_write_buf(struct nand_chip *chip, const u8 *buf,
+ int len)
+{
+ struct oxnas_nand_ctrl *oxnas = nand_get_controller_data(chip);
+
+ iowrite8_rep(oxnas->io_base, buf, len);
+}
+
+/* Single CS command control */
+static void oxnas_nand_cmd_ctrl(struct nand_chip *chip, int cmd,
+ unsigned int ctrl)
+{
+ struct oxnas_nand_ctrl *oxnas = nand_get_controller_data(chip);
+
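+ /*
+ * CLE and ALE map to address bits 19 and 18 of the device window,
+ * so the target latch is selected by the write address offset.
+ */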
+ if (ctrl & NAND_CLE)
+ writeb(cmd, oxnas->io_base + OXNAS_NAND_CMD_CLE);
+ else if (ctrl & NAND_ALE)
+ writeb(cmd, oxnas->io_base + OXNAS_NAND_CMD_ALE);
+}
+
+/*
+ * Probe for the NAND device.
+ */
+static int oxnas_nand_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *nand_np;
+ struct oxnas_nand_ctrl *oxnas;
+ struct nand_chip *chip;
+ struct mtd_info *mtd;
+ struct resource *res;
+ int count = 0;
+ int err = 0;
+ int i;
+
+ /* Allocate memory for the device structure (and zero it) */
+ oxnas = devm_kzalloc(&pdev->dev, sizeof(*oxnas),
+ GFP_KERNEL);
+ if (!oxnas)
+ return -ENOMEM;
+
+ nand_controller_init(&oxnas->base);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ oxnas->io_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(oxnas->io_base))
+ return PTR_ERR(oxnas->io_base);
+
+ oxnas->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(oxnas->clk))
+ oxnas->clk = NULL;
+
+ /* Only a single chip node is supported */
+ count = of_get_child_count(np);
+ if (count > 1)
+ return -EINVAL;
+
+ err = clk_prepare_enable(oxnas->clk);
+ if (err)
+ return err;
+
+ device_reset_optional(&pdev->dev);
+
+ for_each_child_of_node(np, nand_np) {
+ chip = devm_kzalloc(&pdev->dev, sizeof(struct nand_chip),
+ GFP_KERNEL);
+ if (!chip) {
+ err = -ENOMEM;
+ goto err_release_child;
+ }
+
+ chip->controller = &oxnas->base;
+
+ nand_set_flash_node(chip, nand_np);
+ nand_set_controller_data(chip, oxnas);
+
+ mtd = nand_to_mtd(chip);
+ mtd->dev.parent = &pdev->dev;
+ mtd->priv = chip;
+
+ chip->legacy.cmd_ctrl = oxnas_nand_cmd_ctrl;
+ chip->legacy.read_buf = oxnas_nand_read_buf;
+ chip->legacy.read_byte = oxnas_nand_read_byte;
+ chip->legacy.write_buf = oxnas_nand_write_buf;
+ chip->legacy.chip_delay = 30;
+
+ /* Scan to find existence of the device */
+ err = nand_scan(chip, 1);
+ if (err)
+ goto err_release_child;
+
+ err = mtd_device_register(mtd, NULL, 0);
+ if (err)
+ goto err_cleanup_nand;
+
+ oxnas->chips[oxnas->nchips++] = chip;
+ }
+
+ /* Exit if no chips found */
+ if (!oxnas->nchips) {
+ err = -ENODEV;
+ goto err_clk_unprepare;
+ }
+
+ platform_set_drvdata(pdev, oxnas);
+
+ return 0;
+
+err_cleanup_nand:
+ nand_cleanup(chip);
+err_release_child:
+ of_node_put(nand_np);
+
+ for (i = 0; i < oxnas->nchips; i++) {
+ chip = oxnas->chips[i];
+ WARN_ON(mtd_device_unregister(nand_to_mtd(chip)));
+ nand_cleanup(chip);
+ }
+
+err_clk_unprepare:
+ clk_disable_unprepare(oxnas->clk);
+ return err;
+}
+
+static int oxnas_nand_remove(struct platform_device *pdev)
+{
+ struct oxnas_nand_ctrl *oxnas = platform_get_drvdata(pdev);
+ struct nand_chip *chip;
+ int i;
+
+ for (i = 0; i < oxnas->nchips; i++) {
+ chip = oxnas->chips[i];
+ WARN_ON(mtd_device_unregister(nand_to_mtd(chip)));
+ nand_cleanup(chip);
+ }
+
+ clk_disable_unprepare(oxnas->clk);
+
+ return 0;
+}
+
+static const struct of_device_id oxnas_nand_match[] = {
+ { .compatible = "oxsemi,ox820-nand" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, oxnas_nand_match);
+
+static struct platform_driver oxnas_nand_driver = {
+ .probe = oxnas_nand_probe,
+ .remove = oxnas_nand_remove,
+ .driver = {
+ .name = "oxnas_nand",
+ .of_match_table = oxnas_nand_match,
+ },
+};
+
+module_platform_driver(oxnas_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
+MODULE_DESCRIPTION("Oxnas NAND driver");
+MODULE_ALIAS("platform:oxnas_nand");
diff --git a/drivers/mtd/nand/raw/pasemi_nand.c b/drivers/mtd/nand/raw/pasemi_nand.c
new file mode 100644
index 000000000..b0ba1fdbf
--- /dev/null
+++ b/drivers/mtd/nand/raw/pasemi_nand.c
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2006-2007 PA Semi, Inc
+ *
+ * Author: Egor Martovetsky <egor@pasemi.com>
+ * Maintained by: Olof Johansson <olof@lixom.net>
+ *
+ * Driver for the PWRficient onchip NAND flash interface
+ */
+
+#undef DEBUG
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pci.h>
+
+#include <asm/io.h>
+
+#define LBICTRL_LPCCTL_NR 0x00004000
+#define CLE_PIN_CTL 15
+#define ALE_PIN_CTL 14
+
+static unsigned int lpcctl;
+static struct mtd_info *pasemi_nand_mtd;
+static struct nand_controller controller;
+static const char driver_name[] = "pasemi-nand";
+
+static void pasemi_read_buf(struct nand_chip *chip, u_char *buf, int len)
+{
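+ /* Copy in 2 KiB chunks, presumably to bound single MMIO bursts */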
+ while (len > 0x800) {
+ memcpy_fromio(buf, chip->legacy.IO_ADDR_R, 0x800);
+ buf += 0x800;
+ len -= 0x800;
+ }
+ memcpy_fromio(buf, chip->legacy.IO_ADDR_R, len);
+}
+
+static void pasemi_write_buf(struct nand_chip *chip, const u_char *buf,
+ int len)
+{
+ while (len > 0x800) {
+ memcpy_toio(chip->legacy.IO_ADDR_R, buf, 0x800);
+ buf += 0x800;
+ len -= 0x800;
+ }
+ memcpy_toio(chip->legacy.IO_ADDR_R, buf, len);
+}
+
+static void pasemi_hwcontrol(struct nand_chip *chip, int cmd,
+ unsigned int ctrl)
+{
+ if (cmd == NAND_CMD_NONE)
+ return;
+
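+ /*
+ * CLE and ALE are selected through address lines 15 and 14, so the
+ * command/address latch is picked via the write address offset.
+ */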
+ if (ctrl & NAND_CLE)
+ out_8(chip->legacy.IO_ADDR_W + (1 << CLE_PIN_CTL), cmd);
+ else
+ out_8(chip->legacy.IO_ADDR_W + (1 << ALE_PIN_CTL), cmd);
+
+ /* Push out posted writes */
+ eieio();
+ inl(lpcctl);
+}
+
+static int pasemi_device_ready(struct nand_chip *chip)
+{
+ return !!(inl(lpcctl) & LBICTRL_LPCCTL_NR);
+}
+
+static int pasemi_attach_chip(struct nand_chip *chip)
+{
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
+ chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN)
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+
+ return 0;
+}
+
+static const struct nand_controller_ops pasemi_ops = {
+ .attach_chip = pasemi_attach_chip,
+};
+
+static int pasemi_nand_probe(struct platform_device *ofdev)
+{
+ struct device *dev = &ofdev->dev;
+ struct pci_dev *pdev;
+ struct device_node *np = dev->of_node;
+ struct resource res;
+ struct nand_chip *chip;
+ int err = 0;
+
+ err = of_address_to_resource(np, 0, &res);
+
+ if (err)
+ return -EINVAL;
+
+ /* We only support one device at the moment */
+ if (pasemi_nand_mtd)
+ return -ENODEV;
+
+ dev_dbg(dev, "pasemi_nand at %pR\n", &res);
+
+ /* Allocate memory for MTD device structure and private data */
+ chip = kzalloc(sizeof(struct nand_chip), GFP_KERNEL);
+ if (!chip) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ controller.ops = &pasemi_ops;
+ nand_controller_init(&controller);
+ chip->controller = &controller;
+
+ pasemi_nand_mtd = nand_to_mtd(chip);
+
+ /* Link the private data with the MTD structure */
+ pasemi_nand_mtd->dev.parent = dev;
+
+ chip->legacy.IO_ADDR_R = of_iomap(np, 0);
+ chip->legacy.IO_ADDR_W = chip->legacy.IO_ADDR_R;
+
+ if (!chip->legacy.IO_ADDR_R) {
+ err = -EIO;
+ goto out_mtd;
+ }
+
+ pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa008, NULL);
+ if (!pdev) {
+ err = -ENODEV;
+ goto out_ior;
+ }
+
+ lpcctl = pci_resource_start(pdev, 0);
+ pci_dev_put(pdev);
+
+ if (!request_region(lpcctl, 4, driver_name)) {
+ err = -EBUSY;
+ goto out_ior;
+ }
+
+ chip->legacy.cmd_ctrl = pasemi_hwcontrol;
+ chip->legacy.dev_ready = pasemi_device_ready;
+ chip->legacy.read_buf = pasemi_read_buf;
+ chip->legacy.write_buf = pasemi_write_buf;
+ chip->legacy.chip_delay = 0;
+
+ /* Enable the following for a flash based bad block table */
+ chip->bbt_options = NAND_BBT_USE_FLASH;
+
+ /*
+ * This driver assumes that the default ECC engine should be TYPE_SOFT.
+ * Set ->engine_type before registering the NAND devices in order to
+ * provide a driver specific default value.
+ */
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+
+ /* Scan to find existence of the device */
+ err = nand_scan(chip, 1);
+ if (err)
+ goto out_lpc;
+
+ if (mtd_device_register(pasemi_nand_mtd, NULL, 0)) {
+ dev_err(dev, "Unable to register MTD device\n");
+ err = -ENODEV;
+ goto out_cleanup_nand;
+ }
+
+ dev_info(dev, "PA Semi NAND flash at %pR, control at I/O %x\n", &res,
+ lpcctl);
+
+ return 0;
+
+ out_cleanup_nand:
+ nand_cleanup(chip);
+ out_lpc:
+ release_region(lpcctl, 4);
+ out_ior:
+ iounmap(chip->legacy.IO_ADDR_R);
+ out_mtd:
+ kfree(chip);
+ out:
+ return err;
+}
+
+static int pasemi_nand_remove(struct platform_device *ofdev)
+{
+ struct nand_chip *chip;
+ int ret;
+
+ if (!pasemi_nand_mtd)
+ return 0;
+
+ chip = mtd_to_nand(pasemi_nand_mtd);
+
+ /* Release resources, unregister device */
+ ret = mtd_device_unregister(pasemi_nand_mtd);
+ WARN_ON(ret);
+ nand_cleanup(chip);
+
+ release_region(lpcctl, 4);
+
+ iounmap(chip->legacy.IO_ADDR_R);
+
+ /* Free the MTD device structure */
+ kfree(chip);
+
+ pasemi_nand_mtd = NULL;
+
+ return 0;
+}
+
+static const struct of_device_id pasemi_nand_match[] =
+{
+ {
+ .compatible = "pasemi,localbus-nand",
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, pasemi_nand_match);
+
+static struct platform_driver pasemi_nand_driver =
+{
+ .driver = {
+ .name = driver_name,
+ .of_match_table = pasemi_nand_match,
+ },
+ .probe = pasemi_nand_probe,
+ .remove = pasemi_nand_remove,
+};
+
+module_platform_driver(pasemi_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>");
+MODULE_DESCRIPTION("NAND flash interface driver for PA Semi PWRficient");
diff --git a/drivers/mtd/nand/raw/plat_nand.c b/drivers/mtd/nand/raw/plat_nand.c
new file mode 100644
index 000000000..0ee08c42c
--- /dev/null
+++ b/drivers/mtd/nand/raw/plat_nand.c
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Generic NAND driver
+ *
+ * Author: Vitaly Wool <vitalywool@gmail.com>
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/platnand.h>
+
+struct plat_nand_data {
+ struct nand_controller controller;
+ struct nand_chip chip;
+ void __iomem *io_base;
+};
+
+static int plat_nand_attach_chip(struct nand_chip *chip)
+{
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
+ chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN)
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+
+ return 0;
+}
+
+static const struct nand_controller_ops plat_nand_ops = {
+ .attach_chip = plat_nand_attach_chip,
+};
+
+/*
+ * Probe for the NAND device.
+ */
+static int plat_nand_probe(struct platform_device *pdev)
+{
+ struct platform_nand_data *pdata = dev_get_platdata(&pdev->dev);
+ struct plat_nand_data *data;
+ struct mtd_info *mtd;
+ struct resource *res;
+ const char **part_types;
+ int err = 0;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "platform_nand_data is missing\n");
+ return -EINVAL;
+ }
+
+ if (pdata->chip.nr_chips < 1) {
+ dev_err(&pdev->dev, "invalid number of chips specified\n");
+ return -EINVAL;
+ }
+
+ /* Allocate memory for the device structure (and zero it) */
+ data = devm_kzalloc(&pdev->dev, sizeof(struct plat_nand_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->controller.ops = &plat_nand_ops;
+ nand_controller_init(&data->controller);
+ data->chip.controller = &data->controller;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ data->io_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(data->io_base))
+ return PTR_ERR(data->io_base);
+
+ nand_set_flash_node(&data->chip, pdev->dev.of_node);
+ mtd = nand_to_mtd(&data->chip);
+ mtd->dev.parent = &pdev->dev;
+
+ data->chip.legacy.IO_ADDR_R = data->io_base;
+ data->chip.legacy.IO_ADDR_W = data->io_base;
+ data->chip.legacy.cmd_ctrl = pdata->ctrl.cmd_ctrl;
+ data->chip.legacy.dev_ready = pdata->ctrl.dev_ready;
+ data->chip.legacy.select_chip = pdata->ctrl.select_chip;
+ data->chip.legacy.write_buf = pdata->ctrl.write_buf;
+ data->chip.legacy.read_buf = pdata->ctrl.read_buf;
+ data->chip.legacy.chip_delay = pdata->chip.chip_delay;
+ data->chip.options |= pdata->chip.options;
+ data->chip.bbt_options |= pdata->chip.bbt_options;
+
+ platform_set_drvdata(pdev, data);
+
+ /* Handle any platform specific setup */
+ if (pdata->ctrl.probe) {
+ err = pdata->ctrl.probe(pdev);
+ if (err)
+ goto out;
+ }
+
+ /*
+ * This driver assumes that the default ECC engine should be TYPE_SOFT.
+ * Set ->engine_type before registering the NAND devices in order to
+ * provide a driver specific default value.
+ */
+ data->chip.ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+
+ /* Scan to find existence of the device */
+ err = nand_scan(&data->chip, pdata->chip.nr_chips);
+ if (err)
+ goto out;
+
+ part_types = pdata->chip.part_probe_types;
+
+ err = mtd_device_parse_register(mtd, part_types, NULL,
+ pdata->chip.partitions,
+ pdata->chip.nr_partitions);
+
+ if (!err)
+ return err;
+
+ nand_cleanup(&data->chip);
+out:
+ if (pdata->ctrl.remove)
+ pdata->ctrl.remove(pdev);
+ return err;
+}
+
+/*
+ * Remove a NAND device.
+ */
+static int plat_nand_remove(struct platform_device *pdev)
+{
+ struct plat_nand_data *data = platform_get_drvdata(pdev);
+ struct platform_nand_data *pdata = dev_get_platdata(&pdev->dev);
+ struct nand_chip *chip = &data->chip;
+ int ret;
+
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ if (pdata->ctrl.remove)
+ pdata->ctrl.remove(pdev);
+
+ return 0;
+}
+
+static const struct of_device_id plat_nand_match[] = {
+ { .compatible = "gen_nand" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, plat_nand_match);
+
+static struct platform_driver plat_nand_driver = {
+ .probe = plat_nand_probe,
+ .remove = plat_nand_remove,
+ .driver = {
+ .name = "gen_nand",
+ .of_match_table = plat_nand_match,
+ },
+};
+
+module_platform_driver(plat_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Vitaly Wool");
+MODULE_DESCRIPTION("Simple generic NAND driver");
+MODULE_ALIAS("platform:gen_nand");
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
new file mode 100644
index 000000000..be7190f04
--- /dev/null
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -0,0 +1,3083 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ */
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/module.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/delay.h>
+#include <linux/dma/qcom_bam_dma.h>
+
+/* NANDc reg offsets */
+#define NAND_FLASH_CMD 0x00
+#define NAND_ADDR0 0x04
+#define NAND_ADDR1 0x08
+#define NAND_FLASH_CHIP_SELECT 0x0c
+#define NAND_EXEC_CMD 0x10
+#define NAND_FLASH_STATUS 0x14
+#define NAND_BUFFER_STATUS 0x18
+#define NAND_DEV0_CFG0 0x20
+#define NAND_DEV0_CFG1 0x24
+#define NAND_DEV0_ECC_CFG 0x28
+#define NAND_DEV1_ECC_CFG 0x2c
+#define NAND_DEV1_CFG0 0x30
+#define NAND_DEV1_CFG1 0x34
+#define NAND_READ_ID 0x40
+#define NAND_READ_STATUS 0x44
+#define NAND_DEV_CMD0 0xa0
+#define NAND_DEV_CMD1 0xa4
+#define NAND_DEV_CMD2 0xa8
+#define NAND_DEV_CMD_VLD 0xac
+#define SFLASHC_BURST_CFG 0xe0
+#define NAND_ERASED_CW_DETECT_CFG 0xe8
+#define NAND_ERASED_CW_DETECT_STATUS 0xec
+#define NAND_EBI2_ECC_BUF_CFG 0xf0
+#define FLASH_BUF_ACC 0x100
+
+#define NAND_CTRL 0xf00
+#define NAND_VERSION 0xf08
+#define NAND_READ_LOCATION_0 0xf20
+#define NAND_READ_LOCATION_1 0xf24
+#define NAND_READ_LOCATION_2 0xf28
+#define NAND_READ_LOCATION_3 0xf2c
+
+/* dummy register offsets, used by write_reg_dma */
+#define NAND_DEV_CMD1_RESTORE 0xdead
+#define NAND_DEV_CMD_VLD_RESTORE 0xbeef
+
+/* NAND_FLASH_CMD bits */
+#define PAGE_ACC BIT(4)
+#define LAST_PAGE BIT(5)
+
+/* NAND_FLASH_CHIP_SELECT bits */
+#define NAND_DEV_SEL 0
+#define DM_EN BIT(2)
+
+/* NAND_FLASH_STATUS bits */
+#define FS_OP_ERR BIT(4)
+#define FS_READY_BSY_N BIT(5)
+#define FS_MPU_ERR BIT(8)
+#define FS_DEVICE_STS_ERR BIT(16)
+#define FS_DEVICE_WP BIT(23)
+
+/* NAND_BUFFER_STATUS bits */
+#define BS_UNCORRECTABLE_BIT BIT(8)
+#define BS_CORRECTABLE_ERR_MSK 0x1f
+
+/* NAND_DEVn_CFG0 bits */
+#define DISABLE_STATUS_AFTER_WRITE 4
+#define CW_PER_PAGE 6
+#define UD_SIZE_BYTES 9
+#define ECC_PARITY_SIZE_BYTES_RS 19
+#define SPARE_SIZE_BYTES 23
+#define NUM_ADDR_CYCLES 27
+#define STATUS_BFR_READ 30
+#define SET_RD_MODE_AFTER_STATUS 31
+
+/* NAND_DEVn_CFG1 bits */
+#define DEV0_CFG1_ECC_DISABLE 0
+#define WIDE_FLASH 1
+#define NAND_RECOVERY_CYCLES 2
+#define CS_ACTIVE_BSY 5
+#define BAD_BLOCK_BYTE_NUM 6
+#define BAD_BLOCK_IN_SPARE_AREA 16
+#define WR_RD_BSY_GAP 17
+#define ENABLE_BCH_ECC 27
+
+/* NAND_DEV0_ECC_CFG bits */
+#define ECC_CFG_ECC_DISABLE 0
+#define ECC_SW_RESET 1
+#define ECC_MODE 4
+#define ECC_PARITY_SIZE_BYTES_BCH 8
+#define ECC_NUM_DATA_BYTES 16
+#define ECC_FORCE_CLK_OPEN 30
+
+/* NAND_DEV_CMD1 bits */
+#define READ_ADDR 0
+
+/* NAND_DEV_CMD_VLD bits */
+#define READ_START_VLD BIT(0)
+#define READ_STOP_VLD BIT(1)
+#define WRITE_START_VLD BIT(2)
+#define ERASE_START_VLD BIT(3)
+#define SEQ_READ_START_VLD BIT(4)
+
+/* NAND_EBI2_ECC_BUF_CFG bits */
+#define NUM_STEPS 0
+
+/* NAND_ERASED_CW_DETECT_CFG bits */
+#define ERASED_CW_ECC_MASK 1
+#define AUTO_DETECT_RES 0
+#define MASK_ECC (1 << ERASED_CW_ECC_MASK)
+#define RESET_ERASED_DET (1 << AUTO_DETECT_RES)
+#define ACTIVE_ERASED_DET (0 << AUTO_DETECT_RES)
+#define CLR_ERASED_PAGE_DET (RESET_ERASED_DET | MASK_ECC)
+#define SET_ERASED_PAGE_DET (ACTIVE_ERASED_DET | MASK_ECC)
+
+/* NAND_ERASED_CW_DETECT_STATUS bits */
+#define PAGE_ALL_ERASED BIT(7)
+#define CODEWORD_ALL_ERASED BIT(6)
+#define PAGE_ERASED BIT(5)
+#define CODEWORD_ERASED BIT(4)
+#define ERASED_PAGE (PAGE_ALL_ERASED | PAGE_ERASED)
+#define ERASED_CW (CODEWORD_ALL_ERASED | CODEWORD_ERASED)
+
+/* NAND_READ_LOCATION_n bits */
+#define READ_LOCATION_OFFSET 0
+#define READ_LOCATION_SIZE 16
+#define READ_LOCATION_LAST 31
+
+/* Version Mask */
+#define NAND_VERSION_MAJOR_MASK 0xf0000000
+#define NAND_VERSION_MAJOR_SHIFT 28
+#define NAND_VERSION_MINOR_MASK 0x0fff0000
+#define NAND_VERSION_MINOR_SHIFT 16
+
+/* NAND OP_CMDs */
+#define OP_PAGE_READ 0x2
+#define OP_PAGE_READ_WITH_ECC 0x3
+#define OP_PAGE_READ_WITH_ECC_SPARE 0x4
+#define OP_PROGRAM_PAGE 0x6
+#define OP_PAGE_PROGRAM_WITH_ECC 0x7
+#define OP_PROGRAM_PAGE_SPARE 0x9
+#define OP_BLOCK_ERASE 0xa
+#define OP_FETCH_ID 0xb
+#define OP_RESET_DEVICE 0xd
+
+/* Default Value for NAND_DEV_CMD_VLD */
+#define NAND_DEV_CMD_VLD_VAL (READ_START_VLD | WRITE_START_VLD | \
+ ERASE_START_VLD | SEQ_READ_START_VLD)
+
+/* NAND_CTRL bits */
+#define BAM_MODE_EN BIT(0)
+
+/*
+ * The NAND controller performs reads/writes with ECC in 516-byte chunks.
+ * The driver calls these chunks 'steps' or 'codewords' interchangeably.
+ */
+#define NANDC_STEP_SIZE 512
+
+/*
+ * The largest page size we support is 8K; such a page has 16 steps/codewords
+ * of 512 bytes each.
+ */
+#define MAX_NUM_STEPS (SZ_8K / NANDC_STEP_SIZE)
+
+/* we read at most 3 registers per codeword scan */
+#define MAX_REG_RD (3 * MAX_NUM_STEPS)
+
+/* ECC modes supported by the controller */
+#define ECC_NONE BIT(0)
+#define ECC_RS_4BIT BIT(1)
+#define ECC_BCH_4BIT BIT(2)
+#define ECC_BCH_8BIT BIT(3)
+
+#define nandc_set_read_loc(nandc, reg, offset, size, is_last) \
+nandc_set_reg(nandc, NAND_READ_LOCATION_##reg, \
+ ((offset) << READ_LOCATION_OFFSET) | \
+ ((size) << READ_LOCATION_SIZE) | \
+ ((is_last) << READ_LOCATION_LAST))
+
+/*
+ * Returns the actual register address for all NAND_DEV_ registers
+ * (i.e. NAND_DEV_CMD0, NAND_DEV_CMD1, NAND_DEV_CMD2 and NAND_DEV_CMD_VLD)
+ */
+#define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))
+
+/* Returns the NAND register physical address */
+#define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset))
+
+/* Returns the dma address for reg read buffer */
+#define reg_buf_dma_addr(chip, vaddr) \
+ ((chip)->reg_read_dma + \
+ ((uint8_t *)(vaddr) - (uint8_t *)(chip)->reg_read_buf))
+
+#define QPIC_PER_CW_CMD_ELEMENTS 32
+#define QPIC_PER_CW_CMD_SGL 32
+#define QPIC_PER_CW_DATA_SGL 8
+
+#define QPIC_NAND_COMPLETION_TIMEOUT msecs_to_jiffies(2000)
+
+/*
+ * Flags used in DMA descriptor preparation helper functions
+ * (i.e. read_reg_dma/write_reg_dma/read_data_dma/write_data_dma)
+ */
+/* Don't set the EOT in current tx BAM sgl */
+#define NAND_BAM_NO_EOT BIT(0)
+/* Set the NWD flag in current BAM sgl */
+#define NAND_BAM_NWD BIT(1)
+/* Finish writing in the current BAM sgl and start writing in another BAM sgl */
+#define NAND_BAM_NEXT_SGL BIT(2)
+/*
+ * Erased codeword status is being used two times in single transfer so this
+ * flag will determine the current value of erased codeword status register
+ */
+#define NAND_ERASED_CW_SET BIT(4)
+
+/*
+ * This data type corresponds to the BAM transaction which will be used for all
+ * NAND transfers.
+ * @bam_ce - the array of BAM command elements
+ * @cmd_sgl - sgl for NAND BAM command pipe
+ * @data_sgl - sgl for NAND BAM consumer/producer pipe
+ * @bam_ce_pos - the index in bam_ce which is available for next sgl
+ * @bam_ce_start - the index in bam_ce which marks the start of the command
+ * elements for the current sgl; used for size calculation
+ * of the current sgl
+ * @cmd_sgl_pos - current index in command sgl.
+ * @cmd_sgl_start - start index in command sgl.
+ * @tx_sgl_pos - current index in data sgl for tx.
+ * @tx_sgl_start - start index in data sgl for tx.
+ * @rx_sgl_pos - current index in data sgl for rx.
+ * @rx_sgl_start - start index in data sgl for rx.
+ * @wait_second_completion - wait for the second DMA desc completion before
+ * signalling NAND transfer completion.
+ * @txn_done - completion for NAND transfer.
+ * @last_data_desc - last DMA desc in data channel (tx/rx).
+ * @last_cmd_desc - last DMA desc in command channel.
+ */
+struct bam_transaction {
+ struct bam_cmd_element *bam_ce;
+ struct scatterlist *cmd_sgl;
+ struct scatterlist *data_sgl;
+ u32 bam_ce_pos;
+ u32 bam_ce_start;
+ u32 cmd_sgl_pos;
+ u32 cmd_sgl_start;
+ u32 tx_sgl_pos;
+ u32 tx_sgl_start;
+ u32 rx_sgl_pos;
+ u32 rx_sgl_start;
+ bool wait_second_completion;
+ struct completion txn_done;
+ struct dma_async_tx_descriptor *last_data_desc;
+ struct dma_async_tx_descriptor *last_cmd_desc;
+};
+
+/*
+ * This data type corresponds to the nand dma descriptor
+ * @list - list for desc_info
+ * @dir - DMA transfer direction
+ * @adm_sgl - sgl which will be used for single sgl dma descriptor. Only used by
+ * ADM
+ * @bam_sgl - sgl which will be used for dma descriptor. Only used by BAM
+ * @sgl_cnt - number of SGL in bam_sgl. Only used by BAM
+ * @dma_desc - low level DMA engine descriptor
+ */
+struct desc_info {
+ struct list_head node;
+
+ enum dma_data_direction dir;
+ union {
+ struct scatterlist adm_sgl;
+ struct {
+ struct scatterlist *bam_sgl;
+ int sgl_cnt;
+ };
+ };
+ struct dma_async_tx_descriptor *dma_desc;
+};
+
+/*
+ * Holds the current register values that we want to write. Acts as a
+ * contiguous chunk of memory which we use to write the controller registers
+ * through DMA.
+ */
+struct nandc_regs {
+ __le32 cmd;
+ __le32 addr0;
+ __le32 addr1;
+ __le32 chip_sel;
+ __le32 exec;
+
+ __le32 cfg0;
+ __le32 cfg1;
+ __le32 ecc_bch_cfg;
+
+ __le32 clrflashstatus;
+ __le32 clrreadstatus;
+
+ __le32 cmd1;
+ __le32 vld;
+
+ __le32 orig_cmd1;
+ __le32 orig_vld;
+
+ __le32 ecc_buf_cfg;
+ __le32 read_location0;
+ __le32 read_location1;
+ __le32 read_location2;
+ __le32 read_location3;
+
+ __le32 erased_cw_detect_cfg_clr;
+ __le32 erased_cw_detect_cfg_set;
+};
+
+/*
+ * NAND controller data struct
+ *
+ * @controller: base controller structure
+ * @host_list: list containing all the chips attached to the
+ * controller
+ * @dev: parent device
+ * @base: MMIO base
+ * @base_phys: physical base address of controller registers
+ * @base_dma: dma base address of controller registers
+ * @core_clk: controller clock
+ * @aon_clk: another controller clock
+ *
+ * @chan: dma channel
+ * @cmd_crci: ADM DMA CRCI for command flow control
+ * @data_crci: ADM DMA CRCI for data flow control
+ * @desc_list: DMA descriptor list (list of desc_infos)
+ *
+ * @data_buffer: our local DMA buffer for page read/writes,
+ * used when we can't use the buffer provided
+ * by upper layers directly
+ * @buf_size/count/start: markers for chip->legacy.read_buf/write_buf
+ * functions
+ * @reg_read_buf: local buffer for reading back registers via DMA
+ * @reg_read_dma: contains dma address for register read buffer
+ * @reg_read_pos: marker for data read in reg_read_buf
+ *
+ * @regs: a contiguous chunk of memory for DMA register
+ * writes; contains the register values to be
+ * written to the controller
+ * @cmd1/vld: some fixed controller register values
+ * @props: properties of the current NAND controller,
+ * initialized via DT match data
+ * @max_cwperpage: maximum QPIC codewords required, calculated
+ * from the page sizes of all connected NAND devices
+ */
+struct qcom_nand_controller {
+ struct nand_controller controller;
+ struct list_head host_list;
+
+ struct device *dev;
+
+ void __iomem *base;
+ phys_addr_t base_phys;
+ dma_addr_t base_dma;
+
+ struct clk *core_clk;
+ struct clk *aon_clk;
+
+ union {
+ /* will be used only by QPIC for BAM DMA */
+ struct {
+ struct dma_chan *tx_chan;
+ struct dma_chan *rx_chan;
+ struct dma_chan *cmd_chan;
+ };
+
+ /* will be used only by EBI2 for ADM DMA */
+ struct {
+ struct dma_chan *chan;
+ unsigned int cmd_crci;
+ unsigned int data_crci;
+ };
+ };
+
+ struct list_head desc_list;
+ struct bam_transaction *bam_txn;
+
+ u8 *data_buffer;
+ int buf_size;
+ int buf_count;
+ int buf_start;
+ unsigned int max_cwperpage;
+
+ __le32 *reg_read_buf;
+ dma_addr_t reg_read_dma;
+ int reg_read_pos;
+
+ struct nandc_regs *regs;
+
+ u32 cmd1, vld;
+ const struct qcom_nandc_props *props;
+};
+
+/*
+ * NAND chip structure
+ *
+ * @chip: base NAND chip structure
+ * @node: list node to add itself to host_list in
+ * qcom_nand_controller
+ *
+ * @cs: chip select value for this chip
+ * @cw_size: the number of bytes in a single step/codeword
+ * of a page, consisting of all data, ecc, spare
+ * and reserved bytes
+ * @cw_data: the number of bytes within a codeword protected
+ * by ECC
+ * @use_ecc: request the controller to use ECC for the
+ * upcoming read/write
+ * @bch_enabled: flag to tell whether BCH ECC mode is used
+ * @ecc_bytes_hw: ECC bytes used by controller hardware for this
+ * chip
+ * @status: value to be returned if NAND_CMD_STATUS command
+ * is executed
+ * @last_command: keeps track of the last command on this chip;
+ * used for reading the correct status
+ *
+ * @cfg0, cfg1, cfg0_raw..: NANDc register configurations needed for
+ * ecc/non-ecc mode for the current nand flash
+ * device
+ */
+struct qcom_nand_host {
+ struct nand_chip chip;
+ struct list_head node;
+
+ int cs;
+ int cw_size;
+ int cw_data;
+ bool use_ecc;
+ bool bch_enabled;
+ int ecc_bytes_hw;
+ int spare_bytes;
+ int bbm_size;
+ u8 status;
+ int last_command;
+
+ u32 cfg0, cfg1;
+ u32 cfg0_raw, cfg1_raw;
+ u32 ecc_buf_cfg;
+ u32 ecc_bch_cfg;
+ u32 clrflashstatus;
+ u32 clrreadstatus;
+};
+
+/*
+ * This data type corresponds to the NAND controller properties which vary
+ * among different NAND controllers.
+ * @ecc_modes - ecc mode for NAND
+ * @is_bam - whether NAND controller is using BAM
+ * @is_qpic - whether NAND CTRL is part of qpic IP
+ * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
+ */
+struct qcom_nandc_props {
+ u32 ecc_modes;
+ bool is_bam;
+ bool is_qpic;
+ u32 dev_cmd_reg_start;
+};
+
+/* Frees the BAM transaction memory */
+static void free_bam_transaction(struct qcom_nand_controller *nandc)
+{
+ struct bam_transaction *bam_txn = nandc->bam_txn;
+
+ devm_kfree(nandc->dev, bam_txn);
+}
+
+/* Allocates and initializes the BAM transaction */
+static struct bam_transaction *
+alloc_bam_transaction(struct qcom_nand_controller *nandc)
+{
+ struct bam_transaction *bam_txn;
+ size_t bam_txn_size;
+ unsigned int num_cw = nandc->max_cwperpage;
+ void *bam_txn_buf;
+
+ bam_txn_size =
+ sizeof(*bam_txn) + num_cw *
+ ((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
+ (sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
+ (sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));
+
+ bam_txn_buf = devm_kzalloc(nandc->dev, bam_txn_size, GFP_KERNEL);
+ if (!bam_txn_buf)
+ return NULL;
+
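+ /*
+ * A single allocation backs the transaction struct and all the
+ * per-codeword command-element and scatterlist arrays; carve it
+ * up sequentially.
+ */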
+ bam_txn = bam_txn_buf;
+ bam_txn_buf += sizeof(*bam_txn);
+
+ bam_txn->bam_ce = bam_txn_buf;
+ bam_txn_buf +=
+ sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw;
+
+ bam_txn->cmd_sgl = bam_txn_buf;
+ bam_txn_buf +=
+ sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw;
+
+ bam_txn->data_sgl = bam_txn_buf;
+
+ init_completion(&bam_txn->txn_done);
+
+ return bam_txn;
+}
+
+/* Clears the BAM transaction indexes */
+static void clear_bam_transaction(struct qcom_nand_controller *nandc)
+{
+ struct bam_transaction *bam_txn = nandc->bam_txn;
+
+ if (!nandc->props->is_bam)
+ return;
+
+ bam_txn->bam_ce_pos = 0;
+ bam_txn->bam_ce_start = 0;
+ bam_txn->cmd_sgl_pos = 0;
+ bam_txn->cmd_sgl_start = 0;
+ bam_txn->tx_sgl_pos = 0;
+ bam_txn->tx_sgl_start = 0;
+ bam_txn->rx_sgl_pos = 0;
+ bam_txn->rx_sgl_start = 0;
+ bam_txn->last_data_desc = NULL;
+ bam_txn->wait_second_completion = false;
+
+ sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
+ QPIC_PER_CW_CMD_SGL);
+ sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage *
+ QPIC_PER_CW_DATA_SGL);
+
+ reinit_completion(&bam_txn->txn_done);
+}
+
+/* Callback for DMA descriptor completion */
+static void qpic_bam_dma_done(void *data)
+{
+ struct bam_transaction *bam_txn = data;
+
+ /*
+ * In case of data transfer with NAND, 2 callbacks will be generated.
+ * One for command channel and another one for data channel.
+ * If current transaction has data descriptors
+ * (i.e. wait_second_completion is true), then set this to false
+ * and wait for second DMA descriptor completion.
+ */
+ if (bam_txn->wait_second_completion)
+ bam_txn->wait_second_completion = false;
+ else
+ complete(&bam_txn->txn_done);
+}
+
+static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
+{
+ return container_of(chip, struct qcom_nand_host, chip);
+}
+
+static inline struct qcom_nand_controller *
+get_qcom_nand_controller(struct nand_chip *chip)
+{
+ return container_of(chip->controller, struct qcom_nand_controller,
+ controller);
+}
+
+static inline u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
+{
+ return ioread32(nandc->base + offset);
+}
+
+static inline void nandc_write(struct qcom_nand_controller *nandc, int offset,
+ u32 val)
+{
+ iowrite32(val, nandc->base + offset);
+}
+
+static inline void nandc_read_buffer_sync(struct qcom_nand_controller *nandc,
+ bool is_cpu)
+{
+ if (!nandc->props->is_bam)
+ return;
+
+ if (is_cpu)
+ dma_sync_single_for_cpu(nandc->dev, nandc->reg_read_dma,
+ MAX_REG_RD *
+ sizeof(*nandc->reg_read_buf),
+ DMA_FROM_DEVICE);
+ else
+ dma_sync_single_for_device(nandc->dev, nandc->reg_read_dma,
+ MAX_REG_RD *
+ sizeof(*nandc->reg_read_buf),
+ DMA_FROM_DEVICE);
+}
+
+static __le32 *offset_to_nandc_reg(struct nandc_regs *regs, int offset)
+{
+ switch (offset) {
+ case NAND_FLASH_CMD:
+ return &regs->cmd;
+ case NAND_ADDR0:
+ return &regs->addr0;
+ case NAND_ADDR1:
+ return &regs->addr1;
+ case NAND_FLASH_CHIP_SELECT:
+ return &regs->chip_sel;
+ case NAND_EXEC_CMD:
+ return &regs->exec;
+ case NAND_FLASH_STATUS:
+ return &regs->clrflashstatus;
+ case NAND_DEV0_CFG0:
+ return &regs->cfg0;
+ case NAND_DEV0_CFG1:
+ return &regs->cfg1;
+ case NAND_DEV0_ECC_CFG:
+ return &regs->ecc_bch_cfg;
+ case NAND_READ_STATUS:
+ return &regs->clrreadstatus;
+ case NAND_DEV_CMD1:
+ return &regs->cmd1;
+ case NAND_DEV_CMD1_RESTORE:
+ return &regs->orig_cmd1;
+ case NAND_DEV_CMD_VLD:
+ return &regs->vld;
+ case NAND_DEV_CMD_VLD_RESTORE:
+ return &regs->orig_vld;
+ case NAND_EBI2_ECC_BUF_CFG:
+ return &regs->ecc_buf_cfg;
+ case NAND_READ_LOCATION_0:
+ return &regs->read_location0;
+ case NAND_READ_LOCATION_1:
+ return &regs->read_location1;
+ case NAND_READ_LOCATION_2:
+ return &regs->read_location2;
+ case NAND_READ_LOCATION_3:
+ return &regs->read_location3;
+ default:
+ return NULL;
+ }
+}
+
+static void nandc_set_reg(struct qcom_nand_controller *nandc, int offset,
+ u32 val)
+{
+ struct nandc_regs *regs = nandc->regs;
+ __le32 *reg;
+
+ reg = offset_to_nandc_reg(regs, offset);
+
+ if (reg)
+ *reg = cpu_to_le32(val);
+}
+
+/* helper to configure address register values */
+static void set_address(struct qcom_nand_host *host, u16 column, int page)
+{
+ struct nand_chip *chip = &host->chip;
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+
+ if (chip->options & NAND_BUSWIDTH_16)
+ column >>= 1;
+
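+ /* ADDR0 = page[15:0] | column; ADDR1 = page[23:16] */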
+ nandc_set_reg(nandc, NAND_ADDR0, page << 16 | column);
+ nandc_set_reg(nandc, NAND_ADDR1, page >> 16 & 0xff);
+}
+
+/*
+ * update_rw_regs: set up the read/write register values; these will be
+ * written to the NAND controller registers via DMA
+ *
+ * @num_cw: number of steps for the read/write operation
+ * @read: read or write operation
+ */
+static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read)
+{
+ struct nand_chip *chip = &host->chip;
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ u32 cmd, cfg0, cfg1, ecc_bch_cfg;
+
+ if (read) {
+ if (host->use_ecc)
+ cmd = OP_PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
+ else
+ cmd = OP_PAGE_READ | PAGE_ACC | LAST_PAGE;
+ } else {
+ cmd = OP_PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
+ }
+
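+ /* the CW_PER_PAGE field encodes the number of codewords minus one */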
+ if (host->use_ecc) {
+ cfg0 = (host->cfg0 & ~(7U << CW_PER_PAGE)) |
+ (num_cw - 1) << CW_PER_PAGE;
+
+ cfg1 = host->cfg1;
+ ecc_bch_cfg = host->ecc_bch_cfg;
+ } else {
+ cfg0 = (host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
+ (num_cw - 1) << CW_PER_PAGE;
+
+ cfg1 = host->cfg1_raw;
+ ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
+ }
+
+ nandc_set_reg(nandc, NAND_FLASH_CMD, cmd);
+ nandc_set_reg(nandc, NAND_DEV0_CFG0, cfg0);
+ nandc_set_reg(nandc, NAND_DEV0_CFG1, cfg1);
+ nandc_set_reg(nandc, NAND_DEV0_ECC_CFG, ecc_bch_cfg);
+ nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, host->ecc_buf_cfg);
+ nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
+ nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);
+ nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
+
+ if (read)
+ nandc_set_read_loc(nandc, 0, 0, host->use_ecc ?
+ host->cw_data : host->cw_size, 1);
+}
+
+/*
+ * Maps the scatter-gather list for DMA transfer and forms the DMA descriptor
+ * for BAM. This descriptor will be added to the NAND DMA descriptor queue,
+ * which is later submitted to the DMA engine.
+ */
+static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
+ struct dma_chan *chan,
+ unsigned long flags)
+{
+ struct desc_info *desc;
+ struct scatterlist *sgl;
+ unsigned int sgl_cnt;
+ int ret;
+ struct bam_transaction *bam_txn = nandc->bam_txn;
+ enum dma_transfer_direction dir_eng;
+ struct dma_async_tx_descriptor *dma_desc;
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ if (chan == nandc->cmd_chan) {
+ sgl = &bam_txn->cmd_sgl[bam_txn->cmd_sgl_start];
+ sgl_cnt = bam_txn->cmd_sgl_pos - bam_txn->cmd_sgl_start;
+ bam_txn->cmd_sgl_start = bam_txn->cmd_sgl_pos;
+ dir_eng = DMA_MEM_TO_DEV;
+ desc->dir = DMA_TO_DEVICE;
+ } else if (chan == nandc->tx_chan) {
+ sgl = &bam_txn->data_sgl[bam_txn->tx_sgl_start];
+ sgl_cnt = bam_txn->tx_sgl_pos - bam_txn->tx_sgl_start;
+ bam_txn->tx_sgl_start = bam_txn->tx_sgl_pos;
+ dir_eng = DMA_MEM_TO_DEV;
+ desc->dir = DMA_TO_DEVICE;
+ } else {
+ sgl = &bam_txn->data_sgl[bam_txn->rx_sgl_start];
+ sgl_cnt = bam_txn->rx_sgl_pos - bam_txn->rx_sgl_start;
+ bam_txn->rx_sgl_start = bam_txn->rx_sgl_pos;
+ dir_eng = DMA_DEV_TO_MEM;
+ desc->dir = DMA_FROM_DEVICE;
+ }
+
+ sg_mark_end(sgl + sgl_cnt - 1);
+ ret = dma_map_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
+ if (ret == 0) {
+ dev_err(nandc->dev, "failure in mapping desc\n");
+ kfree(desc);
+ return -ENOMEM;
+ }
+
+ desc->sgl_cnt = sgl_cnt;
+ desc->bam_sgl = sgl;
+
+ dma_desc = dmaengine_prep_slave_sg(chan, sgl, sgl_cnt, dir_eng,
+ flags);
+
+ if (!dma_desc) {
+ dev_err(nandc->dev, "failure in prep desc\n");
+ dma_unmap_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
+ kfree(desc);
+ return -EINVAL;
+ }
+
+ desc->dma_desc = dma_desc;
+
+ /* update last data/command descriptor */
+ if (chan == nandc->cmd_chan)
+ bam_txn->last_cmd_desc = dma_desc;
+ else
+ bam_txn->last_data_desc = dma_desc;
+
+ list_add_tail(&desc->node, &nandc->desc_list);
+
+ return 0;
+}
+
+/*
+ * Prepares the command descriptor for BAM DMA, which is used for NAND
+ * register reads and writes. The command descriptor requires the command
+ * to be formed as command elements, so this function takes command
+ * elements from the bam transaction ce array and fills them with the
+ * required data. A single SGL can contain multiple command elements, so
+ * NAND_BAM_NEXT_SGL is used to start a separate SGL after the current
+ * command element.
+ */
+static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
+ int reg_off, const void *vaddr,
+ int size, unsigned int flags)
+{
+ int bam_ce_size;
+ int i, ret;
+ struct bam_cmd_element *bam_ce_buffer;
+ struct bam_transaction *bam_txn = nandc->bam_txn;
+
+ bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];
+
+ /* fill the command desc */
+ for (i = 0; i < size; i++) {
+ if (read)
+ bam_prep_ce(&bam_ce_buffer[i],
+ nandc_reg_phys(nandc, reg_off + 4 * i),
+ BAM_READ_COMMAND,
+ reg_buf_dma_addr(nandc,
+ (__le32 *)vaddr + i));
+ else
+ bam_prep_ce_le32(&bam_ce_buffer[i],
+ nandc_reg_phys(nandc, reg_off + 4 * i),
+ BAM_WRITE_COMMAND,
+ *((__le32 *)vaddr + i));
+ }
+
+ bam_txn->bam_ce_pos += size;
+
+ /* use the separate sgl after this command */
+ if (flags & NAND_BAM_NEXT_SGL) {
+ bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
+ bam_ce_size = (bam_txn->bam_ce_pos -
+ bam_txn->bam_ce_start) *
+ sizeof(struct bam_cmd_element);
+ sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
+ bam_ce_buffer, bam_ce_size);
+ bam_txn->cmd_sgl_pos++;
+ bam_txn->bam_ce_start = bam_txn->bam_ce_pos;
+
+ if (flags & NAND_BAM_NWD) {
+ ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
+ DMA_PREP_FENCE |
+ DMA_PREP_CMD);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Prepares the data descriptor for BAM DMA which will be used for NAND
+ * data reads and writes.
+ */
+static int prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
+ const void *vaddr,
+ int size, unsigned int flags)
+{
+ int ret;
+ struct bam_transaction *bam_txn = nandc->bam_txn;
+
+ if (read) {
+ sg_set_buf(&bam_txn->data_sgl[bam_txn->rx_sgl_pos],
+ vaddr, size);
+ bam_txn->rx_sgl_pos++;
+ } else {
+ sg_set_buf(&bam_txn->data_sgl[bam_txn->tx_sgl_pos],
+ vaddr, size);
+ bam_txn->tx_sgl_pos++;
+
+ /*
+ * BAM only sets EOT for DMA_PREP_INTERRUPT, so unless NAND_BAM_NO_EOT
+ * is requested, form the DMA descriptor here.
+ */
+ if (!(flags & NAND_BAM_NO_EOT)) {
+ ret = prepare_bam_async_desc(nandc, nandc->tx_chan,
+ DMA_PREP_INTERRUPT);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
+ int reg_off, const void *vaddr, int size,
+ bool flow_control)
+{
+ struct desc_info *desc;
+ struct dma_async_tx_descriptor *dma_desc;
+ struct scatterlist *sgl;
+ struct dma_slave_config slave_conf;
+ enum dma_transfer_direction dir_eng;
+ int ret;
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ sgl = &desc->adm_sgl;
+
+ sg_init_one(sgl, vaddr, size);
+
+ if (read) {
+ dir_eng = DMA_DEV_TO_MEM;
+ desc->dir = DMA_FROM_DEVICE;
+ } else {
+ dir_eng = DMA_MEM_TO_DEV;
+ desc->dir = DMA_TO_DEVICE;
+ }
+
+ ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
+ if (ret == 0) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ memset(&slave_conf, 0x00, sizeof(slave_conf));
+
+ slave_conf.device_fc = flow_control;
+ if (read) {
+ slave_conf.src_maxburst = 16;
+ slave_conf.src_addr = nandc->base_dma + reg_off;
+ slave_conf.slave_id = nandc->data_crci;
+ } else {
+ slave_conf.dst_maxburst = 16;
+ slave_conf.dst_addr = nandc->base_dma + reg_off;
+ slave_conf.slave_id = nandc->cmd_crci;
+ }
+
+ ret = dmaengine_slave_config(nandc->chan, &slave_conf);
+ if (ret) {
+ dev_err(nandc->dev, "failed to configure dma channel\n");
+ goto err;
+ }
+
+ dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
+ if (!dma_desc) {
+ dev_err(nandc->dev, "failed to prepare desc\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ desc->dma_desc = dma_desc;
+
+ list_add_tail(&desc->node, &nandc->desc_list);
+
+ return 0;
+err:
+ kfree(desc);
+
+ return ret;
+}
+
+/*
+ * read_reg_dma: prepares a descriptor to read a given number of
+ * contiguous registers into the reg_read_buf buffer
+ *
+ * @first: offset of the first register in the contiguous block
+ * @num_regs: number of registers to read
+ * @flags: flags to control DMA descriptor preparation
+ */
+static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
+ int num_regs, unsigned int flags)
+{
+ bool flow_control = false;
+ void *vaddr;
+
+ vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
+ nandc->reg_read_pos += num_regs;
+
+ if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
+ first = dev_cmd_reg_addr(nandc, first);
+
+ if (nandc->props->is_bam)
+ return prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
+ num_regs, flags);
+
+ if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
+ flow_control = true;
+
+ return prep_adm_dma_desc(nandc, true, first, vaddr,
+ num_regs * sizeof(u32), flow_control);
+}
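+
+/*
+ * Usage sketch (illustrative): the command helpers below capture one
+ * FLASH_STATUS word after each executed command with
+ *
+ *	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+ *
+ * The word lands at the current reg_read_pos slot of reg_read_buf and is
+ * consumed with le32_to_cpu() after submit_descs() has completed and
+ * nandc_read_buffer_sync() has been called.
+ */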
+
+/*
+ * write_reg_dma: prepares a descriptor to write a given number of
+ * contiguous registers
+ *
+ * @first: offset of the first register in the contiguous block
+ * @num_regs: number of registers to write
+ * @flags: flags to control DMA descriptor preparation
+ */
+static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
+ int num_regs, unsigned int flags)
+{
+ bool flow_control = false;
+ struct nandc_regs *regs = nandc->regs;
+ void *vaddr;
+
+ vaddr = offset_to_nandc_reg(regs, first);
+
+ if (first == NAND_ERASED_CW_DETECT_CFG) {
+ if (flags & NAND_ERASED_CW_SET)
+ vaddr = &regs->erased_cw_detect_cfg_set;
+ else
+ vaddr = &regs->erased_cw_detect_cfg_clr;
+ }
+
+ if (first == NAND_EXEC_CMD)
+ flags |= NAND_BAM_NWD;
+
+ if (first == NAND_DEV_CMD1_RESTORE || first == NAND_DEV_CMD1)
+ first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD1);
+
+ if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
+ first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);
+
+ if (nandc->props->is_bam)
+ return prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
+ num_regs, flags);
+
+ if (first == NAND_FLASH_CMD)
+ flow_control = true;
+
+ return prep_adm_dma_desc(nandc, false, first, vaddr,
+ num_regs * sizeof(u32), flow_control);
+}
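+
+/*
+ * Usage sketch (illustrative): issuing a command to the flash is a write
+ * of NAND_FLASH_CMD followed by a write of NAND_EXEC_CMD, each closing its
+ * SGL on BAM:
+ *
+ *	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+ *	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+ *
+ * Note that write_reg_dma() itself adds NAND_BAM_NWD for NAND_EXEC_CMD.
+ */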
+
+/*
+ * read_data_dma: prepares a DMA descriptor to transfer data from the
+ * controller's internal buffer to the buffer 'vaddr'
+ *
+ * @reg_off: offset within the controller's data buffer
+ * @vaddr: virtual address of the buffer we want to write to
+ * @size: DMA transaction size in bytes
+ * @flags: flags to control DMA descriptor preparation
+ */
+static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
+ const u8 *vaddr, int size, unsigned int flags)
+{
+ if (nandc->props->is_bam)
+ return prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
+
+ return prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
+}
+
+/*
+ * write_data_dma: prepares a DMA descriptor to transfer data from
+ * 'vaddr' to the controller's internal buffer
+ *
+ * @reg_off: offset within the controller's data buffer
+ * @vaddr: virtual address of the buffer we want to read from
+ * @size: DMA transaction size in bytes
+ * @flags: flags to control DMA descriptor preparation
+ */
+static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
+ const u8 *vaddr, int size, unsigned int flags)
+{
+ if (nandc->props->is_bam)
+ return prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
+
+ return prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
+}
+
+/*
+ * Helper to prepare DMA descriptors for configuring registers
+ * before reading a NAND page.
+ */
+static void config_nand_page_read(struct qcom_nand_controller *nandc)
+{
+ write_reg_dma(nandc, NAND_ADDR0, 2, 0);
+ write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
+ write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, 0);
+ write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1, 0);
+ write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1,
+ NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
+}
+
+/*
+ * Helper to prepare DMA descriptors for configuring registers
+ * before reading each codeword in NAND page.
+ */
+static void
+config_nand_cw_read(struct qcom_nand_controller *nandc, bool use_ecc)
+{
+ if (nandc->props->is_bam)
+ write_reg_dma(nandc, NAND_READ_LOCATION_0, 4,
+ NAND_BAM_NEXT_SGL);
+
+ write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+ write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+
+ if (use_ecc) {
+ read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
+ read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
+ NAND_BAM_NEXT_SGL);
+ } else {
+ read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+ }
+}
+
+/*
+ * Helper to prepare DMA descriptors for configuring registers
+ * before reading a single codeword in a NAND page.
+ */
+static void
+config_nand_single_cw_page_read(struct qcom_nand_controller *nandc,
+ bool use_ecc)
+{
+ config_nand_page_read(nandc);
+ config_nand_cw_read(nandc, use_ecc);
+}
+
+/*
+ * Helper to prepare DMA descriptors for configuring registers
+ * before writing a NAND page.
+ */
+static void config_nand_page_write(struct qcom_nand_controller *nandc)
+{
+ write_reg_dma(nandc, NAND_ADDR0, 2, 0);
+ write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
+ write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1,
+ NAND_BAM_NEXT_SGL);
+}
+
+/*
+ * Helper to prepare DMA descriptors for configuring registers
+ * before writing each codeword in NAND page.
+ */
+static void config_nand_cw_write(struct qcom_nand_controller *nandc)
+{
+ write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+ write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+
+ read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+
+ write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
+ write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
+}
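+
+/*
+ * Putting the helpers together, a page write (see qcom_nandc_write_page()
+ * below) boils down to this sketch:
+ *
+ *	config_nand_page_write(nandc);
+ *	for (each codeword) {
+ *		write_data_dma(nandc, FLASH_BUF_ACC, ..., ..., ...);
+ *		config_nand_cw_write(nandc);
+ *	}
+ *	ret = submit_descs(nandc);
+ *	free_descs(nandc);
+ */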
+
+/*
+ * the following functions are used within chip->legacy.cmdfunc() to
+ * perform different NAND_CMD_* commands
+ */
+
+/* sets up descriptors for NAND_CMD_PARAM */
+static int nandc_param(struct qcom_nand_host *host)
+{
+ struct nand_chip *chip = &host->chip;
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+
+	/*
+	 * NAND_CMD_PARAM is called before we know much about the FLASH chip
+	 * in use. We configure the controller to perform a raw read of 512
+	 * bytes to read the ONFI params.
+	 */
+ nandc_set_reg(nandc, NAND_FLASH_CMD, OP_PAGE_READ | PAGE_ACC | LAST_PAGE);
+ nandc_set_reg(nandc, NAND_ADDR0, 0);
+ nandc_set_reg(nandc, NAND_ADDR1, 0);
+ nandc_set_reg(nandc, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
+ | 512 << UD_SIZE_BYTES
+ | 5 << NUM_ADDR_CYCLES
+ | 0 << SPARE_SIZE_BYTES);
+ nandc_set_reg(nandc, NAND_DEV0_CFG1, 7 << NAND_RECOVERY_CYCLES
+ | 0 << CS_ACTIVE_BSY
+ | 17 << BAD_BLOCK_BYTE_NUM
+ | 1 << BAD_BLOCK_IN_SPARE_AREA
+ | 2 << WR_RD_BSY_GAP
+ | 0 << WIDE_FLASH
+ | 1 << DEV0_CFG1_ECC_DISABLE);
+ nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE);
+
+ /* configure CMD1 and VLD for ONFI param probing */
+ nandc_set_reg(nandc, NAND_DEV_CMD_VLD,
+ (nandc->vld & ~READ_START_VLD));
+ nandc_set_reg(nandc, NAND_DEV_CMD1,
+ (nandc->cmd1 & ~(0xFF << READ_ADDR))
+ | NAND_CMD_PARAM << READ_ADDR);
+
+ nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
+
+ nandc_set_reg(nandc, NAND_DEV_CMD1_RESTORE, nandc->cmd1);
+ nandc_set_reg(nandc, NAND_DEV_CMD_VLD_RESTORE, nandc->vld);
+ nandc_set_read_loc(nandc, 0, 0, 512, 1);
+
+ write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1, 0);
+ write_reg_dma(nandc, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
+
+ nandc->buf_count = 512;
+ memset(nandc->data_buffer, 0xff, nandc->buf_count);
+
+ config_nand_single_cw_page_read(nandc, false);
+
+ read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
+ nandc->buf_count, 0);
+
+ /* restore CMD1 and VLD regs */
+ write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1, 0);
+ write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1, NAND_BAM_NEXT_SGL);
+
+ return 0;
+}
+
+/* sets up descriptors for NAND_CMD_ERASE1 */
+static int erase_block(struct qcom_nand_host *host, int page_addr)
+{
+ struct nand_chip *chip = &host->chip;
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+
+ nandc_set_reg(nandc, NAND_FLASH_CMD,
+ OP_BLOCK_ERASE | PAGE_ACC | LAST_PAGE);
+ nandc_set_reg(nandc, NAND_ADDR0, page_addr);
+ nandc_set_reg(nandc, NAND_ADDR1, 0);
+ nandc_set_reg(nandc, NAND_DEV0_CFG0,
+ host->cfg0_raw & ~(7 << CW_PER_PAGE));
+ nandc_set_reg(nandc, NAND_DEV0_CFG1, host->cfg1_raw);
+ nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
+ nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
+ nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);
+
+ write_reg_dma(nandc, NAND_FLASH_CMD, 3, NAND_BAM_NEXT_SGL);
+ write_reg_dma(nandc, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
+ write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+
+ read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+
+ write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
+ write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
+
+ return 0;
+}
+
+/* sets up descriptors for NAND_CMD_READID */
+static int read_id(struct qcom_nand_host *host, int column)
+{
+ struct nand_chip *chip = &host->chip;
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+
+ if (column == -1)
+ return 0;
+
+ nandc_set_reg(nandc, NAND_FLASH_CMD, OP_FETCH_ID);
+ nandc_set_reg(nandc, NAND_ADDR0, column);
+ nandc_set_reg(nandc, NAND_ADDR1, 0);
+ nandc_set_reg(nandc, NAND_FLASH_CHIP_SELECT,
+ nandc->props->is_bam ? 0 : DM_EN);
+ nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
+
+ write_reg_dma(nandc, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
+ write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+
+ read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);
+
+ return 0;
+}
+
+/* sets up descriptors for NAND_CMD_RESET */
+static int reset(struct qcom_nand_host *host)
+{
+ struct nand_chip *chip = &host->chip;
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+
+ nandc_set_reg(nandc, NAND_FLASH_CMD, OP_RESET_DEVICE);
+ nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
+
+ write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+ write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+
+ read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+
+ return 0;
+}
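+
+/*
+ * Orientation note: the four helpers above only stage register values with
+ * nandc_set_reg() and queue write_reg_dma()/read_reg_dma() descriptors;
+ * nothing reaches the hardware until qcom_nandc_command() runs
+ * submit_descs() further below.
+ */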
+
+/* helpers to submit/free our list of dma descriptors */
+static int submit_descs(struct qcom_nand_controller *nandc)
+{
+ struct desc_info *desc;
+ dma_cookie_t cookie = 0;
+ struct bam_transaction *bam_txn = nandc->bam_txn;
+ int r;
+
+ if (nandc->props->is_bam) {
+ if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
+ r = prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
+ if (r)
+ return r;
+ }
+
+ if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
+ r = prepare_bam_async_desc(nandc, nandc->tx_chan,
+ DMA_PREP_INTERRUPT);
+ if (r)
+ return r;
+ }
+
+ if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
+ r = prepare_bam_async_desc(nandc, nandc->cmd_chan,
+ DMA_PREP_CMD);
+ if (r)
+ return r;
+ }
+ }
+
+ list_for_each_entry(desc, &nandc->desc_list, node)
+ cookie = dmaengine_submit(desc->dma_desc);
+
+ if (nandc->props->is_bam) {
+ bam_txn->last_cmd_desc->callback = qpic_bam_dma_done;
+ bam_txn->last_cmd_desc->callback_param = bam_txn;
+ if (bam_txn->last_data_desc) {
+ bam_txn->last_data_desc->callback = qpic_bam_dma_done;
+ bam_txn->last_data_desc->callback_param = bam_txn;
+ bam_txn->wait_second_completion = true;
+ }
+
+ dma_async_issue_pending(nandc->tx_chan);
+ dma_async_issue_pending(nandc->rx_chan);
+ dma_async_issue_pending(nandc->cmd_chan);
+
+ if (!wait_for_completion_timeout(&bam_txn->txn_done,
+ QPIC_NAND_COMPLETION_TIMEOUT))
+ return -ETIMEDOUT;
+ } else {
+ if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void free_descs(struct qcom_nand_controller *nandc)
+{
+ struct desc_info *desc, *n;
+
+ list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
+ list_del(&desc->node);
+
+ if (nandc->props->is_bam)
+ dma_unmap_sg(nandc->dev, desc->bam_sgl,
+ desc->sgl_cnt, desc->dir);
+ else
+ dma_unmap_sg(nandc->dev, &desc->adm_sgl, 1,
+ desc->dir);
+
+ kfree(desc);
+ }
+}
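+
+/*
+ * Usage note: every queuing helper above only *prepares* descriptors, so a
+ * typical operation ends with
+ *
+ *	ret = submit_descs(nandc);
+ *	free_descs(nandc);
+ *
+ * where free_descs() must run even when submit_descs() fails, so that the
+ * DMA mappings taken during preparation are always released.
+ */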
+
+/* reset the register read buffer for next NAND operation */
+static void clear_read_regs(struct qcom_nand_controller *nandc)
+{
+ nandc->reg_read_pos = 0;
+ nandc_read_buffer_sync(nandc, false);
+}
+
+static void pre_command(struct qcom_nand_host *host, int command)
+{
+ struct nand_chip *chip = &host->chip;
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+
+ nandc->buf_count = 0;
+ nandc->buf_start = 0;
+ host->use_ecc = false;
+ host->last_command = command;
+
+ clear_read_regs(nandc);
+
+ if (command == NAND_CMD_RESET || command == NAND_CMD_READID ||
+ command == NAND_CMD_PARAM || command == NAND_CMD_ERASE1)
+ clear_bam_transaction(nandc);
+}
+
+/*
+ * this is called after NAND_CMD_PAGEPROG and NAND_CMD_ERASE1 to set our
+ * privately maintained status byte; this status byte can be read after
+ * NAND_CMD_STATUS is called
+ */
+static void parse_erase_write_errors(struct qcom_nand_host *host, int command)
+{
+ struct nand_chip *chip = &host->chip;
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int num_cw;
+ int i;
+
+ num_cw = command == NAND_CMD_PAGEPROG ? ecc->steps : 1;
+ nandc_read_buffer_sync(nandc, true);
+
+ for (i = 0; i < num_cw; i++) {
+ u32 flash_status = le32_to_cpu(nandc->reg_read_buf[i]);
+
+ if (flash_status & FS_MPU_ERR)
+ host->status &= ~NAND_STATUS_WP;
+
+ if (flash_status & FS_OP_ERR || (i == (num_cw - 1) &&
+ (flash_status &
+ FS_DEVICE_STS_ERR)))
+ host->status |= NAND_STATUS_FAIL;
+ }
+}
+
+static void post_command(struct qcom_nand_host *host, int command)
+{
+ struct nand_chip *chip = &host->chip;
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+
+ switch (command) {
+ case NAND_CMD_READID:
+ nandc_read_buffer_sync(nandc, true);
+ memcpy(nandc->data_buffer, nandc->reg_read_buf,
+ nandc->buf_count);
+ break;
+ case NAND_CMD_PAGEPROG:
+ case NAND_CMD_ERASE1:
+ parse_erase_write_errors(host, command);
+ break;
+ default:
+ break;
+ }
+}
+
+/*
+ * Implements chip->legacy.cmdfunc. It's only used for a limited set of
+ * commands. The rest of the commands wouldn't be called by upper layers.
+ * For example, NAND_CMD_READOOB would never be called because we have our own
+ * versions of read_oob ops for nand_ecc_ctrl.
+ */
+static void qcom_nandc_command(struct nand_chip *chip, unsigned int command,
+ int column, int page_addr)
+{
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ bool wait = false;
+ int ret = 0;
+
+ pre_command(host, command);
+
+ switch (command) {
+ case NAND_CMD_RESET:
+ ret = reset(host);
+ wait = true;
+ break;
+
+ case NAND_CMD_READID:
+ nandc->buf_count = 4;
+ ret = read_id(host, column);
+ wait = true;
+ break;
+
+ case NAND_CMD_PARAM:
+ ret = nandc_param(host);
+ wait = true;
+ break;
+
+ case NAND_CMD_ERASE1:
+ ret = erase_block(host, page_addr);
+ wait = true;
+ break;
+
+ case NAND_CMD_READ0:
+ /* we read the entire page for now */
+ WARN_ON(column != 0);
+
+ host->use_ecc = true;
+ set_address(host, 0, page_addr);
+ update_rw_regs(host, ecc->steps, true);
+ break;
+
+ case NAND_CMD_SEQIN:
+ WARN_ON(column != 0);
+ set_address(host, 0, page_addr);
+ break;
+
+ case NAND_CMD_PAGEPROG:
+ case NAND_CMD_STATUS:
+ case NAND_CMD_NONE:
+ default:
+ break;
+ }
+
+ if (ret) {
+ dev_err(nandc->dev, "failure executing command %d\n",
+ command);
+ free_descs(nandc);
+ return;
+ }
+
+ if (wait) {
+ ret = submit_descs(nandc);
+ if (ret)
+ dev_err(nandc->dev,
+ "failure submitting descs for command %d\n",
+ command);
+ }
+
+ free_descs(nandc);
+
+ post_command(host, command);
+}
+
+/*
+ * when using BCH ECC, the HW flags an error in NAND_FLASH_STATUS if it read
+ * an erased CW, and reports an erased CW in NAND_ERASED_CW_DETECT_STATUS.
+ *
+ * when using RS ECC, the HW reports the same errors when reading an erased CW,
+ * but it notifies that it is an erased CW by placing special characters at
+ * certain offsets in the buffer.
+ *
+ * verify if the page is erased or not, and fix up the page for RS ECC by
+ * replacing the special characters with 0xff.
+ */
+static bool erased_chunk_check_and_fixup(u8 *data_buf, int data_len)
+{
+ u8 empty1, empty2;
+
+	/*
+	 * an erased page flags an error in NAND_FLASH_STATUS; check if the page
+	 * is erased by looking for 0x54s at offsets 3 and 175 from the
+	 * beginning of each codeword
+	 */
+
+ empty1 = data_buf[3];
+ empty2 = data_buf[175];
+
+	/*
+	 * if the erased codeword markers exist, override them with 0xffs
+	 */
+ if ((empty1 == 0x54 && empty2 == 0xff) ||
+ (empty1 == 0xff && empty2 == 0x54)) {
+ data_buf[3] = 0xff;
+ data_buf[175] = 0xff;
+ }
+
+ /*
+ * check if the entire chunk contains 0xffs or not. if it doesn't, then
+ * restore the original values at the special offsets
+ */
+ if (memchr_inv(data_buf, 0xff, data_len)) {
+ data_buf[3] = empty1;
+ data_buf[175] = empty2;
+
+ return false;
+ }
+
+ return true;
+}
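+
+/*
+ * Worked example (illustrative): a fully erased RS chunk reads back as
+ * 0xff everywhere except for a 0x54 marker at offset 3 or 175. The
+ * function above rewrites the markers to 0xff, confirms via memchr_inv()
+ * that the whole chunk is now 0xff, and returns true; for any other
+ * content it restores the original bytes and returns false.
+ */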
+
+struct read_stats {
+ __le32 flash;
+ __le32 buffer;
+ __le32 erased_cw;
+};
+
+/* reads back FLASH_STATUS register set by the controller */
+static int check_flash_errors(struct qcom_nand_host *host, int cw_cnt)
+{
+ struct nand_chip *chip = &host->chip;
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ int i;
+
+ nandc_read_buffer_sync(nandc, true);
+
+ for (i = 0; i < cw_cnt; i++) {
+ u32 flash = le32_to_cpu(nandc->reg_read_buf[i]);
+
+ if (flash & (FS_OP_ERR | FS_MPU_ERR))
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/* performs raw read for one codeword */
+static int
+qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ u8 *data_buf, u8 *oob_buf, int page, int cw)
+{
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int data_size1, data_size2, oob_size1, oob_size2;
+ int ret, reg_off = FLASH_BUF_ACC, read_loc = 0;
+
+ nand_read_page_op(chip, page, 0, NULL, 0);
+ host->use_ecc = false;
+
+ clear_bam_transaction(nandc);
+ set_address(host, host->cw_size * cw, page);
+ update_rw_regs(host, 1, true);
+ config_nand_page_read(nandc);
+
+ data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
+ oob_size1 = host->bbm_size;
+
+ if (cw == (ecc->steps - 1)) {
+ data_size2 = ecc->size - data_size1 -
+ ((ecc->steps - 1) * 4);
+ oob_size2 = (ecc->steps * 4) + host->ecc_bytes_hw +
+ host->spare_bytes;
+ } else {
+ data_size2 = host->cw_data - data_size1;
+ oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
+ }
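+
+	/*
+	 * Worked example with assumed geometry (2K page, x8 bus, RS ECC:
+	 * cw_size = 528, cw_data = 516, ecc->steps = 4, ecc_bytes_hw = 10,
+	 * spare_bytes = 1, bbm_size = 1): data_size1 = 2048 - 3 * 528 = 464
+	 * and oob_size1 = 1; for cw 0..2, data_size2 = 516 - 464 = 52 and
+	 * oob_size2 = 11, while for the last cw data_size2 = 512 - 464 - 12
+	 * = 36 and oob_size2 = 16 + 10 + 1 = 27. Either way the four
+	 * segments sum to 528 bytes, i.e. one full raw codeword.
+	 */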
+
+ if (nandc->props->is_bam) {
+ nandc_set_read_loc(nandc, 0, read_loc, data_size1, 0);
+ read_loc += data_size1;
+
+ nandc_set_read_loc(nandc, 1, read_loc, oob_size1, 0);
+ read_loc += oob_size1;
+
+ nandc_set_read_loc(nandc, 2, read_loc, data_size2, 0);
+ read_loc += data_size2;
+
+ nandc_set_read_loc(nandc, 3, read_loc, oob_size2, 1);
+ }
+
+ config_nand_cw_read(nandc, false);
+
+ read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
+ reg_off += data_size1;
+
+ read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
+ reg_off += oob_size1;
+
+ read_data_dma(nandc, reg_off, data_buf + data_size1, data_size2, 0);
+ reg_off += data_size2;
+
+ read_data_dma(nandc, reg_off, oob_buf + oob_size1, oob_size2, 0);
+
+ ret = submit_descs(nandc);
+ free_descs(nandc);
+ if (ret) {
+ dev_err(nandc->dev, "failure to read raw cw %d\n", cw);
+ return ret;
+ }
+
+ return check_flash_errors(host, 1);
+}
+
+/*
+ * Bitflips can happen in erased codewords too, so this function counts the
+ * number of 0 bits in each CW for which the ECC engine returns an
+ * uncorrectable error. The page is assumed to be erased if this count is
+ * less than or equal to ecc->strength for each CW.
+ *
+ * 1. Both DATA and OOB need to be checked for the number of 0 bits. The
+ *    top-level API can be called with only the data buf or OOB buf, so use
+ *    chip->data_buf if the data buf is null and chip->oob_poi if the oob
+ *    buf is null for copying the raw bytes.
+ * 2. Perform a raw read for all the CWs which have uncorrectable errors.
+ * 3. For each CW, check the number of 0 bits in cw_data and usable OOB
+ *    bytes. Bitflips in the BBM and spare bytes won't affect the ECC, so
+ *    don't check the number of bitflips in this area.
+ */
+static int
+check_for_erased_page(struct qcom_nand_host *host, u8 *data_buf,
+ u8 *oob_buf, unsigned long uncorrectable_cws,
+ int page, unsigned int max_bitflips)
+{
+ struct nand_chip *chip = &host->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ u8 *cw_data_buf, *cw_oob_buf;
+ int cw, data_size, oob_size, ret = 0;
+
+ if (!data_buf)
+ data_buf = nand_get_data_buf(chip);
+
+ if (!oob_buf) {
+ nand_get_data_buf(chip);
+ oob_buf = chip->oob_poi;
+ }
+
+ for_each_set_bit(cw, &uncorrectable_cws, ecc->steps) {
+ if (cw == (ecc->steps - 1)) {
+ data_size = ecc->size - ((ecc->steps - 1) * 4);
+ oob_size = (ecc->steps * 4) + host->ecc_bytes_hw;
+ } else {
+ data_size = host->cw_data;
+ oob_size = host->ecc_bytes_hw;
+ }
+
+ /* determine starting buffer address for current CW */
+ cw_data_buf = data_buf + (cw * host->cw_data);
+ cw_oob_buf = oob_buf + (cw * ecc->bytes);
+
+ ret = qcom_nandc_read_cw_raw(mtd, chip, cw_data_buf,
+ cw_oob_buf, page, cw);
+ if (ret)
+ return ret;
+
+ /*
+ * make sure it isn't an erased page reported
+ * as not-erased by HW because of a few bitflips
+ */
+ ret = nand_check_erased_ecc_chunk(cw_data_buf, data_size,
+ cw_oob_buf + host->bbm_size,
+ oob_size, NULL,
+ 0, ecc->strength);
+ if (ret < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += ret;
+ max_bitflips = max_t(unsigned int, max_bitflips, ret);
+ }
+ }
+
+ return max_bitflips;
+}
+
+/*
+ * reads back status registers set by the controller to notify page read
+ * errors. this is equivalent to what 'ecc->correct()' would do.
+ */
+static int parse_read_errors(struct qcom_nand_host *host, u8 *data_buf,
+ u8 *oob_buf, int page)
+{
+ struct nand_chip *chip = &host->chip;
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ unsigned int max_bitflips = 0, uncorrectable_cws = 0;
+ struct read_stats *buf;
+ bool flash_op_err = false, erased;
+ int i;
+ u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;
+
+ buf = (struct read_stats *)nandc->reg_read_buf;
+ nandc_read_buffer_sync(nandc, true);
+
+ for (i = 0; i < ecc->steps; i++, buf++) {
+ u32 flash, buffer, erased_cw;
+ int data_len, oob_len;
+
+ if (i == (ecc->steps - 1)) {
+ data_len = ecc->size - ((ecc->steps - 1) << 2);
+ oob_len = ecc->steps << 2;
+ } else {
+ data_len = host->cw_data;
+ oob_len = 0;
+ }
+
+ flash = le32_to_cpu(buf->flash);
+ buffer = le32_to_cpu(buf->buffer);
+ erased_cw = le32_to_cpu(buf->erased_cw);
+
+ /*
+ * Check ECC failure for each codeword. ECC failure can
+ * happen in either of the following conditions
+ * 1. If number of bitflips are greater than ECC engine
+ * capability.
+ * 2. If this codeword contains all 0xff for which erased
+ * codeword detection check will be done.
+ */
+ if ((flash & FS_OP_ERR) && (buffer & BS_UNCORRECTABLE_BIT)) {
+ /*
+ * For BCH ECC, ignore erased codeword errors, if
+ * ERASED_CW bits are set.
+ */
+ if (host->bch_enabled) {
+				erased = (erased_cw & ERASED_CW) == ERASED_CW;
+ /*
+ * For RS ECC, HW reports the erased CW by placing
+ * special characters at certain offsets in the buffer.
+ * These special characters will be valid only if
+ * complete page is read i.e. data_buf is not NULL.
+ */
+ } else if (data_buf) {
+ erased = erased_chunk_check_and_fixup(data_buf,
+ data_len);
+ } else {
+ erased = false;
+ }
+
+ if (!erased)
+ uncorrectable_cws |= BIT(i);
+ /*
+ * Check if MPU or any other operational error (timeout,
+ * device failure, etc.) happened for this codeword and
+ * make flash_op_err true. If flash_op_err is set, then
+ * EIO will be returned for page read.
+ */
+ } else if (flash & (FS_OP_ERR | FS_MPU_ERR)) {
+ flash_op_err = true;
+ /*
+ * No ECC or operational errors happened. Check the number of
+ * bits corrected and update the ecc_stats.corrected.
+ */
+ } else {
+ unsigned int stat;
+
+ stat = buffer & BS_CORRECTABLE_ERR_MSK;
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max(max_bitflips, stat);
+ }
+
+ if (data_buf)
+ data_buf += data_len;
+ if (oob_buf)
+ oob_buf += oob_len + ecc->bytes;
+ }
+
+ if (flash_op_err)
+ return -EIO;
+
+ if (!uncorrectable_cws)
+ return max_bitflips;
+
+ return check_for_erased_page(host, data_buf_start, oob_buf_start,
+ uncorrectable_cws, page,
+ max_bitflips);
+}
+
+/*
+ * helper to perform the actual page read operation, used by ecc->read_page(),
+ * ecc->read_oob()
+ */
+static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
+ u8 *oob_buf, int page)
+{
+ struct nand_chip *chip = &host->chip;
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;
+ int i, ret;
+
+ config_nand_page_read(nandc);
+
+ /* queue cmd descs for each codeword */
+ for (i = 0; i < ecc->steps; i++) {
+ int data_size, oob_size;
+
+ if (i == (ecc->steps - 1)) {
+ data_size = ecc->size - ((ecc->steps - 1) << 2);
+ oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
+ host->spare_bytes;
+ } else {
+ data_size = host->cw_data;
+ oob_size = host->ecc_bytes_hw + host->spare_bytes;
+ }
+
+ if (nandc->props->is_bam) {
+ if (data_buf && oob_buf) {
+ nandc_set_read_loc(nandc, 0, 0, data_size, 0);
+ nandc_set_read_loc(nandc, 1, data_size,
+ oob_size, 1);
+ } else if (data_buf) {
+ nandc_set_read_loc(nandc, 0, 0, data_size, 1);
+ } else {
+ nandc_set_read_loc(nandc, 0, data_size,
+ oob_size, 1);
+ }
+ }
+
+ config_nand_cw_read(nandc, true);
+
+ if (data_buf)
+ read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
+ data_size, 0);
+
+ /*
+ * when ecc is enabled, the controller doesn't read the real
+ * or dummy bad block markers in each chunk. To maintain a
+ * consistent layout across RAW and ECC reads, we just
+		 * leave the real/dummy BBM offsets empty (i.e., filled with
+		 * 0xffs)
+ */
+ if (oob_buf) {
+ int j;
+
+ for (j = 0; j < host->bbm_size; j++)
+ *oob_buf++ = 0xff;
+
+ read_data_dma(nandc, FLASH_BUF_ACC + data_size,
+ oob_buf, oob_size, 0);
+ }
+
+ if (data_buf)
+ data_buf += data_size;
+ if (oob_buf)
+ oob_buf += oob_size;
+ }
+
+ ret = submit_descs(nandc);
+ free_descs(nandc);
+
+ if (ret) {
+ dev_err(nandc->dev, "failure to read page/oob\n");
+ return ret;
+ }
+
+ return parse_read_errors(host, data_buf_start, oob_buf_start, page);
+}
+
+/*
+ * a helper that copies the last step/codeword of a page (containing free oob)
+ * into our local buffer
+ */
+static int copy_last_cw(struct qcom_nand_host *host, int page)
+{
+ struct nand_chip *chip = &host->chip;
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int size;
+ int ret;
+
+ clear_read_regs(nandc);
+
+ size = host->use_ecc ? host->cw_data : host->cw_size;
+
+ /* prepare a clean read buffer */
+ memset(nandc->data_buffer, 0xff, size);
+
+ set_address(host, host->cw_size * (ecc->steps - 1), page);
+ update_rw_regs(host, 1, true);
+
+ config_nand_single_cw_page_read(nandc, host->use_ecc);
+
+ read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);
+
+ ret = submit_descs(nandc);
+ if (ret)
+ dev_err(nandc->dev, "failed to copy last codeword\n");
+
+ free_descs(nandc);
+
+ return ret;
+}
+
+/* implements ecc->read_page() */
+static int qcom_nandc_read_page(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ u8 *data_buf, *oob_buf = NULL;
+
+ nand_read_page_op(chip, page, 0, NULL, 0);
+ data_buf = buf;
+ oob_buf = oob_required ? chip->oob_poi : NULL;
+
+ clear_bam_transaction(nandc);
+
+ return read_page_ecc(host, data_buf, oob_buf, page);
+}
+
+/* implements ecc->read_page_raw() */
+static int qcom_nandc_read_page_raw(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int cw, ret;
+ u8 *data_buf = buf, *oob_buf = chip->oob_poi;
+
+ for (cw = 0; cw < ecc->steps; cw++) {
+ ret = qcom_nandc_read_cw_raw(mtd, chip, data_buf, oob_buf,
+ page, cw);
+ if (ret)
+ return ret;
+
+ data_buf += host->cw_data;
+ oob_buf += ecc->bytes;
+ }
+
+ return 0;
+}
+
+/* implements ecc->read_oob() */
+static int qcom_nandc_read_oob(struct nand_chip *chip, int page)
+{
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+ clear_read_regs(nandc);
+ clear_bam_transaction(nandc);
+
+ host->use_ecc = true;
+ set_address(host, 0, page);
+ update_rw_regs(host, ecc->steps, true);
+
+ return read_page_ecc(host, NULL, chip->oob_poi, page);
+}
+
+/* implements ecc->write_page() */
+static int qcom_nandc_write_page(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
+{
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ u8 *data_buf, *oob_buf;
+ int i, ret;
+
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
+ clear_read_regs(nandc);
+ clear_bam_transaction(nandc);
+
+ data_buf = (u8 *)buf;
+ oob_buf = chip->oob_poi;
+
+ host->use_ecc = true;
+ update_rw_regs(host, ecc->steps, false);
+ config_nand_page_write(nandc);
+
+ for (i = 0; i < ecc->steps; i++) {
+ int data_size, oob_size;
+
+ if (i == (ecc->steps - 1)) {
+ data_size = ecc->size - ((ecc->steps - 1) << 2);
+ oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
+ host->spare_bytes;
+ } else {
+ data_size = host->cw_data;
+ oob_size = ecc->bytes;
+ }
+
+
+ write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
+ i == (ecc->steps - 1) ? NAND_BAM_NO_EOT : 0);
+
+ /*
+ * when ECC is enabled, we don't really need to write anything
+ * to oob for the first n - 1 codewords since these oob regions
+ * just contain ECC bytes that's written by the controller
+ * itself. For the last codeword, we skip the bbm positions and
+ * write to the free oob area.
+ */
+ if (i == (ecc->steps - 1)) {
+ oob_buf += host->bbm_size;
+
+ write_data_dma(nandc, FLASH_BUF_ACC + data_size,
+ oob_buf, oob_size, 0);
+ }
+
+ config_nand_cw_write(nandc);
+
+ data_buf += data_size;
+ oob_buf += oob_size;
+ }
+
+ ret = submit_descs(nandc);
+ if (ret)
+ dev_err(nandc->dev, "failure to write page\n");
+
+ free_descs(nandc);
+
+ if (!ret)
+ ret = nand_prog_page_end_op(chip);
+
+ return ret;
+}
+
+/* implements ecc->write_page_raw() */
+static int qcom_nandc_write_page_raw(struct nand_chip *chip,
+ const uint8_t *buf, int oob_required,
+ int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ u8 *data_buf, *oob_buf;
+ int i, ret;
+
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ clear_read_regs(nandc);
+ clear_bam_transaction(nandc);
+
+ data_buf = (u8 *)buf;
+ oob_buf = chip->oob_poi;
+
+ host->use_ecc = false;
+ update_rw_regs(host, ecc->steps, false);
+ config_nand_page_write(nandc);
+
+ for (i = 0; i < ecc->steps; i++) {
+ int data_size1, data_size2, oob_size1, oob_size2;
+ int reg_off = FLASH_BUF_ACC;
+
+ data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
+ oob_size1 = host->bbm_size;
+
+ if (i == (ecc->steps - 1)) {
+ data_size2 = ecc->size - data_size1 -
+ ((ecc->steps - 1) << 2);
+ oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw +
+ host->spare_bytes;
+ } else {
+ data_size2 = host->cw_data - data_size1;
+ oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
+ }
+
+ write_data_dma(nandc, reg_off, data_buf, data_size1,
+ NAND_BAM_NO_EOT);
+ reg_off += data_size1;
+ data_buf += data_size1;
+
+ write_data_dma(nandc, reg_off, oob_buf, oob_size1,
+ NAND_BAM_NO_EOT);
+ reg_off += oob_size1;
+ oob_buf += oob_size1;
+
+ write_data_dma(nandc, reg_off, data_buf, data_size2,
+ NAND_BAM_NO_EOT);
+ reg_off += data_size2;
+ data_buf += data_size2;
+
+ write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
+ oob_buf += oob_size2;
+
+ config_nand_cw_write(nandc);
+ }
+
+ ret = submit_descs(nandc);
+ if (ret)
+ dev_err(nandc->dev, "failure to write raw page\n");
+
+ free_descs(nandc);
+
+ if (!ret)
+ ret = nand_prog_page_end_op(chip);
+
+ return ret;
+}
+
+/*
+ * implements ecc->write_oob()
+ *
+ * the NAND controller cannot write only data or only OOB within a codeword
+ * since ECC is calculated for the combined codeword. So update the OOB from
+ * chip->oob_poi, and pad the data area with 0xFF before writing.
+ */
+static int qcom_nandc_write_oob(struct nand_chip *chip, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ u8 *oob = chip->oob_poi;
+ int data_size, oob_size;
+ int ret;
+
+ host->use_ecc = true;
+ clear_bam_transaction(nandc);
+
+ /* calculate the data and oob size for the last codeword/step */
+ data_size = ecc->size - ((ecc->steps - 1) << 2);
+ oob_size = mtd->oobavail;
+
+ memset(nandc->data_buffer, 0xff, host->cw_data);
+ /* override new oob content to last codeword */
+ mtd_ooblayout_get_databytes(mtd, nandc->data_buffer + data_size, oob,
+ 0, mtd->oobavail);
+
+ set_address(host, host->cw_size * (ecc->steps - 1), page);
+ update_rw_regs(host, 1, false);
+
+ config_nand_page_write(nandc);
+ write_data_dma(nandc, FLASH_BUF_ACC,
+ nandc->data_buffer, data_size + oob_size, 0);
+ config_nand_cw_write(nandc);
+
+ ret = submit_descs(nandc);
+
+ free_descs(nandc);
+
+ if (ret) {
+ dev_err(nandc->dev, "failure to write oob\n");
+ return -EIO;
+ }
+
+ return nand_prog_page_end_op(chip);
+}
+
+static int qcom_nandc_block_bad(struct nand_chip *chip, loff_t ofs)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int page, ret, bbpos, bad = 0;
+
+ page = (int)(ofs >> chip->page_shift) & chip->pagemask;
+
+ /*
+	 * configure registers for a raw sub page read; the address is set to
+	 * the beginning of the last codeword. we don't care about reading the
+	 * ecc portion of oob, we just want the first few bytes from this
+	 * codeword that contain the BBM
+ */
+ host->use_ecc = false;
+
+ clear_bam_transaction(nandc);
+ ret = copy_last_cw(host, page);
+ if (ret)
+ goto err;
+
+ if (check_flash_errors(host, 1)) {
+ dev_warn(nandc->dev, "error when trying to read BBM\n");
+ goto err;
+ }
+
+ bbpos = mtd->writesize - host->cw_size * (ecc->steps - 1);
+
+ bad = nandc->data_buffer[bbpos] != 0xff;
+
+ if (chip->options & NAND_BUSWIDTH_16)
+ bad = bad || (nandc->data_buffer[bbpos + 1] != 0xff);
+err:
+ return bad;
+}
+
+static int qcom_nandc_block_markbad(struct nand_chip *chip, loff_t ofs)
+{
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int page, ret;
+
+ clear_read_regs(nandc);
+ clear_bam_transaction(nandc);
+
+ /*
+ * to mark the BBM as bad, we flash the entire last codeword with 0s.
+ * we don't care about the rest of the content in the codeword since
+ * we aren't going to use this block again
+ */
+ memset(nandc->data_buffer, 0x00, host->cw_size);
+
+ page = (int)(ofs >> chip->page_shift) & chip->pagemask;
+
+ /* prepare write */
+ host->use_ecc = false;
+ set_address(host, host->cw_size * (ecc->steps - 1), page);
+ update_rw_regs(host, 1, false);
+
+ config_nand_page_write(nandc);
+ write_data_dma(nandc, FLASH_BUF_ACC,
+ nandc->data_buffer, host->cw_size, 0);
+ config_nand_cw_write(nandc);
+
+ ret = submit_descs(nandc);
+
+ free_descs(nandc);
+
+ if (ret) {
+ dev_err(nandc->dev, "failure to update BBM\n");
+ return -EIO;
+ }
+
+ return nand_prog_page_end_op(chip);
+}
+
+/*
+ * the three functions below implement chip->legacy.read_byte(),
+ * chip->legacy.read_buf() and chip->legacy.write_buf() respectively. these
+ * aren't used for reading/writing page data; they are used for smaller data
+ * like reading id, status, etc.
+ */
+static uint8_t qcom_nandc_read_byte(struct nand_chip *chip)
+{
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ u8 *buf = nandc->data_buffer;
+ u8 ret = 0x0;
+
+ if (host->last_command == NAND_CMD_STATUS) {
+ ret = host->status;
+
+ host->status = NAND_STATUS_READY | NAND_STATUS_WP;
+
+ return ret;
+ }
+
+ if (nandc->buf_start < nandc->buf_count)
+ ret = buf[nandc->buf_start++];
+
+ return ret;
+}
+
+static void qcom_nandc_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
+{
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);
+
+ memcpy(buf, nandc->data_buffer + nandc->buf_start, real_len);
+ nandc->buf_start += real_len;
+}
+
+static void qcom_nandc_write_buf(struct nand_chip *chip, const uint8_t *buf,
+ int len)
+{
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);
+
+ memcpy(nandc->data_buffer + nandc->buf_start, buf, real_len);
+
+ nandc->buf_start += real_len;
+}
+
+/* we support only one external chip for now */
+static void qcom_nandc_select_chip(struct nand_chip *chip, int chipnr)
+{
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+
+ if (chipnr <= 0)
+ return;
+
+ dev_warn(nandc->dev, "invalid chip select\n");
+}
+
+/*
+ * NAND controller page layout info
+ *
+ * Layout with ECC enabled:
+ *
+ * |----------------------| |---------------------------------|
+ * | xx.......yy| | *********xx.......yy|
+ * | DATA xx..ECC..yy| | DATA **SPARE**xx..ECC..yy|
+ * | (516) xx.......yy| | (516-n*4) **(n*4)**xx.......yy|
+ * | xx.......yy| | *********xx.......yy|
+ * |----------------------| |---------------------------------|
+ * codeword 1,2..n-1 codeword n
+ * <---(528/532 Bytes)--> <-------(528/532 Bytes)--------->
+ *
+ * n = Number of codewords in the page
+ * . = ECC bytes
+ * * = Spare/free bytes
+ * x = Unused byte(s)
+ * y = Reserved byte(s)
+ *
+ * 2K page: n = 4, spare = 16 bytes
+ * 4K page: n = 8, spare = 32 bytes
+ * 8K page: n = 16, spare = 64 bytes
+ *
+ * the qcom nand controller operates at a sub page/codeword level. each
+ * codeword is 528 or 532 bytes for 4 bit and 8 bit ECC modes, respectively.
+ * the number of ECC bytes varies based on the ECC strength and the bus width.
+ *
+ * the first n - 1 codewords contain 516 bytes of user data, the remaining
+ * 12/16 bytes consist of ECC and reserved data. The nth codeword contains
+ * both user data and spare (oobavail) bytes that sum up to 516 bytes.
+ *
+ * When we access a page with ECC enabled, the reserved byte(s) are not
+ * accessible at all. When reading, we fill up these unreadable positions
+ * with 0xffs. When writing, the controller skips writing the inaccessible
+ * bytes.
+ *
+ * Layout with ECC disabled:
+ *
+ * |------------------------------| |---------------------------------------|
+ * | yy xx.......| | bb *********xx.......|
+ * | DATA1 yy DATA2 xx..ECC..| | DATA1 bb DATA2 **SPARE**xx..ECC..|
+ * | (size1) yy (size2) xx.......| | (size1) bb (size2) **(n*4)**xx.......|
+ * | yy xx.......| | bb *********xx.......|
+ * |------------------------------| |---------------------------------------|
+ * codeword 1,2..n-1 codeword n
+ * <-------(528/532 Bytes)------> <-----------(528/532 Bytes)----------->
+ *
+ * n = Number of codewords in the page
+ * . = ECC bytes
+ * * = Spare/free bytes
+ * x = Unused byte(s)
+ * y = Dummy Bad Block byte(s)
+ * b = Real Bad Block byte(s)
+ * size1/size2 = function of codeword size and 'n'
+ *
+ * when the ECC block is disabled, one reserved byte (or two for 16 bit bus
+ * width) is now accessible. For the first n - 1 codewords, these are dummy Bad
+ * Block Markers. In the last codeword, this position contains the real BBM.
+ *
+ * In order to have a consistent layout between RAW and ECC modes, we assume
+ * the following OOB layout arrangement:
+ *
+ * |-----------| |--------------------|
+ * |yyxx.......| |bb*********xx.......|
+ * |yyxx..ECC..| |bb*FREEOOB*xx..ECC..|
+ * |yyxx.......| |bb*********xx.......|
+ * |yyxx.......| |bb*********xx.......|
+ * |-----------| |--------------------|
+ * first n - 1 nth OOB region
+ * OOB regions
+ *
+ * n = Number of codewords in the page
+ * . = ECC bytes
+ * * = FREE OOB bytes
+ * y = Dummy bad block byte(s) (inaccessible when ECC enabled)
+ * x = Unused byte(s)
+ * b = Real bad block byte(s) (inaccessible when ECC enabled)
+ *
+ * This layout is read as is when ECC is disabled. When ECC is enabled, the
+ * inaccessible Bad Block byte(s) are ignored when we write to a page/oob,
+ * and assumed as 0xffs when we read a page/oob. The ECC, unused and
+ * dummy/real bad block bytes are grouped as ecc bytes (i.e., ecc->bytes is
+ * the sum of the three).
+ */
+static int qcom_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+ if (section > 1)
+ return -ERANGE;
+
+ if (!section) {
+ oobregion->length = (ecc->bytes * (ecc->steps - 1)) +
+ host->bbm_size;
+ oobregion->offset = 0;
+ } else {
+ oobregion->length = host->ecc_bytes_hw + host->spare_bytes;
+ oobregion->offset = mtd->oobsize - oobregion->length;
+ }
+
+ return 0;
+}
+
+static int qcom_nand_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->length = ecc->steps * 4;
+ oobregion->offset = ((ecc->steps - 1) * ecc->bytes) + host->bbm_size;
+
+ return 0;
+}
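+
+/*
+ * Worked example (illustrative, assuming a 2K page, x8 bus, RS ECC:
+ * ecc->steps = 4, ecc->bytes = 12, ecc_bytes_hw = 10, spare_bytes = 1,
+ * bbm_size = 1, oobsize = 64): ECC section 0 covers bytes 0..36
+ * (3 * 12 + 1), the free region covers bytes 37..52 (4 * 4) and ECC
+ * section 1 covers bytes 53..63 (10 + 1), together accounting for all
+ * 64 OOB bytes.
+ */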
+
+static const struct mtd_ooblayout_ops qcom_nand_ooblayout_ops = {
+ .ecc = qcom_nand_ooblayout_ecc,
+ .free = qcom_nand_ooblayout_free,
+};
+
+static int
+qcom_nandc_calc_ecc_bytes(int step_size, int strength)
+{
+ return strength == 4 ? 12 : 16;
+}
+NAND_ECC_CAPS_SINGLE(qcom_nandc_ecc_caps, qcom_nandc_calc_ecc_bytes,
+ NANDC_STEP_SIZE, 4, 8);
+
+static int qcom_nand_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ int cwperpage, bad_block_byte, ret;
+ bool wide_bus;
+ int ecc_mode = 1;
+
+ /* controller only supports 512 bytes data steps */
+ ecc->size = NANDC_STEP_SIZE;
+	wide_bus = chip->options & NAND_BUSWIDTH_16;
+ cwperpage = mtd->writesize / NANDC_STEP_SIZE;
+
+	/*
+	 * Each CW has 4 available OOB bytes which are protected with ECC, so
+	 * the remaining OOB bytes can be used for ECC parity.
+	 */
+ ret = nand_ecc_choose_conf(chip, &qcom_nandc_ecc_caps,
+ mtd->oobsize - (cwperpage * 4));
+ if (ret) {
+ dev_err(nandc->dev, "No valid ECC settings possible\n");
+ return ret;
+ }
+
+ if (ecc->strength >= 8) {
+ /* 8 bit ECC defaults to BCH ECC on all platforms */
+ host->bch_enabled = true;
+ ecc_mode = 1;
+
+ if (wide_bus) {
+ host->ecc_bytes_hw = 14;
+ host->spare_bytes = 0;
+ host->bbm_size = 2;
+ } else {
+ host->ecc_bytes_hw = 13;
+ host->spare_bytes = 2;
+ host->bbm_size = 1;
+ }
+ } else {
+		/*
+		 * if the controller supports BCH for 4 bit ECC, the controller
+		 * uses fewer bytes for ECC. If RS is used, the ECC data is
+		 * always 10 bytes.
+		 */
+ if (nandc->props->ecc_modes & ECC_BCH_4BIT) {
+ /* BCH */
+ host->bch_enabled = true;
+ ecc_mode = 0;
+
+ if (wide_bus) {
+ host->ecc_bytes_hw = 8;
+ host->spare_bytes = 2;
+ host->bbm_size = 2;
+ } else {
+ host->ecc_bytes_hw = 7;
+ host->spare_bytes = 4;
+ host->bbm_size = 1;
+ }
+ } else {
+ /* RS */
+ host->ecc_bytes_hw = 10;
+
+ if (wide_bus) {
+ host->spare_bytes = 0;
+ host->bbm_size = 2;
+ } else {
+ host->spare_bytes = 1;
+ host->bbm_size = 1;
+ }
+ }
+ }
+
+ /*
+ * we consider ecc->bytes as the sum of all the non-data content in a
+ * step. It gives us a clean representation of the oob area (even if
+	 * all the bytes aren't used for ECC). It is always 16 bytes for 8 bit
+	 * ECC and 12 bytes for 4 bit ECC.
+ */
+ ecc->bytes = host->ecc_bytes_hw + host->spare_bytes + host->bbm_size;
+
+ ecc->read_page = qcom_nandc_read_page;
+ ecc->read_page_raw = qcom_nandc_read_page_raw;
+ ecc->read_oob = qcom_nandc_read_oob;
+ ecc->write_page = qcom_nandc_write_page;
+ ecc->write_page_raw = qcom_nandc_write_page_raw;
+ ecc->write_oob = qcom_nandc_write_oob;
+
+ ecc->engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+
+ mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);
+
+ nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage,
+ cwperpage);
+
+ /*
+ * DATA_UD_BYTES varies based on whether the read/write command protects
+ * spare data with ECC too. We protect spare data by default, so we set
+ * it to main + spare data, which are 512 and 4 bytes respectively.
+ */
+ host->cw_data = 516;
+
+ /*
+ * total bytes in a step, either 528 bytes for 4 bit ECC, or 532 bytes
+ * for 8 bit ECC
+ */
+ host->cw_size = host->cw_data + ecc->bytes;
+ bad_block_byte = mtd->writesize - host->cw_size * (cwperpage - 1) + 1;
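+	/*
+	 * Example (assuming a 2K page with 528-byte codewords, cwperpage = 4):
+	 * bad_block_byte = 2048 - 528 * 3 + 1 = 465, i.e. the 1-indexed
+	 * position of the BBM within the last codeword; the raw helpers above
+	 * mirror it as the 0-indexed bbpos = 464.
+	 */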
+
+ host->cfg0 = (cwperpage - 1) << CW_PER_PAGE
+ | host->cw_data << UD_SIZE_BYTES
+ | 0 << DISABLE_STATUS_AFTER_WRITE
+ | 5 << NUM_ADDR_CYCLES
+ | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_RS
+ | 0 << STATUS_BFR_READ
+ | 1 << SET_RD_MODE_AFTER_STATUS
+ | host->spare_bytes << SPARE_SIZE_BYTES;
+
+ host->cfg1 = 7 << NAND_RECOVERY_CYCLES
+ | 0 << CS_ACTIVE_BSY
+ | bad_block_byte << BAD_BLOCK_BYTE_NUM
+ | 0 << BAD_BLOCK_IN_SPARE_AREA
+ | 2 << WR_RD_BSY_GAP
+ | wide_bus << WIDE_FLASH
+ | host->bch_enabled << ENABLE_BCH_ECC;
+
+ host->cfg0_raw = (cwperpage - 1) << CW_PER_PAGE
+ | host->cw_size << UD_SIZE_BYTES
+ | 5 << NUM_ADDR_CYCLES
+ | 0 << SPARE_SIZE_BYTES;
+
+ host->cfg1_raw = 7 << NAND_RECOVERY_CYCLES
+ | 0 << CS_ACTIVE_BSY
+ | 17 << BAD_BLOCK_BYTE_NUM
+ | 1 << BAD_BLOCK_IN_SPARE_AREA
+ | 2 << WR_RD_BSY_GAP
+ | wide_bus << WIDE_FLASH
+ | 1 << DEV0_CFG1_ECC_DISABLE;
+
+ host->ecc_bch_cfg = !host->bch_enabled << ECC_CFG_ECC_DISABLE
+ | 0 << ECC_SW_RESET
+ | host->cw_data << ECC_NUM_DATA_BYTES
+ | 1 << ECC_FORCE_CLK_OPEN
+ | ecc_mode << ECC_MODE
+ | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH;
+
+ host->ecc_buf_cfg = 0x203 << NUM_STEPS;
+
+ host->clrflashstatus = FS_READY_BSY_N;
+ host->clrreadstatus = 0xc0;
+ nandc->regs->erased_cw_detect_cfg_clr =
+ cpu_to_le32(CLR_ERASED_PAGE_DET);
+ nandc->regs->erased_cw_detect_cfg_set =
+ cpu_to_le32(SET_ERASED_PAGE_DET);
+
+ dev_dbg(nandc->dev,
+ "cfg0 %x cfg1 %x ecc_buf_cfg %x ecc_bch cfg %x cw_size %d cw_data %d strength %d parity_bytes %d steps %d\n",
+ host->cfg0, host->cfg1, host->ecc_buf_cfg, host->ecc_bch_cfg,
+ host->cw_size, host->cw_data, ecc->strength, ecc->bytes,
+ cwperpage);
+
+ return 0;
+}
+
+static const struct nand_controller_ops qcom_nandc_ops = {
+ .attach_chip = qcom_nand_attach_chip,
+};
+
+static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
+{
+ if (nandc->props->is_bam) {
+ if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
+ dma_unmap_single(nandc->dev, nandc->reg_read_dma,
+ MAX_REG_RD *
+ sizeof(*nandc->reg_read_buf),
+ DMA_FROM_DEVICE);
+
+ if (nandc->tx_chan)
+ dma_release_channel(nandc->tx_chan);
+
+ if (nandc->rx_chan)
+ dma_release_channel(nandc->rx_chan);
+
+ if (nandc->cmd_chan)
+ dma_release_channel(nandc->cmd_chan);
+ } else {
+ if (nandc->chan)
+ dma_release_channel(nandc->chan);
+ }
+}
+
+static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
+{
+ int ret;
+
+ ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(nandc->dev, "failed to set DMA mask\n");
+ return ret;
+ }
+
+ /*
+ * we use the internal buffer for reading ONFI params, reading small
+	 * data like ID and status, and performing read-copy-write operations
+ * when writing to a codeword partially. 532 is the maximum possible
+ * size of a codeword for our nand controller
+ */
+ nandc->buf_size = 532;
+
+ nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size,
+ GFP_KERNEL);
+ if (!nandc->data_buffer)
+ return -ENOMEM;
+
+ nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs),
+ GFP_KERNEL);
+ if (!nandc->regs)
+ return -ENOMEM;
+
+ nandc->reg_read_buf = devm_kcalloc(nandc->dev,
+ MAX_REG_RD, sizeof(*nandc->reg_read_buf),
+ GFP_KERNEL);
+ if (!nandc->reg_read_buf)
+ return -ENOMEM;
+
+ if (nandc->props->is_bam) {
+ nandc->reg_read_dma =
+ dma_map_single(nandc->dev, nandc->reg_read_buf,
+ MAX_REG_RD *
+ sizeof(*nandc->reg_read_buf),
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(nandc->dev, nandc->reg_read_dma)) {
+ dev_err(nandc->dev, "failed to DMA MAP reg buffer\n");
+ return -EIO;
+ }
+
+ nandc->tx_chan = dma_request_chan(nandc->dev, "tx");
+ if (IS_ERR(nandc->tx_chan)) {
+ ret = PTR_ERR(nandc->tx_chan);
+ nandc->tx_chan = NULL;
+ dev_err_probe(nandc->dev, ret,
+ "tx DMA channel request failed\n");
+ goto unalloc;
+ }
+
+ nandc->rx_chan = dma_request_chan(nandc->dev, "rx");
+ if (IS_ERR(nandc->rx_chan)) {
+ ret = PTR_ERR(nandc->rx_chan);
+ nandc->rx_chan = NULL;
+ dev_err_probe(nandc->dev, ret,
+ "rx DMA channel request failed\n");
+ goto unalloc;
+ }
+
+ nandc->cmd_chan = dma_request_chan(nandc->dev, "cmd");
+ if (IS_ERR(nandc->cmd_chan)) {
+ ret = PTR_ERR(nandc->cmd_chan);
+ nandc->cmd_chan = NULL;
+ dev_err_probe(nandc->dev, ret,
+ "cmd DMA channel request failed\n");
+ goto unalloc;
+ }
+
+ /*
+ * Initially allocate BAM transaction to read ONFI param page.
+ * After detecting all the devices, this BAM transaction will
+	 * be freed and the next BAM transaction will be allocated with
+ * maximum codeword size
+ */
+ nandc->max_cwperpage = 1;
+ nandc->bam_txn = alloc_bam_transaction(nandc);
+ if (!nandc->bam_txn) {
+ dev_err(nandc->dev,
+ "failed to allocate bam transaction\n");
+ ret = -ENOMEM;
+ goto unalloc;
+ }
+ } else {
+ nandc->chan = dma_request_chan(nandc->dev, "rxtx");
+ if (IS_ERR(nandc->chan)) {
+ ret = PTR_ERR(nandc->chan);
+ nandc->chan = NULL;
+ dev_err_probe(nandc->dev, ret,
+ "rxtx DMA channel request failed\n");
+ return ret;
+ }
+ }
+
+ INIT_LIST_HEAD(&nandc->desc_list);
+ INIT_LIST_HEAD(&nandc->host_list);
+
+ nand_controller_init(&nandc->controller);
+ nandc->controller.ops = &qcom_nandc_ops;
+
+ return 0;
+unalloc:
+ qcom_nandc_unalloc(nandc);
+ return ret;
+}
+
+/* one time setup of a few nand controller registers */
+static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
+{
+ u32 nand_ctrl;
+
+ /* kill onenand */
+ if (!nandc->props->is_qpic)
+ nandc_write(nandc, SFLASHC_BURST_CFG, 0);
+ nandc_write(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD),
+ NAND_DEV_CMD_VLD_VAL);
+
+ /* enable ADM or BAM DMA */
+ if (nandc->props->is_bam) {
+ nand_ctrl = nandc_read(nandc, NAND_CTRL);
+
+		/*
+		 * NAND_CTRL is an operational register, and CPU
+		 * access to operational registers is read-only
+		 * in BAM mode. So update the NAND_CTRL register
+		 * only if it is not in BAM mode. In most cases BAM
+		 * mode will be enabled in the bootloader.
+		 */
+ if (!(nand_ctrl & BAM_MODE_EN))
+ nandc_write(nandc, NAND_CTRL, nand_ctrl | BAM_MODE_EN);
+ } else {
+ nandc_write(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);
+ }
+
+ /* save the original values of these registers */
+ nandc->cmd1 = nandc_read(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD1));
+ nandc->vld = NAND_DEV_CMD_VLD_VAL;
+
+ return 0;
+}
+
+static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
+ struct qcom_nand_host *host,
+ struct device_node *dn)
+{
+ struct nand_chip *chip = &host->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct device *dev = nandc->dev;
+ int ret;
+
+ ret = of_property_read_u32(dn, "reg", &host->cs);
+ if (ret) {
+ dev_err(dev, "can't get chip-select\n");
+ return -ENXIO;
+ }
+
+ nand_set_flash_node(chip, dn);
+ mtd->name = devm_kasprintf(dev, GFP_KERNEL, "qcom_nand.%d", host->cs);
+ if (!mtd->name)
+ return -ENOMEM;
+
+ mtd->owner = THIS_MODULE;
+ mtd->dev.parent = dev;
+
+ chip->legacy.cmdfunc = qcom_nandc_command;
+ chip->legacy.select_chip = qcom_nandc_select_chip;
+ chip->legacy.read_byte = qcom_nandc_read_byte;
+ chip->legacy.read_buf = qcom_nandc_read_buf;
+ chip->legacy.write_buf = qcom_nandc_write_buf;
+ chip->legacy.set_features = nand_get_set_features_notsupp;
+ chip->legacy.get_features = nand_get_set_features_notsupp;
+
+ /*
+ * the bad block marker is readable only when we read the last codeword
+ * of a page with ECC disabled. currently, the nand_base and nand_bbt
+ * helpers don't allow us to read BB from a nand chip with ECC
+ * disabled (MTD_OPS_PLACE_OOB is set by default). use the block_bad
+ * and block_markbad helpers until we permanently switch to using
+ * MTD_OPS_RAW for all drivers (with the help of badblockbits)
+ */
+ chip->legacy.block_bad = qcom_nandc_block_bad;
+ chip->legacy.block_markbad = qcom_nandc_block_markbad;
+
+ chip->controller = &nandc->controller;
+ chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA |
+ NAND_SKIP_BBTSCAN;
+
+ /* set up initial status value */
+ host->status = NAND_STATUS_READY | NAND_STATUS_WP;
+
+ ret = nand_scan(chip, 1);
+ if (ret)
+ return ret;
+
+ if (nandc->props->is_bam) {
+ free_bam_transaction(nandc);
+ nandc->bam_txn = alloc_bam_transaction(nandc);
+ if (!nandc->bam_txn) {
+ dev_err(nandc->dev,
+ "failed to allocate bam transaction\n");
+ return -ENOMEM;
+ }
+ }
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret)
+ nand_cleanup(chip);
+
+ return ret;
+}
+
+static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc)
+{
+ struct device *dev = nandc->dev;
+ struct device_node *dn = dev->of_node, *child;
+ struct qcom_nand_host *host;
+ int ret = -ENODEV;
+
+ for_each_available_child_of_node(dn, child) {
+ host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+ if (!host) {
+ of_node_put(child);
+ return -ENOMEM;
+ }
+
+ ret = qcom_nand_host_init_and_register(nandc, host, child);
+ if (ret) {
+ devm_kfree(dev, host);
+ continue;
+ }
+
+ list_add_tail(&host->node, &nandc->host_list);
+ }
+
+ return ret;
+}
+
+/* parse custom DT properties here */
+static int qcom_nandc_parse_dt(struct platform_device *pdev)
+{
+ struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
+ struct device_node *np = nandc->dev->of_node;
+ int ret;
+
+ if (!nandc->props->is_bam) {
+ ret = of_property_read_u32(np, "qcom,cmd-crci",
+ &nandc->cmd_crci);
+ if (ret) {
+ dev_err(nandc->dev, "command CRCI unspecified\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(np, "qcom,data-crci",
+ &nandc->data_crci);
+ if (ret) {
+ dev_err(nandc->dev, "data CRCI unspecified\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int qcom_nandc_probe(struct platform_device *pdev)
+{
+ struct qcom_nand_controller *nandc;
+ const void *dev_data;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int ret;
+
+ nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc), GFP_KERNEL);
+ if (!nandc)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, nandc);
+ nandc->dev = dev;
+
+ dev_data = of_device_get_match_data(dev);
+ if (!dev_data) {
+ dev_err(&pdev->dev, "failed to get device data\n");
+ return -ENODEV;
+ }
+
+ nandc->props = dev_data;
+
+ nandc->core_clk = devm_clk_get(dev, "core");
+ if (IS_ERR(nandc->core_clk))
+ return PTR_ERR(nandc->core_clk);
+
+ nandc->aon_clk = devm_clk_get(dev, "aon");
+ if (IS_ERR(nandc->aon_clk))
+ return PTR_ERR(nandc->aon_clk);
+
+ ret = qcom_nandc_parse_dt(pdev);
+ if (ret)
+ return ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ nandc->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(nandc->base))
+ return PTR_ERR(nandc->base);
+
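+	/*
+	 * Map the register block for DMA as well: the ADM/BAM engine
+	 * addresses the controller's registers and FIFO directly by
+	 * bus address.
+	 */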
+ nandc->base_phys = res->start;
+ nandc->base_dma = dma_map_resource(dev, res->start,
+ resource_size(res),
+ DMA_BIDIRECTIONAL, 0);
+ if (!nandc->base_dma)
+ return -ENXIO;
+
+ ret = clk_prepare_enable(nandc->core_clk);
+ if (ret)
+ goto err_core_clk;
+
+ ret = clk_prepare_enable(nandc->aon_clk);
+ if (ret)
+ goto err_aon_clk;
+
+ ret = qcom_nandc_alloc(nandc);
+ if (ret)
+ goto err_nandc_alloc;
+
+ ret = qcom_nandc_setup(nandc);
+ if (ret)
+ goto err_setup;
+
+ ret = qcom_probe_nand_devices(nandc);
+ if (ret)
+ goto err_setup;
+
+ return 0;
+
+err_setup:
+ qcom_nandc_unalloc(nandc);
+err_nandc_alloc:
+ clk_disable_unprepare(nandc->aon_clk);
+err_aon_clk:
+ clk_disable_unprepare(nandc->core_clk);
+err_core_clk:
+ dma_unmap_resource(dev, nandc->base_dma, resource_size(res),
+ DMA_BIDIRECTIONAL, 0);
+ return ret;
+}
+
+static int qcom_nandc_remove(struct platform_device *pdev)
+{
+ struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ struct qcom_nand_host *host;
+ struct nand_chip *chip;
+ int ret;
+
+ list_for_each_entry(host, &nandc->host_list, node) {
+ chip = &host->chip;
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ }
+
+ qcom_nandc_unalloc(nandc);
+
+ clk_disable_unprepare(nandc->aon_clk);
+ clk_disable_unprepare(nandc->core_clk);
+
+ dma_unmap_resource(&pdev->dev, nandc->base_dma, resource_size(res),
+ DMA_BIDIRECTIONAL, 0);
+
+ return 0;
+}
+
+static const struct qcom_nandc_props ipq806x_nandc_props = {
+ .ecc_modes = (ECC_RS_4BIT | ECC_BCH_8BIT),
+ .is_bam = false,
+ .dev_cmd_reg_start = 0x0,
+};
+
+static const struct qcom_nandc_props ipq4019_nandc_props = {
+ .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
+ .is_bam = true,
+ .is_qpic = true,
+ .dev_cmd_reg_start = 0x0,
+};
+
+static const struct qcom_nandc_props ipq8074_nandc_props = {
+ .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
+ .is_bam = true,
+ .is_qpic = true,
+ .dev_cmd_reg_start = 0x7000,
+};
+
+/*
+ * The .data of each compatible entry points to a qcom_nandc_props
+ * describing how that controller variant differs (DMA type, supported
+ * ECC modes, register layout).
+ */
+static const struct of_device_id qcom_nandc_of_match[] = {
+ {
+ .compatible = "qcom,ipq806x-nand",
+ .data = &ipq806x_nandc_props,
+ },
+ {
+ .compatible = "qcom,ipq4019-nand",
+ .data = &ipq4019_nandc_props,
+ },
+ {
+ .compatible = "qcom,ipq8074-nand",
+ .data = &ipq8074_nandc_props,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_nandc_of_match);
+
+static struct platform_driver qcom_nandc_driver = {
+ .driver = {
+ .name = "qcom-nandc",
+ .of_match_table = qcom_nandc_of_match,
+ },
+ .probe = qcom_nandc_probe,
+ .remove = qcom_nandc_remove,
+};
+module_platform_driver(qcom_nandc_driver);
+
+MODULE_AUTHOR("Archit Taneja <architt@codeaurora.org>");
+MODULE_DESCRIPTION("Qualcomm NAND Controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/raw/r852.c b/drivers/mtd/nand/raw/r852.c
new file mode 100644
index 000000000..c742354c1
--- /dev/null
+++ b/drivers/mtd/nand/raw/r852.c
@@ -0,0 +1,1093 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright © 2009 - Maxim Levitsky
+ * driver for Ricoh xD readers
+ */
+
+#define DRV_NAME "r852"
+#define pr_fmt(fmt) DRV_NAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <asm/byteorder.h>
+#include <linux/sched.h>
+#include "sm_common.h"
+#include "r852.h"
+
+
+static bool r852_enable_dma = true;
+module_param(r852_enable_dma, bool, S_IRUGO);
+MODULE_PARM_DESC(r852_enable_dma, "Enable usage of the DMA (default)");
+
+static int debug;
+module_param(debug, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Debug level (0-2)");
+
+/* read register */
+static inline uint8_t r852_read_reg(struct r852_device *dev, int address)
+{
+ uint8_t reg = readb(dev->mmio + address);
+ return reg;
+}
+
+/* write register */
+static inline void r852_write_reg(struct r852_device *dev,
+ int address, uint8_t value)
+{
+ writeb(value, dev->mmio + address);
+}
+
+
+/* read dword sized register */
+static inline uint32_t r852_read_reg_dword(struct r852_device *dev, int address)
+{
+ uint32_t reg = le32_to_cpu(readl(dev->mmio + address));
+ return reg;
+}
+
+/* write dword sized register */
+static inline void r852_write_reg_dword(struct r852_device *dev,
+ int address, uint32_t value)
+{
+ writel(cpu_to_le32(value), dev->mmio + address);
+}
+
+/* returns pointer to our private structure */
+static inline struct r852_device *r852_get_dev(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ return nand_get_controller_data(chip);
+}
+
+
+/* check if controller supports dma */
+static void r852_dma_test(struct r852_device *dev)
+{
+ dev->dma_usable = (r852_read_reg(dev, R852_DMA_CAP) &
+ (R852_DMA1 | R852_DMA2)) == (R852_DMA1 | R852_DMA2);
+
+ if (!dev->dma_usable)
+ message("Non dma capable device detected, dma disabled");
+
+ if (!r852_enable_dma) {
+ message("disabling dma on user request");
+ dev->dma_usable = 0;
+ }
+}
+
+/*
+ * Enable dma. Enables either the first or the second stage of the DMA,
+ * depending on dev->dma_state; expects dev->dma_dir and dev->dma_state
+ * to be set.
+ */
+static void r852_dma_enable(struct r852_device *dev)
+{
+ uint8_t dma_reg, dma_irq_reg;
+
+ /* Set up dma settings */
+ dma_reg = r852_read_reg_dword(dev, R852_DMA_SETTINGS);
+ dma_reg &= ~(R852_DMA_READ | R852_DMA_INTERNAL | R852_DMA_MEMORY);
+
+ if (dev->dma_dir)
+ dma_reg |= R852_DMA_READ;
+
+ if (dev->dma_state == DMA_INTERNAL) {
+ dma_reg |= R852_DMA_INTERNAL;
+		/* Precaution to make sure HW doesn't write
+		   to random kernel memory */
+ r852_write_reg_dword(dev, R852_DMA_ADDR,
+ cpu_to_le32(dev->phys_bounce_buffer));
+ } else {
+ dma_reg |= R852_DMA_MEMORY;
+ r852_write_reg_dword(dev, R852_DMA_ADDR,
+ cpu_to_le32(dev->phys_dma_addr));
+ }
+
+ /* Precaution: make sure write reached the device */
+ r852_read_reg_dword(dev, R852_DMA_ADDR);
+
+ r852_write_reg_dword(dev, R852_DMA_SETTINGS, dma_reg);
+
+ /* Set dma irq */
+ dma_irq_reg = r852_read_reg_dword(dev, R852_DMA_IRQ_ENABLE);
+ r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE,
+ dma_irq_reg |
+ R852_DMA_IRQ_INTERNAL |
+ R852_DMA_IRQ_ERROR |
+ R852_DMA_IRQ_MEMORY);
+}
+
+/*
+ * Disable dma, called from the interrupt handler, which specifies
+ * success of the operation via 'error' argument
+ */
+static void r852_dma_done(struct r852_device *dev, int error)
+{
+ WARN_ON(dev->dma_stage == 0);
+
+ r852_write_reg_dword(dev, R852_DMA_IRQ_STA,
+ r852_read_reg_dword(dev, R852_DMA_IRQ_STA));
+
+ r852_write_reg_dword(dev, R852_DMA_SETTINGS, 0);
+ r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE, 0);
+
+ /* Precaution to make sure HW doesn't write to random kernel memory */
+ r852_write_reg_dword(dev, R852_DMA_ADDR,
+ cpu_to_le32(dev->phys_bounce_buffer));
+ r852_read_reg_dword(dev, R852_DMA_ADDR);
+
+ dev->dma_error = error;
+ dev->dma_stage = 0;
+
+ if (dev->phys_dma_addr && dev->phys_dma_addr != dev->phys_bounce_buffer)
+ dma_unmap_single(&dev->pci_dev->dev, dev->phys_dma_addr,
+ R852_DMA_LEN,
+ dev->dma_dir ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
+}
+
+/*
+ * Wait till the DMA is done; this covers both phases of the transfer
+ */
+static int r852_dma_wait(struct r852_device *dev)
+{
+ long timeout = wait_for_completion_timeout(&dev->dma_done,
+ msecs_to_jiffies(1000));
+ if (!timeout) {
+ dbg("timeout waiting for DMA interrupt");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/*
+ * Read/Write one page using dma. Only whole pages (512 bytes) can be
+ * transferred this way.
+ */
+static void r852_do_dma(struct r852_device *dev, uint8_t *buf, int do_read)
+{
+ int bounce = 0;
+ unsigned long flags;
+ int error;
+
+ dev->dma_error = 0;
+
+ /* Set dma direction */
+ dev->dma_dir = do_read;
+ dev->dma_stage = 1;
+ reinit_completion(&dev->dma_done);
+
+ dbg_verbose("doing dma %s ", do_read ? "read" : "write");
+
+	/* Set the initial dma state: for reads, first fill the on-board
+	   buffer from the device; for writes, first fill it from memory */
+ dev->dma_state = do_read ? DMA_INTERNAL : DMA_MEMORY;
+
+	/* if the incoming buffer is not page aligned, use the bounce buffer */
+ if ((unsigned long)buf & (R852_DMA_LEN-1))
+ bounce = 1;
+
+ if (!bounce) {
+ dev->phys_dma_addr = dma_map_single(&dev->pci_dev->dev, buf,
+ R852_DMA_LEN,
+ do_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ if (dma_mapping_error(&dev->pci_dev->dev, dev->phys_dma_addr))
+ bounce = 1;
+ }
+
+ if (bounce) {
+ dbg_verbose("dma: using bounce buffer");
+ dev->phys_dma_addr = dev->phys_bounce_buffer;
+ if (!do_read)
+ memcpy(dev->bounce_buffer, buf, R852_DMA_LEN);
+ }
+
+ /* Enable DMA */
+ spin_lock_irqsave(&dev->irqlock, flags);
+ r852_dma_enable(dev);
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+
+ /* Wait till complete */
+ error = r852_dma_wait(dev);
+
+ if (error) {
+ r852_dma_done(dev, error);
+ return;
+ }
+
+ if (do_read && bounce)
+ memcpy((void *)buf, dev->bounce_buffer, R852_DMA_LEN);
+}
+
+/*
+ * Program data lines of the nand chip to send data to it
+ */
+static void r852_write_buf(struct nand_chip *chip, const uint8_t *buf, int len)
+{
+ struct r852_device *dev = r852_get_dev(nand_to_mtd(chip));
+ uint32_t reg;
+
+ /* Don't allow any access to hardware if we suspect card removal */
+ if (dev->card_unstable)
+ return;
+
+	/* Special case for whole sector write */
+ if (len == R852_DMA_LEN && dev->dma_usable) {
+ r852_do_dma(dev, (uint8_t *)buf, 0);
+ return;
+ }
+
+	/* write in DWORD-sized chunks - faster */
+ while (len >= 4) {
+ reg = buf[0] | buf[1] << 8 | buf[2] << 16 | buf[3] << 24;
+ r852_write_reg_dword(dev, R852_DATALINE, reg);
+ buf += 4;
+ len -= 4;
+	}
+
+ /* write rest */
+ while (len > 0) {
+ r852_write_reg(dev, R852_DATALINE, *buf++);
+ len--;
+ }
+}
+
+/*
+ * Read data lines of the nand chip to retrieve data
+ */
+static void r852_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
+{
+ struct r852_device *dev = r852_get_dev(nand_to_mtd(chip));
+ uint32_t reg;
+
+ if (dev->card_unstable) {
+		/* since we can't signal an error here, at least return
+		   a predictable buffer */
+ memset(buf, 0, len);
+ return;
+ }
+
+ /* special case for whole sector read */
+ if (len == R852_DMA_LEN && dev->dma_usable) {
+ r852_do_dma(dev, buf, 1);
+ return;
+ }
+
+ /* read in dword sized chunks */
+ while (len >= 4) {
+
+ reg = r852_read_reg_dword(dev, R852_DATALINE);
+ *buf++ = reg & 0xFF;
+ *buf++ = (reg >> 8) & 0xFF;
+ *buf++ = (reg >> 16) & 0xFF;
+ *buf++ = (reg >> 24) & 0xFF;
+ len -= 4;
+ }
+
+	/* read the rest byte by byte */
+ while (len--)
+ *buf++ = r852_read_reg(dev, R852_DATALINE);
+}
+
+/*
+ * Read one byte from nand chip
+ */
+static uint8_t r852_read_byte(struct nand_chip *chip)
+{
+ struct r852_device *dev = r852_get_dev(nand_to_mtd(chip));
+
+ /* Same problem as in r852_read_buf.... */
+ if (dev->card_unstable)
+ return 0;
+
+ return r852_read_reg(dev, R852_DATALINE);
+}
+
+/*
+ * Control several chip lines & send commands
+ */
+static void r852_cmdctl(struct nand_chip *chip, int dat, unsigned int ctrl)
+{
+ struct r852_device *dev = r852_get_dev(nand_to_mtd(chip));
+
+ if (dev->card_unstable)
+ return;
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+
+ dev->ctlreg &= ~(R852_CTL_DATA | R852_CTL_COMMAND |
+ R852_CTL_ON | R852_CTL_CARDENABLE);
+
+ if (ctrl & NAND_ALE)
+ dev->ctlreg |= R852_CTL_DATA;
+
+ if (ctrl & NAND_CLE)
+ dev->ctlreg |= R852_CTL_COMMAND;
+
+ if (ctrl & NAND_NCE)
+ dev->ctlreg |= (R852_CTL_CARDENABLE | R852_CTL_ON);
+ else
+ dev->ctlreg &= ~R852_CTL_WRITE;
+
+		/* when a write is started, enable write access */
+ if (dat == NAND_CMD_ERASE1)
+ dev->ctlreg |= R852_CTL_WRITE;
+
+ r852_write_reg(dev, R852_CTL, dev->ctlreg);
+ }
+
+ /* HACK: NAND_CMD_SEQIN is called without NAND_CTRL_CHANGE, but we need
+ to set write mode */
+ if (dat == NAND_CMD_SEQIN && (dev->ctlreg & R852_CTL_COMMAND)) {
+ dev->ctlreg |= R852_CTL_WRITE;
+ r852_write_reg(dev, R852_CTL, dev->ctlreg);
+ }
+
+ if (dat != NAND_CMD_NONE)
+ r852_write_reg(dev, R852_DATALINE, dat);
+}
+
+/*
+ * Wait till the card is ready.
+ * Based on nand_wait(), but also reports DMA errors via NAND_STATUS_FAIL
+ */
+static int r852_wait(struct nand_chip *chip)
+{
+ struct r852_device *dev = nand_get_controller_data(chip);
+
+ unsigned long timeout;
+ u8 status;
+
+ timeout = jiffies + msecs_to_jiffies(400);
+
+ while (time_before(jiffies, timeout))
+ if (chip->legacy.dev_ready(chip))
+ break;
+
+ nand_status_op(chip, &status);
+
+	/* Unfortunately, there is no way to report a detailed error status... */
+ if (dev->dma_error) {
+ status |= NAND_STATUS_FAIL;
+ dev->dma_error = 0;
+ }
+ return status;
+}
+
+/*
+ * Check if card is ready
+ */
+
+static int r852_ready(struct nand_chip *chip)
+{
+ struct r852_device *dev = r852_get_dev(nand_to_mtd(chip));
+ return !(r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_BUSY);
+}
+
+
+/*
+ * Set ECC engine mode
+*/
+
+static void r852_ecc_hwctl(struct nand_chip *chip, int mode)
+{
+ struct r852_device *dev = r852_get_dev(nand_to_mtd(chip));
+
+ if (dev->card_unstable)
+ return;
+
+ switch (mode) {
+ case NAND_ECC_READ:
+ case NAND_ECC_WRITE:
+ /* enable ecc generation/check*/
+ dev->ctlreg |= R852_CTL_ECC_ENABLE;
+
+ /* flush ecc buffer */
+ r852_write_reg(dev, R852_CTL,
+ dev->ctlreg | R852_CTL_ECC_ACCESS);
+
+ r852_read_reg_dword(dev, R852_DATALINE);
+ r852_write_reg(dev, R852_CTL, dev->ctlreg);
+ return;
+
+ case NAND_ECC_READSYN:
+ /* disable ecc generation */
+ dev->ctlreg &= ~R852_CTL_ECC_ENABLE;
+ r852_write_reg(dev, R852_CTL, dev->ctlreg);
+ }
+}
+
+/*
+ * Calculate ECC, only used for writes
+ */
+
+static int r852_ecc_calculate(struct nand_chip *chip, const uint8_t *dat,
+ uint8_t *ecc_code)
+{
+ struct r852_device *dev = r852_get_dev(nand_to_mtd(chip));
+ struct sm_oob *oob = (struct sm_oob *)ecc_code;
+ uint32_t ecc1, ecc2;
+
+ if (dev->card_unstable)
+ return 0;
+
+ dev->ctlreg &= ~R852_CTL_ECC_ENABLE;
+ r852_write_reg(dev, R852_CTL, dev->ctlreg | R852_CTL_ECC_ACCESS);
+
+ ecc1 = r852_read_reg_dword(dev, R852_DATALINE);
+ ecc2 = r852_read_reg_dword(dev, R852_DATALINE);
+
+ oob->ecc1[0] = (ecc1) & 0xFF;
+ oob->ecc1[1] = (ecc1 >> 8) & 0xFF;
+ oob->ecc1[2] = (ecc1 >> 16) & 0xFF;
+
+ oob->ecc2[0] = (ecc2) & 0xFF;
+ oob->ecc2[1] = (ecc2 >> 8) & 0xFF;
+ oob->ecc2[2] = (ecc2 >> 16) & 0xFF;
+
+ r852_write_reg(dev, R852_CTL, dev->ctlreg);
+ return 0;
+}
+
+/*
+ * Correct the data using ECC, hw did almost everything for us
+ */
+
+static int r852_ecc_correct(struct nand_chip *chip, uint8_t *dat,
+ uint8_t *read_ecc, uint8_t *calc_ecc)
+{
+ uint32_t ecc_reg;
+ uint8_t ecc_status, err_byte;
+ int i, error = 0;
+
+ struct r852_device *dev = r852_get_dev(nand_to_mtd(chip));
+
+ if (dev->card_unstable)
+ return 0;
+
+ if (dev->dma_error) {
+ dev->dma_error = 0;
+ return -EIO;
+ }
+
+ r852_write_reg(dev, R852_CTL, dev->ctlreg | R852_CTL_ECC_ACCESS);
+ ecc_reg = r852_read_reg_dword(dev, R852_DATALINE);
+ r852_write_reg(dev, R852_CTL, dev->ctlreg);
+
+ for (i = 0 ; i <= 1 ; i++) {
+
+ ecc_status = (ecc_reg >> 8) & 0xFF;
+
+ /* ecc uncorrectable error */
+ if (ecc_status & R852_ECC_FAIL) {
+ dbg("ecc: unrecoverable error, in half %d", i);
+ error = -EBADMSG;
+ goto exit;
+ }
+
+ /* correctable error */
+ if (ecc_status & R852_ECC_CORRECTABLE) {
+
+ err_byte = ecc_reg & 0xFF;
+ dbg("ecc: recoverable error, "
+ "in half %d, byte %d, bit %d", i,
+ err_byte, ecc_status & R852_ECC_ERR_BIT_MSK);
+
+ dat[err_byte] ^=
+ 1 << (ecc_status & R852_ECC_ERR_BIT_MSK);
+ error++;
+ }
+
+ dat += 256;
+ ecc_reg >>= 16;
+ }
+exit:
+ return error;
+}
+
+/*
+ * This is a copy of nand_read_oob_std();
+ * nand_read_oob_syndrome() assumes we can send a column address - we can't
+ */
+static int r852_read_oob(struct nand_chip *chip, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
+}
+
+/*
+ * Start the nand engine
+ */
+
+static void r852_engine_enable(struct r852_device *dev)
+{
+ if (r852_read_reg_dword(dev, R852_HW) & R852_HW_UNKNOWN) {
+ r852_write_reg(dev, R852_CTL, R852_CTL_RESET | R852_CTL_ON);
+ r852_write_reg_dword(dev, R852_HW, R852_HW_ENABLED);
+ } else {
+ r852_write_reg_dword(dev, R852_HW, R852_HW_ENABLED);
+ r852_write_reg(dev, R852_CTL, R852_CTL_RESET | R852_CTL_ON);
+ }
+ msleep(300);
+ r852_write_reg(dev, R852_CTL, 0);
+}
+
+
+/*
+ * Stop the nand engine
+ */
+
+static void r852_engine_disable(struct r852_device *dev)
+{
+ r852_write_reg_dword(dev, R852_HW, 0);
+ r852_write_reg(dev, R852_CTL, R852_CTL_RESET);
+}
+
+/*
+ * Test if card is present
+ */
+
+static void r852_card_update_present(struct r852_device *dev)
+{
+ unsigned long flags;
+ uint8_t reg;
+
+ spin_lock_irqsave(&dev->irqlock, flags);
+ reg = r852_read_reg(dev, R852_CARD_STA);
+ dev->card_detected = !!(reg & R852_CARD_STA_PRESENT);
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+}
+
+/*
+ * Update card detection IRQ state according to current card state
+ * which is read in r852_card_update_present
+ */
+static void r852_update_card_detect(struct r852_device *dev)
+{
+ int card_detect_reg = r852_read_reg(dev, R852_CARD_IRQ_ENABLE);
+ dev->card_unstable = 0;
+
+ card_detect_reg &= ~(R852_CARD_IRQ_REMOVE | R852_CARD_IRQ_INSERT);
+ card_detect_reg |= R852_CARD_IRQ_GENABLE;
+
+ card_detect_reg |= dev->card_detected ?
+ R852_CARD_IRQ_REMOVE : R852_CARD_IRQ_INSERT;
+
+ r852_write_reg(dev, R852_CARD_IRQ_ENABLE, card_detect_reg);
+}
+
+static ssize_t r852_media_type_show(struct device *sys_dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = container_of(sys_dev, struct mtd_info, dev);
+ struct r852_device *dev = r852_get_dev(mtd);
+ char *data = dev->sm ? "smartmedia" : "xd";
+
+ strcpy(buf, data);
+ return strlen(data);
+}
+
+static DEVICE_ATTR(media_type, S_IRUGO, r852_media_type_show, NULL);
+
+
+/* Detect properties of card in slot */
+static void r852_update_media_status(struct r852_device *dev)
+{
+ uint8_t reg;
+ unsigned long flags;
+ int readonly;
+
+ spin_lock_irqsave(&dev->irqlock, flags);
+ if (!dev->card_detected) {
+ message("card removed");
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+		return;
+ }
+
+ readonly = r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_RO;
+ reg = r852_read_reg(dev, R852_DMA_CAP);
+ dev->sm = (reg & (R852_DMA1 | R852_DMA2)) && (reg & R852_SMBIT);
+
+ message("detected %s %s card in slot",
+ dev->sm ? "SmartMedia" : "xD",
+ readonly ? "readonly" : "writeable");
+
+ dev->readonly = readonly;
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+}
+
+/*
+ * Register the nand device
+ * Called when the card is detected
+ */
+static int r852_register_nand_device(struct r852_device *dev)
+{
+ struct mtd_info *mtd = nand_to_mtd(dev->chip);
+
+ WARN_ON(dev->card_registered);
+
+ mtd->dev.parent = &dev->pci_dev->dev;
+
+ if (dev->readonly)
+ dev->chip->options |= NAND_ROM;
+
+ r852_engine_enable(dev);
+
+ if (sm_register_device(mtd, dev->sm))
+ goto error1;
+
+ if (device_create_file(&mtd->dev, &dev_attr_media_type)) {
+ message("can't create media type sysfs attribute");
+ goto error3;
+ }
+
+ dev->card_registered = 1;
+ return 0;
+error3:
+ WARN_ON(mtd_device_unregister(nand_to_mtd(dev->chip)));
+ nand_cleanup(dev->chip);
+error1:
+ /* Force card redetect */
+ dev->card_detected = 0;
+ return -1;
+}
+
+/*
+ * Unregister the card
+ */
+
+static void r852_unregister_nand_device(struct r852_device *dev)
+{
+ struct mtd_info *mtd = nand_to_mtd(dev->chip);
+
+ if (!dev->card_registered)
+ return;
+
+ device_remove_file(&mtd->dev, &dev_attr_media_type);
+ WARN_ON(mtd_device_unregister(mtd));
+ nand_cleanup(dev->chip);
+ r852_engine_disable(dev);
+ dev->card_registered = 0;
+}
+
+/* Card state updater */
+static void r852_card_detect_work(struct work_struct *work)
+{
+ struct r852_device *dev =
+ container_of(work, struct r852_device, card_detect_work.work);
+
+ r852_card_update_present(dev);
+ r852_update_card_detect(dev);
+ dev->card_unstable = 0;
+
+ /* False alarm */
+ if (dev->card_detected == dev->card_registered)
+ goto exit;
+
+ /* Read media properties */
+ r852_update_media_status(dev);
+
+ /* Register the card */
+ if (dev->card_detected)
+ r852_register_nand_device(dev);
+ else
+ r852_unregister_nand_device(dev);
+exit:
+ r852_update_card_detect(dev);
+}
+
+/* Ack + disable IRQ generation */
+static void r852_disable_irqs(struct r852_device *dev)
+{
+ uint8_t reg;
+ reg = r852_read_reg(dev, R852_CARD_IRQ_ENABLE);
+ r852_write_reg(dev, R852_CARD_IRQ_ENABLE, reg & ~R852_CARD_IRQ_MASK);
+
+ reg = r852_read_reg_dword(dev, R852_DMA_IRQ_ENABLE);
+ r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE,
+ reg & ~R852_DMA_IRQ_MASK);
+
+ r852_write_reg(dev, R852_CARD_IRQ_STA, R852_CARD_IRQ_MASK);
+ r852_write_reg_dword(dev, R852_DMA_IRQ_STA, R852_DMA_IRQ_MASK);
+}
+
+/* Interrupt handler */
+static irqreturn_t r852_irq(int irq, void *data)
+{
+ struct r852_device *dev = (struct r852_device *)data;
+
+ uint8_t card_status, dma_status;
+ unsigned long flags;
+ irqreturn_t ret = IRQ_NONE;
+
+ spin_lock_irqsave(&dev->irqlock, flags);
+
+ /* handle card detection interrupts first */
+ card_status = r852_read_reg(dev, R852_CARD_IRQ_STA);
+ r852_write_reg(dev, R852_CARD_IRQ_STA, card_status);
+
+ if (card_status & (R852_CARD_IRQ_INSERT|R852_CARD_IRQ_REMOVE)) {
+
+ ret = IRQ_HANDLED;
+ dev->card_detected = !!(card_status & R852_CARD_IRQ_INSERT);
+
+		/* we shouldn't receive any interrupts while we wait for the
+		   card to settle */
+ WARN_ON(dev->card_unstable);
+
+		/* disable irqs while the card is unstable; this will time out
+		   an active DMA, but better that than garbage */
+ r852_disable_irqs(dev);
+
+ if (dev->card_unstable)
+ goto out;
+
+		/* let the card state settle a bit, then do the work */
+ dev->card_unstable = 1;
+ queue_delayed_work(dev->card_workqueue,
+ &dev->card_detect_work, msecs_to_jiffies(100));
+ goto out;
+ }
+
+
+ /* Handle dma interrupts */
+ dma_status = r852_read_reg_dword(dev, R852_DMA_IRQ_STA);
+ r852_write_reg_dword(dev, R852_DMA_IRQ_STA, dma_status);
+
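+	/*
+	 * A page transfer is a two-stage dance: card <-> internal buffer
+	 * and internal buffer <-> memory. Each stage raises its own IRQ:
+	 * dma_stage is advanced here, the engine is re-armed for the
+	 * second half, and dma_done completes only after both stages.
+	 */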
+ if (dma_status & R852_DMA_IRQ_MASK) {
+
+ ret = IRQ_HANDLED;
+
+ if (dma_status & R852_DMA_IRQ_ERROR) {
+ dbg("received dma error IRQ");
+ r852_dma_done(dev, -EIO);
+ complete(&dev->dma_done);
+ goto out;
+ }
+
+ /* received DMA interrupt out of nowhere? */
+ WARN_ON_ONCE(dev->dma_stage == 0);
+
+ if (dev->dma_stage == 0)
+ goto out;
+
+ /* done device access */
+ if (dev->dma_state == DMA_INTERNAL &&
+ (dma_status & R852_DMA_IRQ_INTERNAL)) {
+
+ dev->dma_state = DMA_MEMORY;
+ dev->dma_stage++;
+ }
+
+ /* done memory DMA */
+ if (dev->dma_state == DMA_MEMORY &&
+ (dma_status & R852_DMA_IRQ_MEMORY)) {
+ dev->dma_state = DMA_INTERNAL;
+ dev->dma_stage++;
+ }
+
+ /* Enable 2nd half of dma dance */
+ if (dev->dma_stage == 2)
+ r852_dma_enable(dev);
+
+ /* Operation done */
+ if (dev->dma_stage == 3) {
+ r852_dma_done(dev, 0);
+ complete(&dev->dma_done);
+ }
+ goto out;
+ }
+
+ /* Handle unknown interrupts */
+ if (dma_status)
+ dbg("bad dma IRQ status = %x", dma_status);
+
+ if (card_status & ~R852_CARD_STA_CD)
+ dbg("strange card status = %x", card_status);
+
+out:
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ return ret;
+}
+
+static int r852_attach_chip(struct nand_chip *chip)
+{
+ if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
+ return 0;
+
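+	/*
+	 * The controller computes the SmartMedia-style ECC over a whole
+	 * 512-byte sector at once (two 256-byte halves with 3 ECC bytes
+	 * each), hence one ECC step of R852_DMA_LEN bytes that can fix
+	 * one bit per half.
+	 */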
+ chip->ecc.placement = NAND_ECC_PLACEMENT_INTERLEAVED;
+ chip->ecc.size = R852_DMA_LEN;
+ chip->ecc.bytes = SM_OOB_SIZE;
+ chip->ecc.strength = 2;
+ chip->ecc.hwctl = r852_ecc_hwctl;
+ chip->ecc.calculate = r852_ecc_calculate;
+ chip->ecc.correct = r852_ecc_correct;
+
+ /* TODO: hack */
+ chip->ecc.read_oob = r852_read_oob;
+
+ return 0;
+}
+
+static const struct nand_controller_ops r852_ops = {
+ .attach_chip = r852_attach_chip,
+};
+
+static int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
+{
+ int error;
+ struct nand_chip *chip;
+ struct r852_device *dev;
+
+ /* pci initialization */
+ error = pci_enable_device(pci_dev);
+
+ if (error)
+ goto error1;
+
+ pci_set_master(pci_dev);
+
+ error = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32));
+ if (error)
+ goto error2;
+
+ error = pci_request_regions(pci_dev, DRV_NAME);
+
+ if (error)
+ goto error3;
+
+ error = -ENOMEM;
+
+ /* init nand chip, but register it only on card insert */
+ chip = kzalloc(sizeof(struct nand_chip), GFP_KERNEL);
+
+ if (!chip)
+ goto error4;
+
+ /* commands */
+ chip->legacy.cmd_ctrl = r852_cmdctl;
+ chip->legacy.waitfunc = r852_wait;
+ chip->legacy.dev_ready = r852_ready;
+
+ /* I/O */
+ chip->legacy.read_byte = r852_read_byte;
+ chip->legacy.read_buf = r852_read_buf;
+ chip->legacy.write_buf = r852_write_buf;
+
+ /* init our device structure */
+ dev = kzalloc(sizeof(struct r852_device), GFP_KERNEL);
+
+ if (!dev)
+ goto error5;
+
+ nand_set_controller_data(chip, dev);
+ dev->chip = chip;
+ dev->pci_dev = pci_dev;
+ pci_set_drvdata(pci_dev, dev);
+
+ nand_controller_init(&dev->controller);
+ dev->controller.ops = &r852_ops;
+ chip->controller = &dev->controller;
+
+ dev->bounce_buffer = dma_alloc_coherent(&pci_dev->dev, R852_DMA_LEN,
+ &dev->phys_bounce_buffer, GFP_KERNEL);
+
+ if (!dev->bounce_buffer)
+ goto error6;
+
+
+ error = -ENODEV;
+ dev->mmio = pci_ioremap_bar(pci_dev, 0);
+
+ if (!dev->mmio)
+ goto error7;
+
+ error = -ENOMEM;
+ dev->tmp_buffer = kzalloc(SM_SECTOR_SIZE, GFP_KERNEL);
+
+ if (!dev->tmp_buffer)
+ goto error8;
+
+ init_completion(&dev->dma_done);
+
+ dev->card_workqueue = create_freezable_workqueue(DRV_NAME);
+
+ if (!dev->card_workqueue)
+ goto error9;
+
+ INIT_DELAYED_WORK(&dev->card_detect_work, r852_card_detect_work);
+
+	/* shut down everything - precaution */
+ r852_engine_disable(dev);
+ r852_disable_irqs(dev);
+
+ r852_dma_test(dev);
+
+ dev->irq = pci_dev->irq;
+ spin_lock_init(&dev->irqlock);
+
+ dev->card_detected = 0;
+ r852_card_update_present(dev);
+
+	/* register irq handler */
+ error = -ENODEV;
+ if (request_irq(pci_dev->irq, &r852_irq, IRQF_SHARED,
+ DRV_NAME, dev))
+ goto error10;
+
+ /* kick initial present test */
+ queue_delayed_work(dev->card_workqueue,
+ &dev->card_detect_work, 0);
+
+
+ pr_notice("driver loaded successfully\n");
+ return 0;
+
+error10:
+ destroy_workqueue(dev->card_workqueue);
+error9:
+ kfree(dev->tmp_buffer);
+error8:
+ pci_iounmap(pci_dev, dev->mmio);
+error7:
+ dma_free_coherent(&pci_dev->dev, R852_DMA_LEN, dev->bounce_buffer,
+ dev->phys_bounce_buffer);
+error6:
+ kfree(dev);
+error5:
+ kfree(chip);
+error4:
+ pci_release_regions(pci_dev);
+error3:
+error2:
+ pci_disable_device(pci_dev);
+error1:
+ return error;
+}
+
+static void r852_remove(struct pci_dev *pci_dev)
+{
+ struct r852_device *dev = pci_get_drvdata(pci_dev);
+
+	/* Stop the detect workqueue -
+	   we are going to unregister the device anyway */
+ cancel_delayed_work_sync(&dev->card_detect_work);
+ destroy_workqueue(dev->card_workqueue);
+
+	/* Unregister the device; this might trigger more IO */
+ r852_unregister_nand_device(dev);
+
+ /* Stop interrupts */
+ r852_disable_irqs(dev);
+ free_irq(dev->irq, dev);
+
+ /* Cleanup */
+ kfree(dev->tmp_buffer);
+ pci_iounmap(pci_dev, dev->mmio);
+ dma_free_coherent(&pci_dev->dev, R852_DMA_LEN, dev->bounce_buffer,
+ dev->phys_bounce_buffer);
+
+ kfree(dev->chip);
+ kfree(dev);
+
+ /* Shutdown the PCI device */
+ pci_release_regions(pci_dev);
+ pci_disable_device(pci_dev);
+}
+
+static void r852_shutdown(struct pci_dev *pci_dev)
+{
+ struct r852_device *dev = pci_get_drvdata(pci_dev);
+
+ cancel_delayed_work_sync(&dev->card_detect_work);
+ r852_disable_irqs(dev);
+ synchronize_irq(dev->irq);
+ pci_disable_device(pci_dev);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int r852_suspend(struct device *device)
+{
+ struct r852_device *dev = dev_get_drvdata(device);
+
+ if (dev->ctlreg & R852_CTL_CARDENABLE)
+ return -EBUSY;
+
+ /* First make sure the detect work is gone */
+ cancel_delayed_work_sync(&dev->card_detect_work);
+
+ /* Turn off the interrupts and stop the device */
+ r852_disable_irqs(dev);
+ r852_engine_disable(dev);
+
+	/* If the card was pulled out just during the suspend, which is very
+	   unlikely, we will remove it on resume; it is too late to do that
+	   now anyway... */
+ dev->card_unstable = 0;
+ return 0;
+}
+
+static int r852_resume(struct device *device)
+{
+ struct r852_device *dev = dev_get_drvdata(device);
+
+ r852_disable_irqs(dev);
+ r852_card_update_present(dev);
+ r852_engine_disable(dev);
+
+
+ /* If card status changed, just do the work */
+ if (dev->card_detected != dev->card_registered) {
+ dbg("card was %s during low power state",
+ dev->card_detected ? "added" : "removed");
+
+ queue_delayed_work(dev->card_workqueue,
+ &dev->card_detect_work, msecs_to_jiffies(1000));
+ return 0;
+ }
+
+ /* Otherwise, initialize the card */
+ if (dev->card_registered) {
+ r852_engine_enable(dev);
+ nand_select_target(dev->chip, 0);
+ nand_reset_op(dev->chip);
+ nand_deselect_target(dev->chip);
+ }
+
+ /* Program card detection IRQ */
+ r852_update_card_detect(dev);
+ return 0;
+}
+#endif
+
+static const struct pci_device_id r852_pci_id_tbl[] = {
+
+ { PCI_VDEVICE(RICOH, 0x0852), },
+ { },
+};
+
+MODULE_DEVICE_TABLE(pci, r852_pci_id_tbl);
+
+static SIMPLE_DEV_PM_OPS(r852_pm_ops, r852_suspend, r852_resume);
+
+static struct pci_driver r852_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = r852_pci_id_tbl,
+ .probe = r852_probe,
+ .remove = r852_remove,
+ .shutdown = r852_shutdown,
+ .driver.pm = &r852_pm_ops,
+};
+
+module_pci_driver(r852_pci_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
+MODULE_DESCRIPTION("Ricoh 85xx xD/smartmedia card reader driver");
diff --git a/drivers/mtd/nand/raw/r852.h b/drivers/mtd/nand/raw/r852.h
new file mode 100644
index 000000000..96fe301d1
--- /dev/null
+++ b/drivers/mtd/nand/raw/r852.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright © 2009 - Maxim Levitsky
+ * driver for Ricoh xD readers
+ */
+
+#include <linux/pci.h>
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/spinlock.h>
+
+
+/* nand interface + ecc
+   A byte write/read does one cycle on the nand data lines,
+   a dword write/read does 4 cycles.
+   If R852_CTL_ECC_ACCESS is set in R852_CTL, a dword read returns the
+   results of the ecc correction, provided a DMA read was done before.
+   If a write was done, two dword reads return the generated ecc checksums.
+*/
+#define R852_DATALINE 0x00
+
+/* control register */
+#define R852_CTL 0x04
+#define R852_CTL_COMMAND 0x01 /* send command (#CLE)*/
+#define R852_CTL_DATA 0x02 /* read/write data (#ALE)*/
+#define R852_CTL_ON		0x04 /* only seems to control the hd led, */
+					/* but has to be set on start...*/
+#define R852_CTL_RESET 0x08 /* unknown, set only on start once*/
+#define R852_CTL_CARDENABLE 0x10 /* probably (#CE) - always set*/
+#define R852_CTL_ECC_ENABLE 0x20 /* enable ecc engine */
+#define R852_CTL_ECC_ACCESS 0x40 /* read/write ecc via reg #0*/
+#define R852_CTL_WRITE 0x80 /* set when performing writes (#WP) */
+
+/* card detection status */
+#define R852_CARD_STA 0x05
+
+#define R852_CARD_STA_CD 0x01 /* state of #CD line, same as 0x04 */
+#define R852_CARD_STA_RO 0x02 /* card is readonly */
+#define R852_CARD_STA_PRESENT 0x04 /* card is present (#CD) */
+#define R852_CARD_STA_ABSENT 0x08 /* card is absent */
+#define R852_CARD_STA_BUSY 0x80 /* card is busy - (#R/B) */
+
+/* card detection irq status & enable*/
+#define R852_CARD_IRQ_STA 0x06 /* IRQ status */
+#define R852_CARD_IRQ_ENABLE 0x07 /* IRQ enable */
+
+#define R852_CARD_IRQ_CD 0x01 /* fire when #CD lights, same as 0x04*/
+#define R852_CARD_IRQ_REMOVE 0x04 /* detect card removal */
+#define R852_CARD_IRQ_INSERT 0x08 /* detect card insert */
+#define R852_CARD_IRQ_UNK1 0x10 /* unknown */
+#define R852_CARD_IRQ_GENABLE 0x80 /* general enable */
+#define R852_CARD_IRQ_MASK 0x1D
+
+
+
+/* hardware enable */
+#define R852_HW 0x08
+#define R852_HW_ENABLED 0x01 /* hw enabled */
+#define R852_HW_UNKNOWN 0x80
+
+
+/* dma capabilities */
+#define R852_DMA_CAP 0x09
+#define R852_SMBIT 0x20 /* if set with bit #6 or bit #7, then */
+ /* hw is smartmedia */
+#define R852_DMA1 0x40 /* if set w/bit #7, dma is supported */
+#define R852_DMA2 0x80 /* if set w/bit #6, dma is supported */
+
+
+/* physical DMA address - 32 bit value*/
+#define R852_DMA_ADDR 0x0C
+
+
+/* dma settings */
+#define R852_DMA_SETTINGS 0x10
+#define R852_DMA_MEMORY 0x01 /* (memory <-> internal hw buffer) */
+#define R852_DMA_READ 0x02 /* 0 = write, 1 = read */
+#define R852_DMA_INTERNAL 0x04 /* (internal hw buffer <-> card) */
+
+/* dma IRQ status */
+#define R852_DMA_IRQ_STA 0x14
+
+/* dma IRQ enable */
+#define R852_DMA_IRQ_ENABLE 0x18
+
+#define R852_DMA_IRQ_MEMORY 0x01 /* (memory <-> internal hw buffer) */
+#define R852_DMA_IRQ_ERROR 0x02 /* error did happen */
+#define R852_DMA_IRQ_INTERNAL 0x04 /* (internal hw buffer <-> card) */
+#define R852_DMA_IRQ_MASK 0x07 /* mask of all IRQ bits */
+
+
+/* ECC syndrome format - a dword read from reg #0 returns two of these
+   16-bit syndromes, one for each half of the page.
+   The first byte is the error byte location; the second is the bit
+   location plus flags */
+#define R852_ECC_ERR_BIT_MSK 0x07 /* error bit location */
+#define R852_ECC_CORRECT 0x10 /* no errors - (guessed) */
+#define R852_ECC_CORRECTABLE 0x20 /* correctable error exist */
+#define R852_ECC_FAIL 0x40 /* non correctable error detected */
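+
+/* A decoding sketch (mirrors what r852_ecc_correct() does; not used by
+   the driver itself):
+
+	u32 reg = r852_read_reg_dword(dev, R852_DATALINE);
+	u8 err_byte = reg & 0xFF;	   - first half: byte index
+	u8 status = (reg >> 8) & 0xFF;	   - first half: flags + bit
+	reg >>= 16;			   - second half follows
+*/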
+
+#define R852_DMA_LEN 512
+
+#define DMA_INTERNAL 0
+#define DMA_MEMORY 1
+
+struct r852_device {
+ struct nand_controller controller;
+ void __iomem *mmio; /* mmio */
+ struct nand_chip *chip; /* nand chip backpointer */
+ struct pci_dev *pci_dev; /* pci backpointer */
+
+ /* dma area */
+ dma_addr_t phys_dma_addr; /* bus address of buffer*/
+ struct completion dma_done; /* data transfer done */
+
+ dma_addr_t phys_bounce_buffer; /* bus address of bounce buffer */
+ uint8_t *bounce_buffer; /* virtual address of bounce buffer */
+
+ int dma_dir; /* 1 = read, 0 = write */
+ int dma_stage; /* 0 - idle, 1 - first step,
+ 2 - second step */
+
+ int dma_state; /* 0 = internal, 1 = memory */
+ int dma_error; /* dma errors */
+ int dma_usable; /* is it possible to use dma */
+
+ /* card status area */
+ struct delayed_work card_detect_work;
+ struct workqueue_struct *card_workqueue;
+ int card_registered; /* card registered with mtd */
+ int card_detected; /* card detected in slot */
+	int card_unstable;	/* card was just inserted/removed and
+					its state is not settled yet */
+ int readonly; /* card is readonly */
+ int sm; /* Is card smartmedia */
+
+ /* interrupt handling */
+ spinlock_t irqlock; /* IRQ protecting lock */
+ int irq; /* irq num */
+ /* misc */
+ void *tmp_buffer; /* temporary buffer */
+ uint8_t ctlreg; /* cached contents of control reg */
+};
+
+#define dbg(format, ...) \
+	do { \
+		if (debug) \
+			pr_debug(format "\n", ## __VA_ARGS__); \
+	} while (0)
+
+#define dbg_verbose(format, ...) \
+	do { \
+		if (debug > 1) \
+			pr_debug(format "\n", ## __VA_ARGS__); \
+	} while (0)
+
+
+#define message(format, ...) \
+ pr_info(format "\n", ## __VA_ARGS__)
diff --git a/drivers/mtd/nand/raw/s3c2410.c b/drivers/mtd/nand/raw/s3c2410.c
new file mode 100644
index 000000000..fbd0fa48e
--- /dev/null
+++ b/drivers/mtd/nand/raw/s3c2410.c
@@ -0,0 +1,1294 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright © 2004-2008 Simtec Electronics
+ * http://armlinux.simtec.co.uk/
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * Samsung S3C2410/S3C2440/S3C2412 NAND driver
+*/
+
+#define pr_fmt(fmt) "nand-s3c2410: " fmt
+
+#ifdef CONFIG_MTD_NAND_S3C2410_DEBUG
+#define DEBUG
+#endif
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/partitions.h>
+
+#include <linux/platform_data/mtd-nand-s3c2410.h>
+
+#define S3C2410_NFREG(x) (x)
+
+#define S3C2410_NFCONF S3C2410_NFREG(0x00)
+#define S3C2410_NFCMD S3C2410_NFREG(0x04)
+#define S3C2410_NFADDR S3C2410_NFREG(0x08)
+#define S3C2410_NFDATA S3C2410_NFREG(0x0C)
+#define S3C2410_NFSTAT S3C2410_NFREG(0x10)
+#define S3C2410_NFECC S3C2410_NFREG(0x14)
+#define S3C2440_NFCONT S3C2410_NFREG(0x04)
+#define S3C2440_NFCMD S3C2410_NFREG(0x08)
+#define S3C2440_NFADDR S3C2410_NFREG(0x0C)
+#define S3C2440_NFDATA S3C2410_NFREG(0x10)
+#define S3C2440_NFSTAT S3C2410_NFREG(0x20)
+#define S3C2440_NFMECC0 S3C2410_NFREG(0x2C)
+#define S3C2412_NFSTAT S3C2410_NFREG(0x28)
+#define S3C2412_NFMECC0 S3C2410_NFREG(0x34)
+#define S3C2410_NFCONF_EN (1<<15)
+#define S3C2410_NFCONF_INITECC (1<<12)
+#define S3C2410_NFCONF_nFCE (1<<11)
+#define S3C2410_NFCONF_TACLS(x) ((x)<<8)
+#define S3C2410_NFCONF_TWRPH0(x) ((x)<<4)
+#define S3C2410_NFCONF_TWRPH1(x) ((x)<<0)
+#define S3C2410_NFSTAT_BUSY (1<<0)
+#define S3C2440_NFCONF_TACLS(x) ((x)<<12)
+#define S3C2440_NFCONF_TWRPH0(x) ((x)<<8)
+#define S3C2440_NFCONF_TWRPH1(x) ((x)<<4)
+#define S3C2440_NFCONT_INITECC (1<<4)
+#define S3C2440_NFCONT_nFCE (1<<1)
+#define S3C2440_NFCONT_ENABLE (1<<0)
+#define S3C2440_NFSTAT_READY (1<<0)
+#define S3C2412_NFCONF_NANDBOOT (1<<31)
+#define S3C2412_NFCONT_INIT_MAIN_ECC (1<<5)
+#define S3C2412_NFCONT_nFCE0 (1<<1)
+#define S3C2412_NFSTAT_READY (1<<0)
+
+/* new oob placement block for use with hardware ecc generation
+ */
+static int s3c2410_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = 0;
+ oobregion->length = 3;
+
+ return 0;
+}
+
+static int s3c2410_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = 8;
+ oobregion->length = 8;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops s3c2410_ooblayout_ops = {
+ .ecc = s3c2410_ooblayout_ecc,
+ .free = s3c2410_ooblayout_free,
+};
+
+/* controller and mtd information */
+
+struct s3c2410_nand_info;
+
+/**
+ * struct s3c2410_nand_mtd - driver MTD structure
+ * @chip: The NAND chip information.
+ * @set: The platform information supplied for this set of NAND chips.
+ * @info: Link back to the hardware information.
+*/
+struct s3c2410_nand_mtd {
+ struct nand_chip chip;
+ struct s3c2410_nand_set *set;
+ struct s3c2410_nand_info *info;
+};
+
+enum s3c_cpu_type {
+ TYPE_S3C2410,
+ TYPE_S3C2412,
+ TYPE_S3C2440,
+};
+
+enum s3c_nand_clk_state {
+ CLOCK_DISABLE = 0,
+ CLOCK_ENABLE,
+ CLOCK_SUSPEND,
+};
+
+/* overview of the s3c2410 nand state */
+
+/**
+ * struct s3c2410_nand_info - NAND controller state.
+ * @controller: The base nand_controller object.
+ * @mtds: An array of MTD instances on this controller.
+ * @platform: The platform data for this board.
+ * @device: The platform device we bound to.
+ * @clk: The clock resource for this controller.
+ * @regs: The area mapped for the hardware registers.
+ * @sel_reg: Pointer to the register controlling the NAND selection.
+ * @sel_bit: The bit in @sel_reg to select the NAND chip.
+ * @mtd_count: The number of MTDs created from this controller.
+ * @save_sel: The contents of @sel_reg to be saved over suspend.
+ * @clk_rate: The clock rate from @clk.
+ * @clk_state: The current clock state.
+ * @cpu_type: The exact type of this controller.
+ */
+struct s3c2410_nand_info {
+ /* mtd info */
+ struct nand_controller controller;
+ struct s3c2410_nand_mtd *mtds;
+ struct s3c2410_platform_nand *platform;
+
+ /* device info */
+ struct device *device;
+ struct clk *clk;
+ void __iomem *regs;
+ void __iomem *sel_reg;
+ int sel_bit;
+ int mtd_count;
+ unsigned long save_sel;
+ unsigned long clk_rate;
+ enum s3c_nand_clk_state clk_state;
+
+ enum s3c_cpu_type cpu_type;
+
+#ifdef CONFIG_ARM_S3C24XX_CPUFREQ
+ struct notifier_block freq_transition;
+#endif
+};
+
+struct s3c24XX_nand_devtype_data {
+ enum s3c_cpu_type type;
+};
+
+static const struct s3c24XX_nand_devtype_data s3c2410_nand_devtype_data = {
+ .type = TYPE_S3C2410,
+};
+
+static const struct s3c24XX_nand_devtype_data s3c2412_nand_devtype_data = {
+ .type = TYPE_S3C2412,
+};
+
+static const struct s3c24XX_nand_devtype_data s3c2440_nand_devtype_data = {
+ .type = TYPE_S3C2440,
+};
+
+/* conversion functions */
+
+static struct s3c2410_nand_mtd *s3c2410_nand_mtd_toours(struct mtd_info *mtd)
+{
+ return container_of(mtd_to_nand(mtd), struct s3c2410_nand_mtd,
+ chip);
+}
+
+static struct s3c2410_nand_info *s3c2410_nand_mtd_toinfo(struct mtd_info *mtd)
+{
+ return s3c2410_nand_mtd_toours(mtd)->info;
+}
+
+static struct s3c2410_nand_info *to_nand_info(struct platform_device *dev)
+{
+ return platform_get_drvdata(dev);
+}
+
+static struct s3c2410_platform_nand *to_nand_plat(struct platform_device *dev)
+{
+ return dev_get_platdata(&dev->dev);
+}
+
+static inline int allow_clk_suspend(struct s3c2410_nand_info *info)
+{
+#ifdef CONFIG_MTD_NAND_S3C2410_CLKSTOP
+ return 1;
+#else
+ return 0;
+#endif
+}
+
+/**
+ * s3c2410_nand_clk_set_state - Enable, disable or suspend NAND clock.
+ * @info: The controller instance.
+ * @new_state: State to which clock should be set.
+ */
+static void s3c2410_nand_clk_set_state(struct s3c2410_nand_info *info,
+ enum s3c_nand_clk_state new_state)
+{
+ if (!allow_clk_suspend(info) && new_state == CLOCK_SUSPEND)
+ return;
+
+ if (info->clk_state == CLOCK_ENABLE) {
+ if (new_state != CLOCK_ENABLE)
+ clk_disable_unprepare(info->clk);
+ } else {
+ if (new_state == CLOCK_ENABLE)
+ clk_prepare_enable(info->clk);
+ }
+
+ info->clk_state = new_state;
+}
+
+/* timing calculations */
+
+#define NS_IN_KHZ 1000000
+
+/**
+ * s3c_nand_calc_rate - calculate timing data.
+ * @wanted: The cycle time in nanoseconds.
+ * @clk: The clock rate in kHz.
+ * @max: The maximum divider value.
+ *
+ * Calculate the timing value from the given parameters.
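+ *
+ * For example (illustrative numbers): a wanted cycle of 40ns on a
+ * 66500kHz clock gives DIV_ROUND_UP(40 * 66500, 1000000) = 3 ticks.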
+ */
+static int s3c_nand_calc_rate(int wanted, unsigned long clk, int max)
+{
+ int result;
+
+ result = DIV_ROUND_UP((wanted * clk), NS_IN_KHZ);
+
+ pr_debug("result %d from %ld, %d\n", result, clk, wanted);
+
+ if (result > max) {
+ pr_err("%d ns is too big for current clock rate %ld\n",
+ wanted, clk);
+ return -1;
+ }
+
+ if (result < 1)
+ result = 1;
+
+ return result;
+}
+
+#define to_ns(ticks, clk) (((ticks) * NS_IN_KHZ) / (unsigned int)(clk))
+
+/* controller setup */
+
+/**
+ * s3c2410_nand_setrate - setup controller timing information.
+ * @info: The controller instance.
+ *
+ * Given the information supplied by the platform, calculate and set
+ * the necessary timing registers in the hardware to generate the
+ * necessary timing cycles to the hardware.
+ */
+static int s3c2410_nand_setrate(struct s3c2410_nand_info *info)
+{
+ struct s3c2410_platform_nand *plat = info->platform;
+ int tacls_max = (info->cpu_type == TYPE_S3C2412) ? 8 : 4;
+ int tacls, twrph0, twrph1;
+ unsigned long clkrate = clk_get_rate(info->clk);
+ unsigned long set, cfg, mask;
+ unsigned long flags;
+
+ /* calculate the timing information for the controller */
+
+ info->clk_rate = clkrate;
+ clkrate /= 1000; /* turn clock into kHz for ease of use */
+
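+	/*
+	 * The three values program the NFCONF timing fields: Tacls is
+	 * the CLE/ALE setup time before the write strobe, Twrph0 the
+	 * strobe width and Twrph1 the hold time after it, all in
+	 * controller clock ticks (cf. the tCLS/tWP/tCLH mapping in
+	 * s3c2410_nand_setup_interface()).
+	 */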
+ if (plat != NULL) {
+ tacls = s3c_nand_calc_rate(plat->tacls, clkrate, tacls_max);
+ twrph0 = s3c_nand_calc_rate(plat->twrph0, clkrate, 8);
+ twrph1 = s3c_nand_calc_rate(plat->twrph1, clkrate, 8);
+ } else {
+ /* default timings */
+ tacls = tacls_max;
+ twrph0 = 8;
+ twrph1 = 8;
+ }
+
+ if (tacls < 0 || twrph0 < 0 || twrph1 < 0) {
+ dev_err(info->device, "cannot get suitable timings\n");
+ return -EINVAL;
+ }
+
+ dev_info(info->device, "Tacls=%d, %dns Twrph0=%d %dns, Twrph1=%d %dns\n",
+ tacls, to_ns(tacls, clkrate), twrph0, to_ns(twrph0, clkrate),
+ twrph1, to_ns(twrph1, clkrate));
+
+ switch (info->cpu_type) {
+ case TYPE_S3C2410:
+ mask = (S3C2410_NFCONF_TACLS(3) |
+ S3C2410_NFCONF_TWRPH0(7) |
+ S3C2410_NFCONF_TWRPH1(7));
+ set = S3C2410_NFCONF_EN;
+ set |= S3C2410_NFCONF_TACLS(tacls - 1);
+ set |= S3C2410_NFCONF_TWRPH0(twrph0 - 1);
+ set |= S3C2410_NFCONF_TWRPH1(twrph1 - 1);
+ break;
+
+ case TYPE_S3C2440:
+ case TYPE_S3C2412:
+ mask = (S3C2440_NFCONF_TACLS(tacls_max - 1) |
+ S3C2440_NFCONF_TWRPH0(7) |
+ S3C2440_NFCONF_TWRPH1(7));
+
+ set = S3C2440_NFCONF_TACLS(tacls - 1);
+ set |= S3C2440_NFCONF_TWRPH0(twrph0 - 1);
+ set |= S3C2440_NFCONF_TWRPH1(twrph1 - 1);
+ break;
+
+ default:
+ BUG();
+ }
+
+ local_irq_save(flags);
+
+ cfg = readl(info->regs + S3C2410_NFCONF);
+ cfg &= ~mask;
+ cfg |= set;
+ writel(cfg, info->regs + S3C2410_NFCONF);
+
+ local_irq_restore(flags);
+
+ dev_dbg(info->device, "NF_CONF is 0x%lx\n", cfg);
+
+ return 0;
+}
+
+/**
+ * s3c2410_nand_inithw - basic hardware initialisation
+ * @info: The hardware state.
+ *
+ * Do the basic initialisation of the hardware, using s3c2410_nand_setrate()
+ * to setup the hardware access speeds and set the controller to be enabled.
+*/
+static int s3c2410_nand_inithw(struct s3c2410_nand_info *info)
+{
+ int ret;
+
+ ret = s3c2410_nand_setrate(info);
+ if (ret < 0)
+ return ret;
+
+ switch (info->cpu_type) {
+ case TYPE_S3C2410:
+ default:
+ break;
+
+ case TYPE_S3C2440:
+ case TYPE_S3C2412:
+ /* enable the controller and de-assert nFCE */
+
+ writel(S3C2440_NFCONT_ENABLE, info->regs + S3C2440_NFCONT);
+ }
+
+ return 0;
+}
+
+/**
+ * s3c2410_nand_select_chip - select the given nand chip
+ * @this: NAND chip object.
+ * @chip: The chip number.
+ *
+ * This is called by the MTD layer to either select a given chip for the
+ * @mtd instance, or to indicate that the access has finished and the
+ * chip can be de-selected.
+ *
+ * The routine ensures that the nFCE line is correctly setup, and any
+ * platform specific selection code is called to route nFCE to the specific
+ * chip.
+ */
+static void s3c2410_nand_select_chip(struct nand_chip *this, int chip)
+{
+ struct s3c2410_nand_info *info;
+ struct s3c2410_nand_mtd *nmtd;
+ unsigned long cur;
+
+ nmtd = nand_get_controller_data(this);
+ info = nmtd->info;
+
+ if (chip != -1)
+ s3c2410_nand_clk_set_state(info, CLOCK_ENABLE);
+
+ cur = readl(info->sel_reg);
+
+ if (chip == -1) {
+ cur |= info->sel_bit;
+ } else {
+ if (nmtd->set != NULL && chip > nmtd->set->nr_chips) {
+ dev_err(info->device, "invalid chip %d\n", chip);
+ return;
+ }
+
+ if (info->platform != NULL) {
+ if (info->platform->select_chip != NULL)
+ (info->platform->select_chip) (nmtd->set, chip);
+ }
+
+ cur &= ~info->sel_bit;
+ }
+
+ writel(cur, info->sel_reg);
+
+ if (chip == -1)
+ s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND);
+}
+
+/* s3c2410_nand_hwcontrol
+ *
+ * Issue command and address cycles to the chip
+*/
+
+static void s3c2410_nand_hwcontrol(struct nand_chip *chip, int cmd,
+ unsigned int ctrl)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+
+ if (cmd == NAND_CMD_NONE)
+ return;
+
+ if (ctrl & NAND_CLE)
+ writeb(cmd, info->regs + S3C2410_NFCMD);
+ else
+ writeb(cmd, info->regs + S3C2410_NFADDR);
+}
+
+/* command and control functions */
+
+static void s3c2440_nand_hwcontrol(struct nand_chip *chip, int cmd,
+ unsigned int ctrl)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+
+ if (cmd == NAND_CMD_NONE)
+ return;
+
+ if (ctrl & NAND_CLE)
+ writeb(cmd, info->regs + S3C2440_NFCMD);
+ else
+ writeb(cmd, info->regs + S3C2440_NFADDR);
+}
+
+/* s3c2410_nand_devready()
+ *
+ * returns 0 if the nand is busy, non-zero if it is ready
+*/
+
+static int s3c2410_nand_devready(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+ return readb(info->regs + S3C2410_NFSTAT) & S3C2410_NFSTAT_BUSY;
+}
+
+static int s3c2440_nand_devready(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+ return readb(info->regs + S3C2440_NFSTAT) & S3C2440_NFSTAT_READY;
+}
+
+static int s3c2412_nand_devready(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+ return readb(info->regs + S3C2412_NFSTAT) & S3C2412_NFSTAT_READY;
+}
+
+/* ECC handling functions */
+
+static int s3c2410_nand_correct_data(struct nand_chip *chip, u_char *dat,
+ u_char *read_ecc, u_char *calc_ecc)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+ unsigned int diff0, diff1, diff2;
+ unsigned int bit, byte;
+
+ pr_debug("%s(%p,%p,%p,%p)\n", __func__, mtd, dat, read_ecc, calc_ecc);
+
+ diff0 = read_ecc[0] ^ calc_ecc[0];
+ diff1 = read_ecc[1] ^ calc_ecc[1];
+ diff2 = read_ecc[2] ^ calc_ecc[2];
+
+ pr_debug("%s: rd %*phN calc %*phN diff %02x%02x%02x\n",
+ __func__, 3, read_ecc, 3, calc_ecc,
+ diff0, diff1, diff2);
+
+ if (diff0 == 0 && diff1 == 0 && diff2 == 0)
+ return 0; /* ECC is ok */
+
+	/* sometimes people do not think about using the ECC, so check
+	 * to see if we have a 0xff,0xff,0xff read ECC and then ignore
+	 * the error, on the assumption that this is an un-ECCed page.
+	 */
+ if (read_ecc[0] == 0xff && read_ecc[1] == 0xff && read_ecc[2] == 0xff
+ && info->platform->ignore_unset_ecc)
+ return 0;
+
+	/* Can we correct this ECC (i.e., exactly one row and one column
+	 * changed)? Note, this is similar to the 256 error code on
+	 * smartmedia */
+
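+	/* Each parity bit is stored together with its complement, so a
+	 * single-bit data error flips exactly one bit of every pair;
+	 * that is what the 0x55-pattern test below checks. */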
+ if (((diff0 ^ (diff0 >> 1)) & 0x55) == 0x55 &&
+ ((diff1 ^ (diff1 >> 1)) & 0x55) == 0x55 &&
+ ((diff2 ^ (diff2 >> 1)) & 0x55) == 0x55) {
+ /* calculate the bit position of the error */
+
+ bit = ((diff2 >> 3) & 1) |
+ ((diff2 >> 4) & 2) |
+ ((diff2 >> 5) & 4);
+
+ /* calculate the byte position of the error */
+
+ byte = ((diff2 << 7) & 0x100) |
+ ((diff1 << 0) & 0x80) |
+ ((diff1 << 1) & 0x40) |
+ ((diff1 << 2) & 0x20) |
+ ((diff1 << 3) & 0x10) |
+ ((diff0 >> 4) & 0x08) |
+ ((diff0 >> 3) & 0x04) |
+ ((diff0 >> 2) & 0x02) |
+ ((diff0 >> 1) & 0x01);
+
+ dev_dbg(info->device, "correcting error bit %d, byte %d\n",
+ bit, byte);
+
+ dat[byte] ^= (1 << bit);
+ return 1;
+ }
+
+	/* if there is only one bit of difference in the ECC, then
+	 * only a single row or column parity bit has changed, which
+	 * means the error is most probably in the ECC itself */
+
+ diff0 |= (diff1 << 8);
+ diff0 |= (diff2 << 16);
+
+ /* equal to "(diff0 & ~(1 << __ffs(diff0)))" */
+ if ((diff0 & (diff0 - 1)) == 0)
+ return 1;
+
+ return -1;
+}
+
+/* ECC functions
+ *
+ * These allow the s3c2410 and s3c2440 to use the controller's ECC
+ * generator block to ECC the data as it passes through.
+*/
+
+static void s3c2410_nand_enable_hwecc(struct nand_chip *chip, int mode)
+{
+ struct s3c2410_nand_info *info;
+ unsigned long ctrl;
+
+ info = s3c2410_nand_mtd_toinfo(nand_to_mtd(chip));
+ ctrl = readl(info->regs + S3C2410_NFCONF);
+ ctrl |= S3C2410_NFCONF_INITECC;
+ writel(ctrl, info->regs + S3C2410_NFCONF);
+}
+
+static void s3c2412_nand_enable_hwecc(struct nand_chip *chip, int mode)
+{
+ struct s3c2410_nand_info *info;
+ unsigned long ctrl;
+
+ info = s3c2410_nand_mtd_toinfo(nand_to_mtd(chip));
+ ctrl = readl(info->regs + S3C2440_NFCONT);
+ writel(ctrl | S3C2412_NFCONT_INIT_MAIN_ECC,
+ info->regs + S3C2440_NFCONT);
+}
+
+static void s3c2440_nand_enable_hwecc(struct nand_chip *chip, int mode)
+{
+ struct s3c2410_nand_info *info;
+ unsigned long ctrl;
+
+ info = s3c2410_nand_mtd_toinfo(nand_to_mtd(chip));
+ ctrl = readl(info->regs + S3C2440_NFCONT);
+ writel(ctrl | S3C2440_NFCONT_INITECC, info->regs + S3C2440_NFCONT);
+}
+
+static int s3c2410_nand_calculate_ecc(struct nand_chip *chip,
+ const u_char *dat, u_char *ecc_code)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+
+ ecc_code[0] = readb(info->regs + S3C2410_NFECC + 0);
+ ecc_code[1] = readb(info->regs + S3C2410_NFECC + 1);
+ ecc_code[2] = readb(info->regs + S3C2410_NFECC + 2);
+
+ pr_debug("%s: returning ecc %*phN\n", __func__, 3, ecc_code);
+
+ return 0;
+}
+
+static int s3c2412_nand_calculate_ecc(struct nand_chip *chip,
+ const u_char *dat, u_char *ecc_code)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+ unsigned long ecc = readl(info->regs + S3C2412_NFMECC0);
+
+ ecc_code[0] = ecc;
+ ecc_code[1] = ecc >> 8;
+ ecc_code[2] = ecc >> 16;
+
+ pr_debug("%s: returning ecc %*phN\n", __func__, 3, ecc_code);
+
+ return 0;
+}
+
+static int s3c2440_nand_calculate_ecc(struct nand_chip *chip,
+ const u_char *dat, u_char *ecc_code)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+ unsigned long ecc = readl(info->regs + S3C2440_NFMECC0);
+
+ ecc_code[0] = ecc;
+ ecc_code[1] = ecc >> 8;
+ ecc_code[2] = ecc >> 16;
+
+ pr_debug("%s: returning ecc %06lx\n", __func__, ecc & 0xffffff);
+
+ return 0;
+}
+
+/* override the standard functions for a little more speed. We can
+ * use block reads/writes to move the data buffers to/from the controller
+*/
+
+static void s3c2410_nand_read_buf(struct nand_chip *this, u_char *buf, int len)
+{
+ readsb(this->legacy.IO_ADDR_R, buf, len);
+}
+
+static void s3c2440_nand_read_buf(struct nand_chip *this, u_char *buf, int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(this);
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+
+ readsl(info->regs + S3C2440_NFDATA, buf, len >> 2);
+
+ /* cleanup if we've got less than a word to do */
+ if (len & 3) {
+ buf += len & ~3;
+
+ for (; len & 3; len--)
+ *buf++ = readb(info->regs + S3C2440_NFDATA);
+ }
+}
+
+static void s3c2410_nand_write_buf(struct nand_chip *this, const u_char *buf,
+ int len)
+{
+ writesb(this->legacy.IO_ADDR_W, buf, len);
+}
+
+static void s3c2440_nand_write_buf(struct nand_chip *this, const u_char *buf,
+ int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(this);
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+
+ writesl(info->regs + S3C2440_NFDATA, buf, len >> 2);
+
+ /* cleanup any fractional write */
+ if (len & 3) {
+ buf += len & ~3;
+
+ for (; len & 3; len--, buf++)
+ writeb(*buf, info->regs + S3C2440_NFDATA);
+ }
+}
+
+/* cpufreq driver support */
+
+#ifdef CONFIG_ARM_S3C24XX_CPUFREQ
+
+static int s3c2410_nand_cpufreq_transition(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ struct s3c2410_nand_info *info;
+ unsigned long newclk;
+
+ info = container_of(nb, struct s3c2410_nand_info, freq_transition);
+ newclk = clk_get_rate(info->clk);
+
+ if ((val == CPUFREQ_POSTCHANGE && newclk < info->clk_rate) ||
+ (val == CPUFREQ_PRECHANGE && newclk > info->clk_rate)) {
+ s3c2410_nand_setrate(info);
+ }
+
+ return 0;
+}
+
+static inline int s3c2410_nand_cpufreq_register(struct s3c2410_nand_info *info)
+{
+ info->freq_transition.notifier_call = s3c2410_nand_cpufreq_transition;
+
+ return cpufreq_register_notifier(&info->freq_transition,
+ CPUFREQ_TRANSITION_NOTIFIER);
+}
+
+static inline void
+s3c2410_nand_cpufreq_deregister(struct s3c2410_nand_info *info)
+{
+ cpufreq_unregister_notifier(&info->freq_transition,
+ CPUFREQ_TRANSITION_NOTIFIER);
+}
+
+#else
+static inline int s3c2410_nand_cpufreq_register(struct s3c2410_nand_info *info)
+{
+ return 0;
+}
+
+static inline void
+s3c2410_nand_cpufreq_deregister(struct s3c2410_nand_info *info)
+{
+}
+#endif
+
+/* device management functions */
+
+static int s3c24xx_nand_remove(struct platform_device *pdev)
+{
+ struct s3c2410_nand_info *info = to_nand_info(pdev);
+
+ if (info == NULL)
+ return 0;
+
+ s3c2410_nand_cpufreq_deregister(info);
+
+ /* Release all our mtds and their partitions, then go through
+ * freeing the resources used
+ */
+
+ if (info->mtds != NULL) {
+ struct s3c2410_nand_mtd *ptr = info->mtds;
+ int mtdno;
+
+ for (mtdno = 0; mtdno < info->mtd_count; mtdno++, ptr++) {
+ pr_debug("releasing mtd %d (%p)\n", mtdno, ptr);
+ WARN_ON(mtd_device_unregister(nand_to_mtd(&ptr->chip)));
+ nand_cleanup(&ptr->chip);
+ }
+ }
+
+ /* free the common resources */
+
+ if (!IS_ERR(info->clk))
+ s3c2410_nand_clk_set_state(info, CLOCK_DISABLE);
+
+ return 0;
+}
+
+static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
+ struct s3c2410_nand_mtd *mtd,
+ struct s3c2410_nand_set *set)
+{
+ if (set) {
+ struct mtd_info *mtdinfo = nand_to_mtd(&mtd->chip);
+
+ mtdinfo->name = set->name;
+
+ return mtd_device_register(mtdinfo, set->partitions,
+ set->nr_partitions);
+ }
+
+ return -ENODEV;
+}
+
+static int s3c2410_nand_setup_interface(struct nand_chip *chip, int csline,
+ const struct nand_interface_config *conf)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+ struct s3c2410_platform_nand *pdata = info->platform;
+ const struct nand_sdr_timings *timings;
+ int tacls;
+
+ timings = nand_get_sdr_timings(conf);
+ if (IS_ERR(timings))
+ return -ENOTSUPP;
+
+ tacls = timings->tCLS_min - timings->tWP_min;
+ if (tacls < 0)
+ tacls = 0;
+
+ pdata->tacls = DIV_ROUND_UP(tacls, 1000);
+ pdata->twrph0 = DIV_ROUND_UP(timings->tWP_min, 1000);
+ pdata->twrph1 = DIV_ROUND_UP(timings->tCLH_min, 1000);
+
+ return s3c2410_nand_setrate(info);
+}
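+
+/*
+ * Worked example for the conversion above (illustrative values): with
+ * tCLS_min = 25000 ps and tWP_min = 15000 ps, tacls = 10000 ps, giving
+ * pdata->tacls = DIV_ROUND_UP(10000, 1000) = 10 and
+ * pdata->twrph0 = DIV_ROUND_UP(15000, 1000) = 15. The SDR timings are in
+ * picoseconds while the platform data is in nanoseconds, rounded up so
+ * the bus is never driven faster than the chip allows.
+ */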
+
+/**
+ * s3c2410_nand_init_chip - initialise a single instance of a chip
+ * @info: The base NAND controller the chip is on.
+ * @nmtd: The new controller MTD instance to fill in.
+ * @set: The information passed from the board specific platform data.
+ *
+ * Initialise the given @nmtd from the information in @info and @set. This
+ * readies the structure for use with the MTD layer functions by ensuring
+ * all pointers are set up and the necessary control routines are selected.
+ */
+static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
+ struct s3c2410_nand_mtd *nmtd,
+ struct s3c2410_nand_set *set)
+{
+ struct device_node *np = info->device->of_node;
+ struct nand_chip *chip = &nmtd->chip;
+ void __iomem *regs = info->regs;
+
+ nand_set_flash_node(chip, set->of_node);
+
+ chip->legacy.write_buf = s3c2410_nand_write_buf;
+ chip->legacy.read_buf = s3c2410_nand_read_buf;
+ chip->legacy.select_chip = s3c2410_nand_select_chip;
+ chip->legacy.chip_delay = 50;
+ nand_set_controller_data(chip, nmtd);
+ chip->options = set->options;
+ chip->controller = &info->controller;
+
+ /*
+ * let's keep behavior unchanged for legacy boards booting via pdata and
+ * auto-detect timings only when booting with a device tree.
+ */
+ if (!np)
+ chip->options |= NAND_KEEP_TIMINGS;
+
+ switch (info->cpu_type) {
+ case TYPE_S3C2410:
+ chip->legacy.IO_ADDR_W = regs + S3C2410_NFDATA;
+ info->sel_reg = regs + S3C2410_NFCONF;
+ info->sel_bit = S3C2410_NFCONF_nFCE;
+ chip->legacy.cmd_ctrl = s3c2410_nand_hwcontrol;
+ chip->legacy.dev_ready = s3c2410_nand_devready;
+ break;
+
+ case TYPE_S3C2440:
+ chip->legacy.IO_ADDR_W = regs + S3C2440_NFDATA;
+ info->sel_reg = regs + S3C2440_NFCONT;
+ info->sel_bit = S3C2440_NFCONT_nFCE;
+ chip->legacy.cmd_ctrl = s3c2440_nand_hwcontrol;
+ chip->legacy.dev_ready = s3c2440_nand_devready;
+ chip->legacy.read_buf = s3c2440_nand_read_buf;
+ chip->legacy.write_buf = s3c2440_nand_write_buf;
+ break;
+
+ case TYPE_S3C2412:
+ chip->legacy.IO_ADDR_W = regs + S3C2440_NFDATA;
+ info->sel_reg = regs + S3C2440_NFCONT;
+ info->sel_bit = S3C2412_NFCONT_nFCE0;
+ chip->legacy.cmd_ctrl = s3c2440_nand_hwcontrol;
+ chip->legacy.dev_ready = s3c2412_nand_devready;
+
+ if (readl(regs + S3C2410_NFCONF) & S3C2412_NFCONF_NANDBOOT)
+ dev_info(info->device, "System booted from NAND\n");
+
+ break;
+ }
+
+ chip->legacy.IO_ADDR_R = chip->legacy.IO_ADDR_W;
+
+ nmtd->info = info;
+ nmtd->set = set;
+
+ chip->ecc.engine_type = info->platform->engine_type;
+
+ /*
+ * If you use u-boot BBT creation code, specifying this flag will
+ * let the kernel fish out the BBT from the NAND.
+ */
+ if (set->flash_bbt)
+ chip->bbt_options |= NAND_BBT_USE_FLASH;
+}
+
+/**
+ * s3c2410_nand_attach_chip - Init the ECC engine after NAND scan
+ * @chip: The NAND chip
+ *
+ * This hook is called by the core after the identification of the NAND chip,
+ * once the relevant per-chip information is up to date. This call ensures
+ * that we update the internal state accordingly.
+ *
+ * The internal state is currently limited to the ECC state information.
+ */
+static int s3c2410_nand_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+
+ switch (chip->ecc.engine_type) {
+
+ case NAND_ECC_ENGINE_TYPE_NONE:
+ dev_info(info->device, "ECC disabled\n");
+ break;
+
+ case NAND_ECC_ENGINE_TYPE_SOFT:
+ /*
+ * This driver expects Hamming based ECC when engine_type is set
+ * to NAND_ECC_ENGINE_TYPE_SOFT. Force ecc.algo to
+ * NAND_ECC_ALGO_HAMMING to avoid adding an extra ecc_algo field
+ * to s3c2410_platform_nand.
+ */
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+ dev_info(info->device, "soft ECC\n");
+ break;
+
+ case NAND_ECC_ENGINE_TYPE_ON_HOST:
+ chip->ecc.calculate = s3c2410_nand_calculate_ecc;
+ chip->ecc.correct = s3c2410_nand_correct_data;
+ chip->ecc.strength = 1;
+
+ switch (info->cpu_type) {
+ case TYPE_S3C2410:
+ chip->ecc.hwctl = s3c2410_nand_enable_hwecc;
+ chip->ecc.calculate = s3c2410_nand_calculate_ecc;
+ break;
+
+ case TYPE_S3C2412:
+ chip->ecc.hwctl = s3c2412_nand_enable_hwecc;
+ chip->ecc.calculate = s3c2412_nand_calculate_ecc;
+ break;
+
+ case TYPE_S3C2440:
+ chip->ecc.hwctl = s3c2440_nand_enable_hwecc;
+ chip->ecc.calculate = s3c2440_nand_calculate_ecc;
+ break;
+ }
+
+ dev_dbg(info->device, "chip %p => page shift %d\n",
+ chip, chip->page_shift);
+
+		/*
+		 * Change the behaviour depending on whether we are using
+		 * the large or small page nand device.
+		 */
+ if (chip->page_shift > 10) {
+ chip->ecc.size = 256;
+ chip->ecc.bytes = 3;
+ } else {
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 3;
+ mtd_set_ooblayout(nand_to_mtd(chip),
+ &s3c2410_ooblayout_ops);
+ }
+
+ dev_info(info->device, "hardware ECC\n");
+ break;
+
+ default:
+ dev_err(info->device, "invalid ECC mode!\n");
+ return -EINVAL;
+ }
+
+ if (chip->bbt_options & NAND_BBT_USE_FLASH)
+ chip->options |= NAND_SKIP_BBTSCAN;
+
+ return 0;
+}
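+
+/*
+ * Sizing note for the hardware ECC setup above: the controller generates
+ * 3 ECC bytes per step. A 2048-byte page (page_shift = 11 > 10) uses
+ * 256-byte steps, i.e. 2048 / 256 = 8 steps and 8 * 3 = 24 ECC bytes per
+ * page, while a 512-byte page is covered by one 512-byte step of 3 bytes.
+ */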
+
+static const struct nand_controller_ops s3c24xx_nand_controller_ops = {
+ .attach_chip = s3c2410_nand_attach_chip,
+ .setup_interface = s3c2410_nand_setup_interface,
+};
+
+static const struct of_device_id s3c24xx_nand_dt_ids[] = {
+ {
+ .compatible = "samsung,s3c2410-nand",
+ .data = &s3c2410_nand_devtype_data,
+ }, {
+ /* also compatible with s3c6400 */
+ .compatible = "samsung,s3c2412-nand",
+ .data = &s3c2412_nand_devtype_data,
+ }, {
+ .compatible = "samsung,s3c2440-nand",
+ .data = &s3c2440_nand_devtype_data,
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, s3c24xx_nand_dt_ids);
+
+static int s3c24xx_nand_probe_dt(struct platform_device *pdev)
+{
+ const struct s3c24XX_nand_devtype_data *devtype_data;
+ struct s3c2410_platform_nand *pdata;
+ struct s3c2410_nand_info *info = platform_get_drvdata(pdev);
+ struct device_node *np = pdev->dev.of_node, *child;
+ struct s3c2410_nand_set *sets;
+
+ devtype_data = of_device_get_match_data(&pdev->dev);
+ if (!devtype_data)
+ return -ENODEV;
+
+ info->cpu_type = devtype_data->type;
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ pdev->dev.platform_data = pdata;
+
+ pdata->nr_sets = of_get_child_count(np);
+ if (!pdata->nr_sets)
+ return 0;
+
+ sets = devm_kcalloc(&pdev->dev, pdata->nr_sets, sizeof(*sets),
+ GFP_KERNEL);
+ if (!sets)
+ return -ENOMEM;
+
+ pdata->sets = sets;
+
+ for_each_available_child_of_node(np, child) {
+ sets->name = (char *)child->name;
+ sets->of_node = child;
+ sets->nr_chips = 1;
+
+ of_node_get(child);
+
+ sets++;
+ }
+
+ return 0;
+}
+
+static int s3c24xx_nand_probe_pdata(struct platform_device *pdev)
+{
+ struct s3c2410_nand_info *info = platform_get_drvdata(pdev);
+
+ info->cpu_type = platform_get_device_id(pdev)->driver_data;
+
+ return 0;
+}
+
+/* s3c24xx_nand_probe
+ *
+ * called by the device layer when it finds a device matching
+ * one our driver can handle. This code checks to see if
+ * it can allocate all necessary resources, then calls the
+ * nand layer to look for devices.
+ */
+static int s3c24xx_nand_probe(struct platform_device *pdev)
+{
+ struct s3c2410_platform_nand *plat;
+ struct s3c2410_nand_info *info;
+ struct s3c2410_nand_mtd *nmtd;
+ struct s3c2410_nand_set *sets;
+ struct resource *res;
+ int err = 0;
+ int size;
+ int nr_sets;
+ int setno;
+
+ info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+ if (info == NULL) {
+ err = -ENOMEM;
+ goto exit_error;
+ }
+
+ platform_set_drvdata(pdev, info);
+
+ nand_controller_init(&info->controller);
+ info->controller.ops = &s3c24xx_nand_controller_ops;
+
+ /* get the clock source and enable it */
+
+ info->clk = devm_clk_get(&pdev->dev, "nand");
+ if (IS_ERR(info->clk)) {
+ dev_err(&pdev->dev, "failed to get clock\n");
+ err = -ENOENT;
+ goto exit_error;
+ }
+
+ s3c2410_nand_clk_set_state(info, CLOCK_ENABLE);
+
+ if (pdev->dev.of_node)
+ err = s3c24xx_nand_probe_dt(pdev);
+ else
+ err = s3c24xx_nand_probe_pdata(pdev);
+
+ if (err)
+ goto exit_error;
+
+ plat = to_nand_plat(pdev);
+
+ /* allocate and map the resource */
+
+	/* currently we assume we have a single resource */
+ res = pdev->resource;
+ size = resource_size(res);
+
+ info->device = &pdev->dev;
+ info->platform = plat;
+
+ info->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(info->regs)) {
+ err = PTR_ERR(info->regs);
+ goto exit_error;
+ }
+
+ dev_dbg(&pdev->dev, "mapped registers at %p\n", info->regs);
+
+ if (!plat->sets || plat->nr_sets < 1) {
+ err = -EINVAL;
+ goto exit_error;
+ }
+
+ sets = plat->sets;
+ nr_sets = plat->nr_sets;
+
+ info->mtd_count = nr_sets;
+
+ /* allocate our information */
+
+ size = nr_sets * sizeof(*info->mtds);
+ info->mtds = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ if (info->mtds == NULL) {
+ err = -ENOMEM;
+ goto exit_error;
+ }
+
+ /* initialise all possible chips */
+
+ nmtd = info->mtds;
+
+ for (setno = 0; setno < nr_sets; setno++, nmtd++, sets++) {
+ struct mtd_info *mtd = nand_to_mtd(&nmtd->chip);
+
+ pr_debug("initialising set %d (%p, info %p)\n",
+ setno, nmtd, info);
+
+ mtd->dev.parent = &pdev->dev;
+ s3c2410_nand_init_chip(info, nmtd, sets);
+
+ err = nand_scan(&nmtd->chip, sets ? sets->nr_chips : 1);
+ if (err)
+ goto exit_error;
+
+ s3c2410_nand_add_partition(info, nmtd, sets);
+ }
+
+ /* initialise the hardware */
+ err = s3c2410_nand_inithw(info);
+ if (err != 0)
+ goto exit_error;
+
+ err = s3c2410_nand_cpufreq_register(info);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to init cpufreq support\n");
+ goto exit_error;
+ }
+
+ if (allow_clk_suspend(info)) {
+ dev_info(&pdev->dev, "clock idle support enabled\n");
+ s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND);
+ }
+
+ return 0;
+
+ exit_error:
+ s3c24xx_nand_remove(pdev);
+
+ if (err == 0)
+ err = -EINVAL;
+ return err;
+}
+
+/* PM Support */
+#ifdef CONFIG_PM
+
+static int s3c24xx_nand_suspend(struct platform_device *dev, pm_message_t pm)
+{
+ struct s3c2410_nand_info *info = platform_get_drvdata(dev);
+
+ if (info) {
+ info->save_sel = readl(info->sel_reg);
+
+		/*
+		 * For the moment, we must ensure nFCE is high during
+		 * the time we are suspended. This really should be
+		 * handled by suspending the MTDs we are using, but
+		 * that is currently not the case.
+		 */
+
+ writel(info->save_sel | info->sel_bit, info->sel_reg);
+
+ s3c2410_nand_clk_set_state(info, CLOCK_DISABLE);
+ }
+
+ return 0;
+}
+
+static int s3c24xx_nand_resume(struct platform_device *dev)
+{
+ struct s3c2410_nand_info *info = platform_get_drvdata(dev);
+ unsigned long sel;
+
+ if (info) {
+ s3c2410_nand_clk_set_state(info, CLOCK_ENABLE);
+ s3c2410_nand_inithw(info);
+
+ /* Restore the state of the nFCE line. */
+
+ sel = readl(info->sel_reg);
+ sel &= ~info->sel_bit;
+ sel |= info->save_sel & info->sel_bit;
+ writel(sel, info->sel_reg);
+
+ s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND);
+ }
+
+ return 0;
+}
+
+#else
+#define s3c24xx_nand_suspend NULL
+#define s3c24xx_nand_resume NULL
+#endif
+
+/* driver device registration */
+
+static const struct platform_device_id s3c24xx_driver_ids[] = {
+ {
+ .name = "s3c2410-nand",
+ .driver_data = TYPE_S3C2410,
+ }, {
+ .name = "s3c2440-nand",
+ .driver_data = TYPE_S3C2440,
+ }, {
+ .name = "s3c2412-nand",
+ .driver_data = TYPE_S3C2412,
+ }, {
+ .name = "s3c6400-nand",
+ .driver_data = TYPE_S3C2412, /* compatible with 2412 */
+ },
+ { }
+};
+
+MODULE_DEVICE_TABLE(platform, s3c24xx_driver_ids);
+
+static struct platform_driver s3c24xx_nand_driver = {
+ .probe = s3c24xx_nand_probe,
+ .remove = s3c24xx_nand_remove,
+ .suspend = s3c24xx_nand_suspend,
+ .resume = s3c24xx_nand_resume,
+ .id_table = s3c24xx_driver_ids,
+ .driver = {
+ .name = "s3c24xx-nand",
+ .of_match_table = s3c24xx_nand_dt_ids,
+ },
+};
+
+module_platform_driver(s3c24xx_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
+MODULE_DESCRIPTION("S3C24XX MTD NAND driver");
diff --git a/drivers/mtd/nand/raw/sh_flctl.c b/drivers/mtd/nand/raw/sh_flctl.c
new file mode 100644
index 000000000..8f89e2d3d
--- /dev/null
+++ b/drivers/mtd/nand/raw/sh_flctl.c
@@ -0,0 +1,1234 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SuperH FLCTL nand controller
+ *
+ * Copyright (c) 2008 Renesas Solutions Corp.
+ * Copyright (c) 2008 Atom Create Engineering Co., Ltd.
+ *
+ * Based on fsl_elbc_nand.c, Copyright (c) 2006-2007 Freescale Semiconductor
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/sh_dma.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/sh_flctl.h>
+
+static int flctl_4secc_ooblayout_sp_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = 0;
+ oobregion->length = chip->ecc.bytes;
+
+ return 0;
+}
+
+static int flctl_4secc_ooblayout_sp_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = 12;
+ oobregion->length = 4;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops flctl_4secc_oob_smallpage_ops = {
+ .ecc = flctl_4secc_ooblayout_sp_ecc,
+ .free = flctl_4secc_ooblayout_sp_free,
+};
+
+static int flctl_4secc_ooblayout_lp_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section >= chip->ecc.steps)
+ return -ERANGE;
+
+ oobregion->offset = (section * 16) + 6;
+ oobregion->length = chip->ecc.bytes;
+
+ return 0;
+}
+
+static int flctl_4secc_ooblayout_lp_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section >= chip->ecc.steps)
+ return -ERANGE;
+
+ oobregion->offset = section * 16;
+ oobregion->length = 6;
+
+ if (!section) {
+ oobregion->offset += 2;
+ oobregion->length -= 2;
+ }
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops flctl_4secc_oob_largepage_ops = {
+ .ecc = flctl_4secc_ooblayout_lp_ecc,
+ .free = flctl_4secc_ooblayout_lp_free,
+};
+
+static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
+
+static struct nand_bbt_descr flctl_4secc_smallpage = {
+ .offs = 11,
+ .len = 1,
+ .pattern = scan_ff_pattern,
+};
+
+static struct nand_bbt_descr flctl_4secc_largepage = {
+ .offs = 0,
+ .len = 2,
+ .pattern = scan_ff_pattern,
+};
+
+static void empty_fifo(struct sh_flctl *flctl)
+{
+ writel(flctl->flintdmacr_base | AC1CLR | AC0CLR, FLINTDMACR(flctl));
+ writel(flctl->flintdmacr_base, FLINTDMACR(flctl));
+}
+
+static void start_translation(struct sh_flctl *flctl)
+{
+ writeb(TRSTRT, FLTRCR(flctl));
+}
+
+static void timeout_error(struct sh_flctl *flctl, const char *str)
+{
+ dev_err(&flctl->pdev->dev, "Timeout occurred in %s\n", str);
+}
+
+static void wait_completion(struct sh_flctl *flctl)
+{
+ uint32_t timeout = LOOP_TIMEOUT_MAX;
+
+ while (timeout--) {
+ if (readb(FLTRCR(flctl)) & TREND) {
+ writeb(0x0, FLTRCR(flctl));
+ return;
+ }
+ udelay(1);
+ }
+
+ timeout_error(flctl, __func__);
+ writeb(0x0, FLTRCR(flctl));
+}
+
+static void flctl_dma_complete(void *param)
+{
+ struct sh_flctl *flctl = param;
+
+ complete(&flctl->dma_complete);
+}
+
+static void flctl_release_dma(struct sh_flctl *flctl)
+{
+ if (flctl->chan_fifo0_rx) {
+ dma_release_channel(flctl->chan_fifo0_rx);
+ flctl->chan_fifo0_rx = NULL;
+ }
+ if (flctl->chan_fifo0_tx) {
+ dma_release_channel(flctl->chan_fifo0_tx);
+ flctl->chan_fifo0_tx = NULL;
+ }
+}
+
+static void flctl_setup_dma(struct sh_flctl *flctl)
+{
+ dma_cap_mask_t mask;
+ struct dma_slave_config cfg;
+ struct platform_device *pdev = flctl->pdev;
+ struct sh_flctl_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ int ret;
+
+ if (!pdata)
+ return;
+
+ if (pdata->slave_id_fifo0_tx <= 0 || pdata->slave_id_fifo0_rx <= 0)
+ return;
+
+ /* We can only either use DMA for both Tx and Rx or not use it at all */
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ flctl->chan_fifo0_tx = dma_request_channel(mask, shdma_chan_filter,
+ (void *)(uintptr_t)pdata->slave_id_fifo0_tx);
+ dev_dbg(&pdev->dev, "%s: TX: got channel %p\n", __func__,
+ flctl->chan_fifo0_tx);
+
+ if (!flctl->chan_fifo0_tx)
+ return;
+
+ memset(&cfg, 0, sizeof(cfg));
+ cfg.direction = DMA_MEM_TO_DEV;
+ cfg.dst_addr = flctl->fifo;
+ cfg.src_addr = 0;
+ ret = dmaengine_slave_config(flctl->chan_fifo0_tx, &cfg);
+ if (ret < 0)
+ goto err;
+
+ flctl->chan_fifo0_rx = dma_request_channel(mask, shdma_chan_filter,
+ (void *)(uintptr_t)pdata->slave_id_fifo0_rx);
+ dev_dbg(&pdev->dev, "%s: RX: got channel %p\n", __func__,
+ flctl->chan_fifo0_rx);
+
+ if (!flctl->chan_fifo0_rx)
+ goto err;
+
+ cfg.direction = DMA_DEV_TO_MEM;
+ cfg.dst_addr = 0;
+ cfg.src_addr = flctl->fifo;
+ ret = dmaengine_slave_config(flctl->chan_fifo0_rx, &cfg);
+ if (ret < 0)
+ goto err;
+
+ init_completion(&flctl->dma_complete);
+
+ return;
+
+err:
+ flctl_release_dma(flctl);
+}
+
+static void set_addr(struct mtd_info *mtd, int column, int page_addr)
+{
+ struct sh_flctl *flctl = mtd_to_flctl(mtd);
+ uint32_t addr = 0;
+
+ if (column == -1) {
+ addr = page_addr; /* ERASE1 */
+ } else if (page_addr != -1) {
+		/* SEQIN, READ0, etc. */
+ if (flctl->chip.options & NAND_BUSWIDTH_16)
+ column >>= 1;
+ if (flctl->page_size) {
+ addr = column & 0x0FFF;
+ addr |= (page_addr & 0xff) << 16;
+ addr |= ((page_addr >> 8) & 0xff) << 24;
+			/* bigger than 128MB */
+ if (flctl->rw_ADRCNT == ADRCNT2_E) {
+ uint32_t addr2;
+ addr2 = (page_addr >> 16) & 0xff;
+ writel(addr2, FLADR2(flctl));
+ }
+ } else {
+ addr = column;
+ addr |= (page_addr & 0xff) << 8;
+ addr |= ((page_addr >> 8) & 0xff) << 16;
+ addr |= ((page_addr >> 16) & 0xff) << 24;
+ }
+ }
+ writel(addr, FLADR(flctl));
+}
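+
+/*
+ * Worked example for the large page packing above (illustrative values):
+ * column = 0x10 and page_addr = 0x1234 yield
+ * FLADR = 0x0010 | (0x34 << 16) | (0x12 << 24) = 0x12340010. On devices
+ * larger than 128MB (rw_ADRCNT == ADRCNT2_E), the third page address
+ * byte, (page_addr >> 16) & 0xff, is written separately to FLADR2.
+ */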
+
+static void wait_rfifo_ready(struct sh_flctl *flctl)
+{
+ uint32_t timeout = LOOP_TIMEOUT_MAX;
+
+ while (timeout--) {
+ uint32_t val;
+ /* check FIFO */
+ val = readl(FLDTCNTR(flctl)) >> 16;
+ if (val & 0xFF)
+ return;
+ udelay(1);
+ }
+ timeout_error(flctl, __func__);
+}
+
+static void wait_wfifo_ready(struct sh_flctl *flctl)
+{
+ uint32_t len, timeout = LOOP_TIMEOUT_MAX;
+
+ while (timeout--) {
+ /* check FIFO */
+ len = (readl(FLDTCNTR(flctl)) >> 16) & 0xFF;
+ if (len >= 4)
+ return;
+ udelay(1);
+ }
+ timeout_error(flctl, __func__);
+}
+
+static enum flctl_ecc_res_t wait_recfifo_ready
+ (struct sh_flctl *flctl, int sector_number)
+{
+ uint32_t timeout = LOOP_TIMEOUT_MAX;
+ void __iomem *ecc_reg[4];
+ int i;
+ int state = FL_SUCCESS;
+ uint32_t data, size;
+
+	/*
+	 * First this loop checks FLDTCNTR to see whether we are ready to read
+	 * out the oob data. This is the case if either everything went fine
+	 * without errors, or the bottom part of the loop has corrected the
+	 * errors or marked them as uncorrectable and the controller has been
+	 * given time to push the data into the FIFO.
+	 */
+ while (timeout--) {
+ /* check if all is ok and we can read out the OOB */
+ size = readl(FLDTCNTR(flctl)) >> 24;
+ if ((size & 0xFF) == 4)
+ return state;
+
+ /* check if a correction code has been calculated */
+ if (!(readl(FL4ECCCR(flctl)) & _4ECCEND)) {
+ /*
+ * either we wait for the fifo to be filled or a
+ * correction pattern is being generated
+ */
+ udelay(1);
+ continue;
+ }
+
+ /* check for an uncorrectable error */
+ if (readl(FL4ECCCR(flctl)) & _4ECCFA) {
+ /* check if we face a non-empty page */
+ for (i = 0; i < 512; i++) {
+ if (flctl->done_buff[i] != 0xff) {
+ state = FL_ERROR; /* can't correct */
+ break;
+ }
+ }
+
+ if (state == FL_SUCCESS)
+ dev_dbg(&flctl->pdev->dev,
+ "reading empty sector %d, ecc error ignored\n",
+ sector_number);
+
+ writel(0, FL4ECCCR(flctl));
+ continue;
+ }
+
+ /* start error correction */
+ ecc_reg[0] = FL4ECCRESULT0(flctl);
+ ecc_reg[1] = FL4ECCRESULT1(flctl);
+ ecc_reg[2] = FL4ECCRESULT2(flctl);
+ ecc_reg[3] = FL4ECCRESULT3(flctl);
+
+ for (i = 0; i < 3; i++) {
+ uint8_t org;
+ unsigned int index;
+
+ data = readl(ecc_reg[i]);
+
+ if (flctl->page_size)
+ index = (512 * sector_number) +
+ (data >> 16);
+ else
+ index = data >> 16;
+
+ org = flctl->done_buff[index];
+ flctl->done_buff[index] = org ^ (data & 0xFF);
+ }
+ state = FL_REPAIRABLE;
+ writel(0, FL4ECCCR(flctl));
+ }
+
+ timeout_error(flctl, __func__);
+ return FL_TIMEOUT; /* timeout */
+}
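+
+/*
+ * Decoding note for the correction loop above: each FL4ECCRESULTx word
+ * holds the failing byte offset within the 512-byte sector in its upper
+ * half (data >> 16) and the repair pattern in its low byte (data & 0xFF).
+ * With the illustrative value data = 0x001A0004 in sector 2 of a large
+ * page device, bit 2 of done_buff[512 * 2 + 0x1A] is flipped.
+ */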
+
+static void wait_wecfifo_ready(struct sh_flctl *flctl)
+{
+ uint32_t timeout = LOOP_TIMEOUT_MAX;
+ uint32_t len;
+
+ while (timeout--) {
+ /* check FLECFIFO */
+ len = (readl(FLDTCNTR(flctl)) >> 24) & 0xFF;
+ if (len >= 4)
+ return;
+ udelay(1);
+ }
+ timeout_error(flctl, __func__);
+}
+
+static int flctl_dma_fifo0_transfer(struct sh_flctl *flctl, unsigned long *buf,
+ int len, enum dma_data_direction dir)
+{
+ struct dma_async_tx_descriptor *desc = NULL;
+ struct dma_chan *chan;
+ enum dma_transfer_direction tr_dir;
+ dma_addr_t dma_addr;
+ dma_cookie_t cookie;
+ uint32_t reg;
+ int ret = 0;
+ unsigned long time_left;
+
+ if (dir == DMA_FROM_DEVICE) {
+ chan = flctl->chan_fifo0_rx;
+ tr_dir = DMA_DEV_TO_MEM;
+ } else {
+ chan = flctl->chan_fifo0_tx;
+ tr_dir = DMA_MEM_TO_DEV;
+ }
+
+ dma_addr = dma_map_single(chan->device->dev, buf, len, dir);
+
+ if (!dma_mapping_error(chan->device->dev, dma_addr))
+ desc = dmaengine_prep_slave_single(chan, dma_addr, len,
+ tr_dir, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+
+ if (desc) {
+ reg = readl(FLINTDMACR(flctl));
+ reg |= DREQ0EN;
+ writel(reg, FLINTDMACR(flctl));
+
+ desc->callback = flctl_dma_complete;
+ desc->callback_param = flctl;
+ cookie = dmaengine_submit(desc);
+ if (dma_submit_error(cookie)) {
+ ret = dma_submit_error(cookie);
+ dev_warn(&flctl->pdev->dev,
+ "DMA submit failed, falling back to PIO\n");
+ goto out;
+ }
+
+ dma_async_issue_pending(chan);
+ } else {
+ /* DMA failed, fall back to PIO */
+ flctl_release_dma(flctl);
+ dev_warn(&flctl->pdev->dev,
+ "DMA failed, falling back to PIO\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ time_left =
+ wait_for_completion_timeout(&flctl->dma_complete,
+ msecs_to_jiffies(3000));
+
+ if (time_left == 0) {
+ dmaengine_terminate_all(chan);
+		dev_err(&flctl->pdev->dev, "DMA transfer timed out\n");
+ ret = -ETIMEDOUT;
+ }
+
+out:
+ reg = readl(FLINTDMACR(flctl));
+ reg &= ~DREQ0EN;
+ writel(reg, FLINTDMACR(flctl));
+
+ dma_unmap_single(chan->device->dev, dma_addr, len, dir);
+
+ /* ret == 0 is success */
+ return ret;
+}
+
+static void read_datareg(struct sh_flctl *flctl, int offset)
+{
+ unsigned long data;
+ unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
+
+ wait_completion(flctl);
+
+ data = readl(FLDATAR(flctl));
+ *buf = le32_to_cpu(data);
+}
+
+static void read_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
+{
+ int i, len_4align;
+ unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
+
+ len_4align = (rlen + 3) / 4;
+
+ /* initiate DMA transfer */
+ if (flctl->chan_fifo0_rx && rlen >= 32 &&
+ !flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_FROM_DEVICE))
+ goto convert; /* DMA success */
+
+ /* do polling transfer */
+ for (i = 0; i < len_4align; i++) {
+ wait_rfifo_ready(flctl);
+ buf[i] = readl(FLDTFIFO(flctl));
+ }
+
+convert:
+ for (i = 0; i < len_4align; i++)
+ buf[i] = be32_to_cpu(buf[i]);
+}
+
+static enum flctl_ecc_res_t read_ecfiforeg
+ (struct sh_flctl *flctl, uint8_t *buff, int sector)
+{
+ int i;
+ enum flctl_ecc_res_t res;
+ unsigned long *ecc_buf = (unsigned long *)buff;
+
+ res = wait_recfifo_ready(flctl , sector);
+
+ if (res != FL_ERROR) {
+ for (i = 0; i < 4; i++) {
+ ecc_buf[i] = readl(FLECFIFO(flctl));
+ ecc_buf[i] = be32_to_cpu(ecc_buf[i]);
+ }
+ }
+
+ return res;
+}
+
+static void write_fiforeg(struct sh_flctl *flctl, int rlen,
+ unsigned int offset)
+{
+ int i, len_4align;
+ unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
+
+ len_4align = (rlen + 3) / 4;
+ for (i = 0; i < len_4align; i++) {
+ wait_wfifo_ready(flctl);
+ writel(cpu_to_be32(buf[i]), FLDTFIFO(flctl));
+ }
+}
+
+static void write_ec_fiforeg(struct sh_flctl *flctl, int rlen,
+ unsigned int offset)
+{
+ int i, len_4align;
+ unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
+
+ len_4align = (rlen + 3) / 4;
+
+ for (i = 0; i < len_4align; i++)
+ buf[i] = cpu_to_be32(buf[i]);
+
+ /* initiate DMA transfer */
+ if (flctl->chan_fifo0_tx && rlen >= 32 &&
+ !flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_TO_DEVICE))
+ return; /* DMA success */
+
+ /* do polling transfer */
+ for (i = 0; i < len_4align; i++) {
+ wait_wecfifo_ready(flctl);
+ writel(buf[i], FLECFIFO(flctl));
+ }
+}
+
+static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_val)
+{
+ struct sh_flctl *flctl = mtd_to_flctl(mtd);
+ uint32_t flcmncr_val = flctl->flcmncr_base & ~SEL_16BIT;
+ uint32_t flcmdcr_val, addr_len_bytes = 0;
+
+	/* Set SNAND bit if page size is 2048 bytes */
+ if (flctl->page_size)
+ flcmncr_val |= SNAND_E;
+ else
+ flcmncr_val &= ~SNAND_E;
+
+ /* default FLCMDCR val */
+ flcmdcr_val = DOCMD1_E | DOADR_E;
+
+ /* Set for FLCMDCR */
+ switch (cmd) {
+ case NAND_CMD_ERASE1:
+ addr_len_bytes = flctl->erase_ADRCNT;
+ flcmdcr_val |= DOCMD2_E;
+ break;
+ case NAND_CMD_READ0:
+ case NAND_CMD_READOOB:
+ case NAND_CMD_RNDOUT:
+ addr_len_bytes = flctl->rw_ADRCNT;
+ flcmdcr_val |= CDSRC_E;
+ if (flctl->chip.options & NAND_BUSWIDTH_16)
+ flcmncr_val |= SEL_16BIT;
+ break;
+ case NAND_CMD_SEQIN:
+		/* In this case the command is READ0, READ1 or READ00 */
+ flcmdcr_val &= ~DOADR_E; /* ONLY execute 1st cmd */
+ break;
+ case NAND_CMD_PAGEPROG:
+ addr_len_bytes = flctl->rw_ADRCNT;
+ flcmdcr_val |= DOCMD2_E | CDSRC_E | SELRW;
+ if (flctl->chip.options & NAND_BUSWIDTH_16)
+ flcmncr_val |= SEL_16BIT;
+ break;
+ case NAND_CMD_READID:
+ flcmncr_val &= ~SNAND_E;
+ flcmdcr_val |= CDSRC_E;
+ addr_len_bytes = ADRCNT_1;
+ break;
+ case NAND_CMD_STATUS:
+ case NAND_CMD_RESET:
+ flcmncr_val &= ~SNAND_E;
+ flcmdcr_val &= ~(DOADR_E | DOSR_E);
+ break;
+ default:
+ break;
+ }
+
+ /* Set address bytes parameter */
+ flcmdcr_val |= addr_len_bytes;
+
+ /* Now actually write */
+ writel(flcmncr_val, FLCMNCR(flctl));
+ writel(flcmdcr_val, FLCMDCR(flctl));
+ writel(flcmcdr_val, FLCMCDR(flctl));
+}
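+
+/*
+ * FLCMCDR packing, as used by the callers of set_cmd_regs(): the low byte
+ * holds the first command cycle and the next byte the optional second one.
+ * A large page read, for instance, passes
+ * (NAND_CMD_READSTART << 8) | NAND_CMD_READ0 = (0x30 << 8) | 0x00 = 0x3000.
+ */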
+
+static int flctl_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ nand_read_page_op(chip, page, 0, buf, mtd->writesize);
+ if (oob_required)
+ chip->legacy.read_buf(chip, chip->oob_poi, mtd->oobsize);
+ return 0;
+}
+
+static int flctl_write_page_hwecc(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
+ chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);
+ return nand_prog_page_end_op(chip);
+}
+
+static void execmd_read_page_sector(struct mtd_info *mtd, int page_addr)
+{
+ struct sh_flctl *flctl = mtd_to_flctl(mtd);
+ int sector, page_sectors;
+ enum flctl_ecc_res_t ecc_result;
+
+ page_sectors = flctl->page_size ? 4 : 1;
+
+ set_cmd_regs(mtd, NAND_CMD_READ0,
+ (NAND_CMD_READSTART << 8) | NAND_CMD_READ0);
+
+ writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE | _4ECCCORRECT,
+ FLCMNCR(flctl));
+ writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
+ writel(page_addr << 2, FLADR(flctl));
+
+ empty_fifo(flctl);
+ start_translation(flctl);
+
+ for (sector = 0; sector < page_sectors; sector++) {
+ read_fiforeg(flctl, 512, 512 * sector);
+
+ ecc_result = read_ecfiforeg(flctl,
+ &flctl->done_buff[mtd->writesize + 16 * sector],
+ sector);
+
+ switch (ecc_result) {
+ case FL_REPAIRABLE:
+ dev_info(&flctl->pdev->dev,
+ "applied ecc on page 0x%x", page_addr);
+ mtd->ecc_stats.corrected++;
+ break;
+ case FL_ERROR:
+ dev_warn(&flctl->pdev->dev,
+ "page 0x%x contains corrupted data\n",
+ page_addr);
+ mtd->ecc_stats.failed++;
+ break;
+ default:
+ ;
+ }
+ }
+
+ wait_completion(flctl);
+
+ writel(readl(FLCMNCR(flctl)) & ~(ACM_SACCES_MODE | _4ECCCORRECT),
+ FLCMNCR(flctl));
+}
+
+static void execmd_read_oob(struct mtd_info *mtd, int page_addr)
+{
+ struct sh_flctl *flctl = mtd_to_flctl(mtd);
+ int page_sectors = flctl->page_size ? 4 : 1;
+ int i;
+
+ set_cmd_regs(mtd, NAND_CMD_READ0,
+ (NAND_CMD_READSTART << 8) | NAND_CMD_READ0);
+
+ empty_fifo(flctl);
+
+ for (i = 0; i < page_sectors; i++) {
+ set_addr(mtd, (512 + 16) * i + 512 , page_addr);
+ writel(16, FLDTCNTR(flctl));
+
+ start_translation(flctl);
+ read_fiforeg(flctl, 16, 16 * i);
+ wait_completion(flctl);
+ }
+}
+
+static void execmd_write_page_sector(struct mtd_info *mtd)
+{
+ struct sh_flctl *flctl = mtd_to_flctl(mtd);
+ int page_addr = flctl->seqin_page_addr;
+ int sector, page_sectors;
+
+ page_sectors = flctl->page_size ? 4 : 1;
+
+ set_cmd_regs(mtd, NAND_CMD_PAGEPROG,
+ (NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN);
+
+ empty_fifo(flctl);
+ writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE, FLCMNCR(flctl));
+ writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
+ writel(page_addr << 2, FLADR(flctl));
+ start_translation(flctl);
+
+ for (sector = 0; sector < page_sectors; sector++) {
+ write_fiforeg(flctl, 512, 512 * sector);
+ write_ec_fiforeg(flctl, 16, mtd->writesize + 16 * sector);
+ }
+
+ wait_completion(flctl);
+ writel(readl(FLCMNCR(flctl)) & ~ACM_SACCES_MODE, FLCMNCR(flctl));
+}
+
+static void execmd_write_oob(struct mtd_info *mtd)
+{
+ struct sh_flctl *flctl = mtd_to_flctl(mtd);
+ int page_addr = flctl->seqin_page_addr;
+ int sector, page_sectors;
+
+ page_sectors = flctl->page_size ? 4 : 1;
+
+ set_cmd_regs(mtd, NAND_CMD_PAGEPROG,
+ (NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN);
+
+ for (sector = 0; sector < page_sectors; sector++) {
+ empty_fifo(flctl);
+ set_addr(mtd, sector * 528 + 512, page_addr);
+ writel(16, FLDTCNTR(flctl)); /* set read size */
+
+ start_translation(flctl);
+ write_fiforeg(flctl, 16, 16 * sector);
+ wait_completion(flctl);
+ }
+}
+
+static void flctl_cmdfunc(struct nand_chip *chip, unsigned int command,
+ int column, int page_addr)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct sh_flctl *flctl = mtd_to_flctl(mtd);
+ uint32_t read_cmd = 0;
+
+ pm_runtime_get_sync(&flctl->pdev->dev);
+
+ flctl->read_bytes = 0;
+ if (command != NAND_CMD_PAGEPROG)
+ flctl->index = 0;
+
+ switch (command) {
+ case NAND_CMD_READ1:
+ case NAND_CMD_READ0:
+ if (flctl->hwecc) {
+ /* read page with hwecc */
+ execmd_read_page_sector(mtd, page_addr);
+ break;
+ }
+ if (flctl->page_size)
+ set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8)
+ | command);
+ else
+ set_cmd_regs(mtd, command, command);
+
+ set_addr(mtd, 0, page_addr);
+
+ flctl->read_bytes = mtd->writesize + mtd->oobsize;
+ if (flctl->chip.options & NAND_BUSWIDTH_16)
+ column >>= 1;
+ flctl->index += column;
+ goto read_normal_exit;
+
+ case NAND_CMD_READOOB:
+ if (flctl->hwecc) {
+ /* read page with hwecc */
+ execmd_read_oob(mtd, page_addr);
+ break;
+ }
+
+ if (flctl->page_size) {
+ set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8)
+ | NAND_CMD_READ0);
+ set_addr(mtd, mtd->writesize, page_addr);
+ } else {
+ set_cmd_regs(mtd, command, command);
+ set_addr(mtd, 0, page_addr);
+ }
+ flctl->read_bytes = mtd->oobsize;
+ goto read_normal_exit;
+
+ case NAND_CMD_RNDOUT:
+ if (flctl->hwecc)
+ break;
+
+ if (flctl->page_size)
+ set_cmd_regs(mtd, command, (NAND_CMD_RNDOUTSTART << 8)
+ | command);
+ else
+ set_cmd_regs(mtd, command, command);
+
+ set_addr(mtd, column, 0);
+
+ flctl->read_bytes = mtd->writesize + mtd->oobsize - column;
+ goto read_normal_exit;
+
+ case NAND_CMD_READID:
+ set_cmd_regs(mtd, command, command);
+
+ /* READID is always performed using an 8-bit bus */
+ if (flctl->chip.options & NAND_BUSWIDTH_16)
+ column <<= 1;
+ set_addr(mtd, column, 0);
+
+ flctl->read_bytes = 8;
+ writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
+ empty_fifo(flctl);
+ start_translation(flctl);
+ read_fiforeg(flctl, flctl->read_bytes, 0);
+ wait_completion(flctl);
+ break;
+
+ case NAND_CMD_ERASE1:
+ flctl->erase1_page_addr = page_addr;
+ break;
+
+ case NAND_CMD_ERASE2:
+ set_cmd_regs(mtd, NAND_CMD_ERASE1,
+ (command << 8) | NAND_CMD_ERASE1);
+ set_addr(mtd, -1, flctl->erase1_page_addr);
+ start_translation(flctl);
+ wait_completion(flctl);
+ break;
+
+ case NAND_CMD_SEQIN:
+ if (!flctl->page_size) {
+ /* output read command */
+ if (column >= mtd->writesize) {
+ column -= mtd->writesize;
+ read_cmd = NAND_CMD_READOOB;
+ } else if (column < 256) {
+ read_cmd = NAND_CMD_READ0;
+ } else {
+ column -= 256;
+ read_cmd = NAND_CMD_READ1;
+ }
+ }
+ flctl->seqin_column = column;
+ flctl->seqin_page_addr = page_addr;
+ flctl->seqin_read_cmd = read_cmd;
+ break;
+
+ case NAND_CMD_PAGEPROG:
+ empty_fifo(flctl);
+ if (!flctl->page_size) {
+ set_cmd_regs(mtd, NAND_CMD_SEQIN,
+ flctl->seqin_read_cmd);
+ set_addr(mtd, -1, -1);
+ writel(0, FLDTCNTR(flctl)); /* set 0 size */
+ start_translation(flctl);
+ wait_completion(flctl);
+ }
+ if (flctl->hwecc) {
+ /* write page with hwecc */
+ if (flctl->seqin_column == mtd->writesize)
+ execmd_write_oob(mtd);
+ else if (!flctl->seqin_column)
+ execmd_write_page_sector(mtd);
+ else
+ pr_err("Invalid address !?\n");
+ break;
+ }
+ set_cmd_regs(mtd, command, (command << 8) | NAND_CMD_SEQIN);
+ set_addr(mtd, flctl->seqin_column, flctl->seqin_page_addr);
+ writel(flctl->index, FLDTCNTR(flctl)); /* set write size */
+ start_translation(flctl);
+ write_fiforeg(flctl, flctl->index, 0);
+ wait_completion(flctl);
+ break;
+
+ case NAND_CMD_STATUS:
+ set_cmd_regs(mtd, command, command);
+ set_addr(mtd, -1, -1);
+
+ flctl->read_bytes = 1;
+ writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
+ start_translation(flctl);
+ read_datareg(flctl, 0); /* read and end */
+ break;
+
+ case NAND_CMD_RESET:
+ set_cmd_regs(mtd, command, command);
+ set_addr(mtd, -1, -1);
+
+ writel(0, FLDTCNTR(flctl)); /* set 0 size */
+ start_translation(flctl);
+ wait_completion(flctl);
+ break;
+
+ default:
+ break;
+ }
+ goto runtime_exit;
+
+read_normal_exit:
+ writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
+ empty_fifo(flctl);
+ start_translation(flctl);
+ read_fiforeg(flctl, flctl->read_bytes, 0);
+ wait_completion(flctl);
+runtime_exit:
+ pm_runtime_put_sync(&flctl->pdev->dev);
+ return;
+}
+
+static void flctl_select_chip(struct nand_chip *chip, int chipnr)
+{
+ struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));
+ int ret;
+
+ switch (chipnr) {
+ case -1:
+ flctl->flcmncr_base &= ~CE0_ENABLE;
+
+ pm_runtime_get_sync(&flctl->pdev->dev);
+ writel(flctl->flcmncr_base, FLCMNCR(flctl));
+
+ if (flctl->qos_request) {
+ dev_pm_qos_remove_request(&flctl->pm_qos);
+ flctl->qos_request = 0;
+ }
+
+ pm_runtime_put_sync(&flctl->pdev->dev);
+ break;
+ case 0:
+ flctl->flcmncr_base |= CE0_ENABLE;
+
+ if (!flctl->qos_request) {
+ ret = dev_pm_qos_add_request(&flctl->pdev->dev,
+ &flctl->pm_qos,
+ DEV_PM_QOS_RESUME_LATENCY,
+ 100);
+ if (ret < 0)
+ dev_err(&flctl->pdev->dev,
+ "PM QoS request failed: %d\n", ret);
+ flctl->qos_request = 1;
+ }
+
+ if (flctl->holden) {
+ pm_runtime_get_sync(&flctl->pdev->dev);
+ writel(HOLDEN, FLHOLDCR(flctl));
+ pm_runtime_put_sync(&flctl->pdev->dev);
+ }
+ break;
+ default:
+ BUG();
+ }
+}
+
+static void flctl_write_buf(struct nand_chip *chip, const uint8_t *buf, int len)
+{
+ struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));
+
+ memcpy(&flctl->done_buff[flctl->index], buf, len);
+ flctl->index += len;
+}
+
+static uint8_t flctl_read_byte(struct nand_chip *chip)
+{
+ struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));
+ uint8_t data;
+
+ data = flctl->done_buff[flctl->index];
+ flctl->index++;
+ return data;
+}
+
+static void flctl_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
+{
+ struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));
+
+ memcpy(buf, &flctl->done_buff[flctl->index], len);
+ flctl->index += len;
+}
+
+static int flctl_chip_attach_chip(struct nand_chip *chip)
+{
+ u64 targetsize = nanddev_target_size(&chip->base);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct sh_flctl *flctl = mtd_to_flctl(mtd);
+
+ /*
+ * NAND_BUSWIDTH_16 may have been set by nand_scan_ident().
+ * Add the SEL_16BIT flag in flctl->flcmncr_base.
+ */
+ if (chip->options & NAND_BUSWIDTH_16)
+ flctl->flcmncr_base |= SEL_16BIT;
+
+ if (mtd->writesize == 512) {
+ flctl->page_size = 0;
+ if (targetsize > (32 << 20)) {
+			/* bigger than 32MB */
+ flctl->rw_ADRCNT = ADRCNT_4;
+ flctl->erase_ADRCNT = ADRCNT_3;
+ } else if (targetsize > (2 << 16)) {
+			/* bigger than 128KB */
+ flctl->rw_ADRCNT = ADRCNT_3;
+ flctl->erase_ADRCNT = ADRCNT_2;
+ } else {
+ flctl->rw_ADRCNT = ADRCNT_2;
+ flctl->erase_ADRCNT = ADRCNT_1;
+ }
+ } else {
+ flctl->page_size = 1;
+ if (targetsize > (128 << 20)) {
+			/* bigger than 128MB */
+ flctl->rw_ADRCNT = ADRCNT2_E;
+ flctl->erase_ADRCNT = ADRCNT_3;
+ } else if (targetsize > (8 << 16)) {
+			/* bigger than 512KB */
+ flctl->rw_ADRCNT = ADRCNT_4;
+ flctl->erase_ADRCNT = ADRCNT_2;
+ } else {
+ flctl->rw_ADRCNT = ADRCNT_3;
+ flctl->erase_ADRCNT = ADRCNT_1;
+ }
+ }
+
+ if (flctl->hwecc) {
+ if (mtd->writesize == 512) {
+ mtd_set_ooblayout(mtd, &flctl_4secc_oob_smallpage_ops);
+ chip->badblock_pattern = &flctl_4secc_smallpage;
+ } else {
+ mtd_set_ooblayout(mtd, &flctl_4secc_oob_largepage_ops);
+ chip->badblock_pattern = &flctl_4secc_largepage;
+ }
+
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 10;
+ chip->ecc.strength = 4;
+ chip->ecc.read_page = flctl_read_page_hwecc;
+ chip->ecc.write_page = flctl_write_page_hwecc;
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+
+ /* 4 symbols ECC enabled */
+ flctl->flcmncr_base |= _4ECCEN;
+ } else {
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+ }
+
+ return 0;
+}
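+
+/*
+ * Address cycle example for the thresholds above (illustrative): a 64MiB
+ * small page chip has 64MiB / 512B = 2^17 pages, so three row address
+ * bytes are needed on top of the single column byte. That is why
+ * targetsize > 32MiB selects ADRCNT_4 for read/write but ADRCNT_3 for
+ * erase, which sends row address bytes only.
+ */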
+
+static const struct nand_controller_ops flctl_nand_controller_ops = {
+ .attach_chip = flctl_chip_attach_chip,
+};
+
+static irqreturn_t flctl_handle_flste(int irq, void *dev_id)
+{
+ struct sh_flctl *flctl = dev_id;
+
+ dev_err(&flctl->pdev->dev, "flste irq: %x\n", readl(FLINTDMACR(flctl)));
+ writel(flctl->flintdmacr_base, FLINTDMACR(flctl));
+
+ return IRQ_HANDLED;
+}
+
+struct flctl_soc_config {
+ unsigned long flcmncr_val;
+ unsigned has_hwecc:1;
+ unsigned use_holden:1;
+};
+
+static struct flctl_soc_config flctl_sh7372_config = {
+ .flcmncr_val = CLK_16B_12L_4H | TYPESEL_SET | SHBUSSEL,
+ .has_hwecc = 1,
+ .use_holden = 1,
+};
+
+static const struct of_device_id of_flctl_match[] = {
+ { .compatible = "renesas,shmobile-flctl-sh7372",
+ .data = &flctl_sh7372_config },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_flctl_match);
+
+static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev)
+{
+ const struct flctl_soc_config *config;
+ struct sh_flctl_platform_data *pdata;
+
+ config = of_device_get_match_data(dev);
+ if (!config) {
+ dev_err(dev, "%s: no OF configuration attached\n", __func__);
+ return NULL;
+ }
+
+ pdata = devm_kzalloc(dev, sizeof(struct sh_flctl_platform_data),
+ GFP_KERNEL);
+ if (!pdata)
+ return NULL;
+
+ /* set SoC specific options */
+ pdata->flcmncr_val = config->flcmncr_val;
+ pdata->has_hwecc = config->has_hwecc;
+ pdata->use_holden = config->use_holden;
+
+ return pdata;
+}
+
+static int flctl_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct sh_flctl *flctl;
+ struct mtd_info *flctl_mtd;
+ struct nand_chip *nand;
+ struct sh_flctl_platform_data *pdata;
+ int ret;
+ int irq;
+
+ flctl = devm_kzalloc(&pdev->dev, sizeof(struct sh_flctl), GFP_KERNEL);
+ if (!flctl)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ flctl->reg = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(flctl->reg))
+ return PTR_ERR(flctl->reg);
+ flctl->fifo = res->start + 0x24; /* FLDTFIFO */
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(&pdev->dev, irq, flctl_handle_flste, IRQF_SHARED,
+ "flste", flctl);
+ if (ret) {
+ dev_err(&pdev->dev, "request interrupt failed.\n");
+ return ret;
+ }
+
+ if (pdev->dev.of_node)
+ pdata = flctl_parse_dt(&pdev->dev);
+ else
+ pdata = dev_get_platdata(&pdev->dev);
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "no setup data defined\n");
+ return -EINVAL;
+ }
+
+ platform_set_drvdata(pdev, flctl);
+ nand = &flctl->chip;
+ flctl_mtd = nand_to_mtd(nand);
+ nand_set_flash_node(nand, pdev->dev.of_node);
+ flctl_mtd->dev.parent = &pdev->dev;
+ flctl->pdev = pdev;
+ flctl->hwecc = pdata->has_hwecc;
+ flctl->holden = pdata->use_holden;
+ flctl->flcmncr_base = pdata->flcmncr_val;
+ flctl->flintdmacr_base = flctl->hwecc ? (STERINTE | ECERB) : STERINTE;
+
+	/* 20 us command delay time */
+ nand->legacy.chip_delay = 20;
+
+ nand->legacy.read_byte = flctl_read_byte;
+ nand->legacy.write_buf = flctl_write_buf;
+ nand->legacy.read_buf = flctl_read_buf;
+ nand->legacy.select_chip = flctl_select_chip;
+ nand->legacy.cmdfunc = flctl_cmdfunc;
+ nand->legacy.set_features = nand_get_set_features_notsupp;
+ nand->legacy.get_features = nand_get_set_features_notsupp;
+
+ if (pdata->flcmncr_val & SEL_16BIT)
+ nand->options |= NAND_BUSWIDTH_16;
+
+ nand->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE;
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_resume(&pdev->dev);
+
+ flctl_setup_dma(flctl);
+
+ nand->legacy.dummy_controller.ops = &flctl_nand_controller_ops;
+ ret = nand_scan(nand, 1);
+ if (ret)
+ goto err_chip;
+
+ ret = mtd_device_register(flctl_mtd, pdata->parts, pdata->nr_parts);
+ if (ret)
+ goto cleanup_nand;
+
+ return 0;
+
+cleanup_nand:
+ nand_cleanup(nand);
+err_chip:
+ flctl_release_dma(flctl);
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+}
+
+static int flctl_remove(struct platform_device *pdev)
+{
+ struct sh_flctl *flctl = platform_get_drvdata(pdev);
+ struct nand_chip *chip = &flctl->chip;
+ int ret;
+
+ flctl_release_dma(flctl);
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static struct platform_driver flctl_driver = {
+ .remove = flctl_remove,
+ .driver = {
+ .name = "sh_flctl",
+ .of_match_table = of_match_ptr(of_flctl_match),
+ },
+};
+
+module_platform_driver_probe(flctl_driver, flctl_probe);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Yoshihiro Shimoda");
+MODULE_DESCRIPTION("SuperH FLCTL driver");
+MODULE_ALIAS("platform:sh_flctl");
diff --git a/drivers/mtd/nand/raw/sharpsl.c b/drivers/mtd/nand/raw/sharpsl.c
new file mode 100644
index 000000000..af98bcc9d
--- /dev/null
+++ b/drivers/mtd/nand/raw/sharpsl.c
@@ -0,0 +1,248 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2004 Richard Purdie
+ * Copyright (C) 2008 Dmitry Baryshkov
+ *
+ * Based on Sharp's NAND driver sharp_sl.c
+ */
+
+#include <linux/genhd.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/sharpsl.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+struct sharpsl_nand {
+ struct nand_controller controller;
+ struct nand_chip chip;
+
+ void __iomem *io;
+};
+
+static inline struct sharpsl_nand *mtd_to_sharpsl(struct mtd_info *mtd)
+{
+ return container_of(mtd_to_nand(mtd), struct sharpsl_nand, chip);
+}
+
+/* register offset */
+#define ECCLPLB 0x00 /* line parity 7 - 0 bit */
+#define ECCLPUB 0x04 /* line parity 15 - 8 bit */
+#define ECCCP 0x08 /* column parity 5 - 0 bit */
+#define ECCCNTR 0x0C /* ECC byte counter */
+#define ECCCLRR 0x10 /* clear ECC */
+#define FLASHIO 0x14 /* Flash I/O */
+#define FLASHCTL 0x18 /* Flash Control */
+
+/* Flash control bit */
+#define FLRYBY (1 << 5)
+#define FLCE1 (1 << 4)
+#define FLWP (1 << 3)
+#define FLALE (1 << 2)
+#define FLCLE (1 << 1)
+#define FLCE0 (1 << 0)
+
+/*
+ * hardware specific access to control lines
+ * ctrl:
+ * NAND_NCE: bit 0 -> ! bit 0 & 4
+ * NAND_CLE: bit 1 -> bit 1
+ * NAND_ALE: bit 2 -> bit 2
+ */
+static void sharpsl_nand_hwcontrol(struct nand_chip *chip, int cmd,
+ unsigned int ctrl)
+{
+ struct sharpsl_nand *sharpsl = mtd_to_sharpsl(nand_to_mtd(chip));
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+ unsigned char bits = ctrl & 0x07;
+
+ bits |= (ctrl & 0x01) << 4;
+
+ bits ^= 0x11;
+
+ writeb((readb(sharpsl->io + FLASHCTL) & ~0x17) | bits, sharpsl->io + FLASHCTL);
+ }
+
+ if (cmd != NAND_CMD_NONE)
+ writeb(cmd, chip->legacy.IO_ADDR_W);
+}
+
+static int sharpsl_nand_dev_ready(struct nand_chip *chip)
+{
+ struct sharpsl_nand *sharpsl = mtd_to_sharpsl(nand_to_mtd(chip));
+ return !((readb(sharpsl->io + FLASHCTL) & FLRYBY) == 0);
+}
+
+static void sharpsl_nand_enable_hwecc(struct nand_chip *chip, int mode)
+{
+ struct sharpsl_nand *sharpsl = mtd_to_sharpsl(nand_to_mtd(chip));
+ writeb(0, sharpsl->io + ECCCLRR);
+}
+
+static int sharpsl_nand_calculate_ecc(struct nand_chip *chip,
+ const u_char * dat, u_char * ecc_code)
+{
+ struct sharpsl_nand *sharpsl = mtd_to_sharpsl(nand_to_mtd(chip));
+ ecc_code[0] = ~readb(sharpsl->io + ECCLPUB);
+ ecc_code[1] = ~readb(sharpsl->io + ECCLPLB);
+ ecc_code[2] = (~readb(sharpsl->io + ECCCP) << 2) | 0x03;
+ return readb(sharpsl->io + ECCCNTR) != 0;
+}
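+
+/*
+ * Byte packing example for the readout above (illustrative value): if
+ * ECCCP reads 0x15, then ecc_code[2] = ((~0x15) << 2) | 0x03 = 0xAB, i.e.
+ * the six inverted column parity bits sit in bits 7..2 with the two low
+ * bits forced to 1, the layout expected by nand_correct_data().
+ */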
+
+static int sharpsl_attach_chip(struct nand_chip *chip)
+{
+ if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
+ return 0;
+
+ chip->ecc.size = 256;
+ chip->ecc.bytes = 3;
+ chip->ecc.strength = 1;
+ chip->ecc.hwctl = sharpsl_nand_enable_hwecc;
+ chip->ecc.calculate = sharpsl_nand_calculate_ecc;
+ chip->ecc.correct = nand_correct_data;
+
+ return 0;
+}
+
+static const struct nand_controller_ops sharpsl_ops = {
+ .attach_chip = sharpsl_attach_chip,
+};
+
+/*
+ * Main initialization routine
+ */
+static int sharpsl_nand_probe(struct platform_device *pdev)
+{
+ struct nand_chip *this;
+ struct mtd_info *mtd;
+ struct resource *r;
+ int err = 0;
+ struct sharpsl_nand *sharpsl;
+ struct sharpsl_nand_platform_data *data = dev_get_platdata(&pdev->dev);
+
+ if (!data) {
+ dev_err(&pdev->dev, "no platform data!\n");
+ return -EINVAL;
+ }
+
+ /* Allocate memory for MTD device structure and private data */
+ sharpsl = kzalloc(sizeof(struct sharpsl_nand), GFP_KERNEL);
+ if (!sharpsl)
+ return -ENOMEM;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ dev_err(&pdev->dev, "no io memory resource defined!\n");
+ err = -ENODEV;
+ goto err_get_res;
+ }
+
+ /* map physical address */
+ sharpsl->io = ioremap(r->start, resource_size(r));
+ if (!sharpsl->io) {
+ dev_err(&pdev->dev, "ioremap to access Sharp SL NAND chip failed\n");
+ err = -EIO;
+ goto err_ioremap;
+ }
+
+ /* Get pointer to private data */
+ this = (struct nand_chip *)(&sharpsl->chip);
+
+ nand_controller_init(&sharpsl->controller);
+ sharpsl->controller.ops = &sharpsl_ops;
+ this->controller = &sharpsl->controller;
+
+ /* Link the private data with the MTD structure */
+ mtd = nand_to_mtd(this);
+ mtd->dev.parent = &pdev->dev;
+ mtd_set_ooblayout(mtd, data->ecc_layout);
+
+ platform_set_drvdata(pdev, sharpsl);
+
+ /*
+ * PXA initialize
+ */
+ writeb(readb(sharpsl->io + FLASHCTL) | FLWP, sharpsl->io + FLASHCTL);
+
+ /* Set address of NAND IO lines */
+ this->legacy.IO_ADDR_R = sharpsl->io + FLASHIO;
+ this->legacy.IO_ADDR_W = sharpsl->io + FLASHIO;
+ /* Set address of hardware control function */
+ this->legacy.cmd_ctrl = sharpsl_nand_hwcontrol;
+ this->legacy.dev_ready = sharpsl_nand_dev_ready;
+ /* 15 us command delay time */
+ this->legacy.chip_delay = 15;
+ this->badblock_pattern = data->badblock_pattern;
+
+ /* Scan to find existence of the device */
+ err = nand_scan(this, 1);
+ if (err)
+ goto err_scan;
+
+ /* Register the partitions */
+ mtd->name = "sharpsl-nand";
+
+ err = mtd_device_parse_register(mtd, data->part_parsers, NULL,
+ data->partitions, data->nr_partitions);
+ if (err)
+ goto err_add;
+
+ /* Return happy */
+ return 0;
+
+err_add:
+ nand_cleanup(this);
+
+err_scan:
+ iounmap(sharpsl->io);
+err_ioremap:
+err_get_res:
+ kfree(sharpsl);
+ return err;
+}
+
+/*
+ * Clean up routine
+ */
+static int sharpsl_nand_remove(struct platform_device *pdev)
+{
+ struct sharpsl_nand *sharpsl = platform_get_drvdata(pdev);
+ struct nand_chip *chip = &sharpsl->chip;
+ int ret;
+
+ /* Unregister device */
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+
+ /* Release resources */
+ nand_cleanup(chip);
+
+ iounmap(sharpsl->io);
+
+ /* Free the driver's structure */
+ kfree(sharpsl);
+
+ return 0;
+}
+
+static struct platform_driver sharpsl_nand_driver = {
+ .driver = {
+ .name = "sharpsl-nand",
+ },
+ .probe = sharpsl_nand_probe,
+ .remove = sharpsl_nand_remove,
+};
+
+module_platform_driver(sharpsl_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
+MODULE_DESCRIPTION("Device specific logic for NAND flash on Sharp SL-C7xx Series");
diff --git a/drivers/mtd/nand/raw/sm_common.c b/drivers/mtd/nand/raw/sm_common.c
new file mode 100644
index 000000000..ba24cb36d
--- /dev/null
+++ b/drivers/mtd/nand/raw/sm_common.c
@@ -0,0 +1,210 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright © 2009 - Maxim Levitsky
+ * Common routines & support for xD format
+ */
+#include <linux/kernel.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/module.h>
+#include <linux/sizes.h>
+#include "sm_common.h"
+
+static int oob_sm_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section > 1)
+ return -ERANGE;
+
+ oobregion->length = 3;
+ oobregion->offset = ((section + 1) * 8) - 3;
+
+ return 0;
+}
+
+static int oob_sm_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ switch (section) {
+ case 0:
+ /* reserved */
+ oobregion->offset = 0;
+ oobregion->length = 4;
+ break;
+ case 1:
+ /* LBA1 */
+ oobregion->offset = 6;
+ oobregion->length = 2;
+ break;
+ case 2:
+ /* LBA2 */
+ oobregion->offset = 11;
+ oobregion->length = 2;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops oob_sm_ops = {
+ .ecc = oob_sm_ooblayout_ecc,
+ .free = oob_sm_ooblayout_free,
+};
+
+/*
+ * NOTE: This layout is not compatible with SmartMedia,
+ * because the 256 byte devices have page dependent oob layout.
+ * However it does preserve the bad block markers.
+ * If you use smftl, it will bypass this and work correctly.
+ * If you don't, then you break SmartMedia compliance anyway.
+ */
+
+static int oob_sm_small_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section)
+ return -ERANGE;
+
+ oobregion->length = 3;
+ oobregion->offset = 0;
+
+ return 0;
+}
+
+static int oob_sm_small_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ switch (section) {
+ case 0:
+ /* reserved */
+ oobregion->offset = 3;
+ oobregion->length = 2;
+ break;
+ case 1:
+ /* LBA1 */
+ oobregion->offset = 6;
+ oobregion->length = 2;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops oob_sm_small_ops = {
+ .ecc = oob_sm_small_ooblayout_ecc,
+ .free = oob_sm_small_ooblayout_free,
+};
+
+static int sm_block_markbad(struct nand_chip *chip, loff_t ofs)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct mtd_oob_ops ops;
+ struct sm_oob oob;
+ int ret;
+
+ memset(&oob, -1, SM_OOB_SIZE);
+ oob.block_status = 0x0F;
+
+	/*
+	 * As long as this function is called on erase block boundaries
+	 * it will work correctly for 256 byte nand.
+	 */
+ ops.mode = MTD_OPS_PLACE_OOB;
+ ops.ooboffs = 0;
+ ops.ooblen = mtd->oobsize;
+ ops.oobbuf = (void *)&oob;
+ ops.datbuf = NULL;
+
+ ret = mtd_write_oob(mtd, ofs, &ops);
+ if (ret < 0 || ops.oobretlen != SM_OOB_SIZE) {
+ pr_notice("sm_common: can't mark sector at %i as bad\n",
+ (int)ofs);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static struct nand_flash_dev nand_smartmedia_flash_ids[] = {
+ LEGACY_ID_NAND("SmartMedia 2MiB 3,3V ROM", 0x5d, 2, SZ_8K, NAND_ROM),
+ LEGACY_ID_NAND("SmartMedia 4MiB 3,3V", 0xe3, 4, SZ_8K, 0),
+ LEGACY_ID_NAND("SmartMedia 4MiB 3,3/5V", 0xe5, 4, SZ_8K, 0),
+ LEGACY_ID_NAND("SmartMedia 4MiB 5V", 0x6b, 4, SZ_8K, 0),
+ LEGACY_ID_NAND("SmartMedia 4MiB 3,3V ROM", 0xd5, 4, SZ_8K, NAND_ROM),
+ LEGACY_ID_NAND("SmartMedia 8MiB 3,3V", 0xe6, 8, SZ_8K, 0),
+ LEGACY_ID_NAND("SmartMedia 8MiB 3,3V ROM", 0xd6, 8, SZ_8K, NAND_ROM),
+ LEGACY_ID_NAND("SmartMedia 16MiB 3,3V", 0x73, 16, SZ_16K, 0),
+ LEGACY_ID_NAND("SmartMedia 16MiB 3,3V ROM", 0x57, 16, SZ_16K, NAND_ROM),
+ LEGACY_ID_NAND("SmartMedia 32MiB 3,3V", 0x75, 32, SZ_16K, 0),
+ LEGACY_ID_NAND("SmartMedia 32MiB 3,3V ROM", 0x58, 32, SZ_16K, NAND_ROM),
+ LEGACY_ID_NAND("SmartMedia 64MiB 3,3V", 0x76, 64, SZ_16K, 0),
+ LEGACY_ID_NAND("SmartMedia 64MiB 3,3V ROM", 0xd9, 64, SZ_16K, NAND_ROM),
+ LEGACY_ID_NAND("SmartMedia 128MiB 3,3V", 0x79, 128, SZ_16K, 0),
+ LEGACY_ID_NAND("SmartMedia 128MiB 3,3V ROM", 0xda, 128, SZ_16K, NAND_ROM),
+	LEGACY_ID_NAND("SmartMedia 256MiB 3,3V", 0x71, 256, SZ_16K, 0),
+ LEGACY_ID_NAND("SmartMedia 256MiB 3,3V ROM", 0x5b, 256, SZ_16K, NAND_ROM),
+ {NULL}
+};
+
+static struct nand_flash_dev nand_xd_flash_ids[] = {
+ LEGACY_ID_NAND("xD 16MiB 3,3V", 0x73, 16, SZ_16K, 0),
+ LEGACY_ID_NAND("xD 32MiB 3,3V", 0x75, 32, SZ_16K, 0),
+ LEGACY_ID_NAND("xD 64MiB 3,3V", 0x76, 64, SZ_16K, 0),
+ LEGACY_ID_NAND("xD 128MiB 3,3V", 0x79, 128, SZ_16K, 0),
+ LEGACY_ID_NAND("xD 256MiB 3,3V", 0x71, 256, SZ_16K, NAND_BROKEN_XD),
+ LEGACY_ID_NAND("xD 512MiB 3,3V", 0xdc, 512, SZ_16K, NAND_BROKEN_XD),
+ LEGACY_ID_NAND("xD 1GiB 3,3V", 0xd3, 1024, SZ_16K, NAND_BROKEN_XD),
+ LEGACY_ID_NAND("xD 2GiB 3,3V", 0xd5, 2048, SZ_16K, NAND_BROKEN_XD),
+ {NULL}
+};
+
+static int sm_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ /* Bad block marker position */
+ chip->badblockpos = 0x05;
+ chip->badblockbits = 7;
+ chip->legacy.block_markbad = sm_block_markbad;
+
+ /* ECC layout */
+ if (mtd->writesize == SM_SECTOR_SIZE)
+ mtd_set_ooblayout(mtd, &oob_sm_ops);
+ else if (mtd->writesize == SM_SMALL_PAGE)
+ mtd_set_ooblayout(mtd, &oob_sm_small_ops);
+ else
+ return -ENODEV;
+
+ return 0;
+}
+
+static const struct nand_controller_ops sm_controller_ops = {
+ .attach_chip = sm_attach_chip,
+};
+
+int sm_register_device(struct mtd_info *mtd, int smartmedia)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_flash_dev *flash_ids;
+ int ret;
+
+ chip->options |= NAND_SKIP_BBTSCAN;
+
+ /* Scan for card properties */
+ chip->legacy.dummy_controller.ops = &sm_controller_ops;
+ flash_ids = smartmedia ? nand_smartmedia_flash_ids : nand_xd_flash_ids;
+ ret = nand_scan_with_ids(chip, 1, flash_ids);
+ if (ret)
+ return ret;
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret)
+ nand_cleanup(chip);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sm_register_device);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
+MODULE_DESCRIPTION("Common SmartMedia/xD functions");
diff --git a/drivers/mtd/nand/raw/sm_common.h b/drivers/mtd/nand/raw/sm_common.h
new file mode 100644
index 000000000..57fc9f86f
--- /dev/null
+++ b/drivers/mtd/nand/raw/sm_common.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright © 2009 - Maxim Levitsky
+ * Common routines & support for SmartMedia/xD format
+ */
+#include <linux/bitops.h>
+#include <linux/mtd/mtd.h>
+
+/* Full oob structure as written on the flash */
+struct sm_oob {
+ uint32_t reserved;
+ uint8_t data_status;
+ uint8_t block_status;
+ uint8_t lba_copy1[2];
+ uint8_t ecc2[3];
+ uint8_t lba_copy2[2];
+ uint8_t ecc1[3];
+} __packed;
+
+
+/* one sector is always 512 bytes, but it can consist of two nand pages */
+#define SM_SECTOR_SIZE 512
+
+/* oob area is also 16 bytes, but might be from two pages */
+#define SM_OOB_SIZE 16
+
+/*
+ * This is the maximum zone size, and all devices that have more than one
+ * zone have this size.
+ */
+#define SM_MAX_ZONE_SIZE 1024
+
+/* support for small page nand */
+#define SM_SMALL_PAGE 256
+#define SM_SMALL_OOB_SIZE 8
+
+int sm_register_device(struct mtd_info *mtd, int smartmedia);
+
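+/*
+ * The status bytes are decoded by counting set bits, which tolerates
+ * single bitflips in the oob: a sector is valid while at least 5 of
+ * the 8 data_status bits are set, a block while at least 7 of the
+ * block_status bits are.
+ */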
+static inline int sm_sector_valid(struct sm_oob *oob)
+{
+ return hweight16(oob->data_status) >= 5;
+}
+
+static inline int sm_block_valid(struct sm_oob *oob)
+{
+ return hweight16(oob->block_status) >= 7;
+}
+
+static inline int sm_block_erased(struct sm_oob *oob)
+{
+ static const uint32_t erased_pattern[4] = {
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
+
+ /* First test for erased block */
+ if (!memcmp(oob, erased_pattern, sizeof(*oob)))
+ return 1;
+ return 0;
+}
diff --git a/drivers/mtd/nand/raw/socrates_nand.c b/drivers/mtd/nand/raw/socrates_nand.c
new file mode 100644
index 000000000..fb39cc7eb
--- /dev/null
+++ b/drivers/mtd/nand/raw/socrates_nand.c
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright © 2008 Ilya Yanok, Emcraft Systems
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/io.h>
+
+#define FPGA_NAND_CMD_MASK (0x7 << 28)
+#define FPGA_NAND_CMD_COMMAND (0x0 << 28)
+#define FPGA_NAND_CMD_ADDR (0x1 << 28)
+#define FPGA_NAND_CMD_READ (0x2 << 28)
+#define FPGA_NAND_CMD_WRITE (0x3 << 28)
+#define FPGA_NAND_BUSY (0x1 << 15)
+#define FPGA_NAND_ENABLE (0x1 << 31)
+#define FPGA_NAND_DATA_SHIFT 16
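+
+/*
+ * The FPGA drives the NAND bus through a single 32-bit register:
+ * bit 31 enables the access, bits 28-30 select the operation,
+ * bit 15 reflects the busy line and the data byte sits in bits 16-23.
+ */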
+
+struct socrates_nand_host {
+ struct nand_controller controller;
+ struct nand_chip nand_chip;
+ void __iomem *io_base;
+ struct device *dev;
+};
+
+/**
+ * socrates_nand_write_buf - write buffer to chip
+ * @this: NAND chip object
+ * @buf: data buffer
+ * @len: number of bytes to write
+ */
+static void socrates_nand_write_buf(struct nand_chip *this, const uint8_t *buf,
+ int len)
+{
+ int i;
+ struct socrates_nand_host *host = nand_get_controller_data(this);
+
+ for (i = 0; i < len; i++) {
+ out_be32(host->io_base, FPGA_NAND_ENABLE |
+ FPGA_NAND_CMD_WRITE |
+ (buf[i] << FPGA_NAND_DATA_SHIFT));
+ }
+}
+
+/**
+ * socrates_nand_read_buf - read chip data into buffer
+ * @this: NAND chip object
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ */
+static void socrates_nand_read_buf(struct nand_chip *this, uint8_t *buf,
+ int len)
+{
+ int i;
+ struct socrates_nand_host *host = nand_get_controller_data(this);
+ uint32_t val;
+
+ val = FPGA_NAND_ENABLE | FPGA_NAND_CMD_READ;
+
+ out_be32(host->io_base, val);
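+ /*
+ * One READ command covers the whole transfer; the loop below then
+ * pulls one byte per register read from bits 16-23.
+ */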
+ for (i = 0; i < len; i++) {
+ buf[i] = (in_be32(host->io_base) >>
+ FPGA_NAND_DATA_SHIFT) & 0xff;
+ }
+}
+
+/**
+ * socrates_nand_read_byte - read one byte from the chip
+ * @this: NAND chip object
+ */
+static uint8_t socrates_nand_read_byte(struct nand_chip *this)
+{
+ uint8_t byte;
+
+ socrates_nand_read_buf(this, &byte, sizeof(byte));
+ return byte;
+}
+
+/*
+ * Hardware specific access to control-lines
+ */
+static void socrates_nand_cmd_ctrl(struct nand_chip *nand_chip, int cmd,
+ unsigned int ctrl)
+{
+ struct socrates_nand_host *host = nand_get_controller_data(nand_chip);
+ uint32_t val;
+
+ if (cmd == NAND_CMD_NONE)
+ return;
+
+ if (ctrl & NAND_CLE)
+ val = FPGA_NAND_CMD_COMMAND;
+ else
+ val = FPGA_NAND_CMD_ADDR;
+
+ if (ctrl & NAND_NCE)
+ val |= FPGA_NAND_ENABLE;
+
+ val |= (cmd & 0xff) << FPGA_NAND_DATA_SHIFT;
+
+ out_be32(host->io_base, val);
+}
+
+/*
+ * Read the Device Ready pin.
+ */
+static int socrates_nand_device_ready(struct nand_chip *nand_chip)
+{
+ struct socrates_nand_host *host = nand_get_controller_data(nand_chip);
+
+ if (in_be32(host->io_base) & FPGA_NAND_BUSY)
+ return 0; /* busy */
+ return 1;
+}
+
+static int socrates_attach_chip(struct nand_chip *chip)
+{
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
+ chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN)
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+
+ return 0;
+}
+
+static const struct nand_controller_ops socrates_ops = {
+ .attach_chip = socrates_attach_chip,
+};
+
+/*
+ * Probe for the NAND device.
+ */
+static int socrates_nand_probe(struct platform_device *ofdev)
+{
+ struct socrates_nand_host *host;
+ struct mtd_info *mtd;
+ struct nand_chip *nand_chip;
+ int res;
+
+ /* Allocate memory for the device structure (and zero it) */
+ host = devm_kzalloc(&ofdev->dev, sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
+
+ host->io_base = of_iomap(ofdev->dev.of_node, 0);
+ if (host->io_base == NULL) {
+ dev_err(&ofdev->dev, "ioremap failed\n");
+ return -EIO;
+ }
+
+ nand_chip = &host->nand_chip;
+ mtd = nand_to_mtd(nand_chip);
+ host->dev = &ofdev->dev;
+
+ nand_controller_init(&host->controller);
+ host->controller.ops = &socrates_ops;
+ nand_chip->controller = &host->controller;
+
+ /* link the private data structures */
+ nand_set_controller_data(nand_chip, host);
+ nand_set_flash_node(nand_chip, ofdev->dev.of_node);
+ mtd->name = "socrates_nand";
+ mtd->dev.parent = &ofdev->dev;
+
+ nand_chip->legacy.cmd_ctrl = socrates_nand_cmd_ctrl;
+ nand_chip->legacy.read_byte = socrates_nand_read_byte;
+ nand_chip->legacy.write_buf = socrates_nand_write_buf;
+ nand_chip->legacy.read_buf = socrates_nand_read_buf;
+ nand_chip->legacy.dev_ready = socrates_nand_device_ready;
+
+ /* TODO: I have no idea what real delay is. */
+ nand_chip->legacy.chip_delay = 20; /* 20us command delay time */
+
+ /*
+ * This driver assumes that the default ECC engine should be TYPE_SOFT.
+ * Set ->engine_type before registering the NAND devices in order to
+ * provide a driver specific default value.
+ */
+ nand_chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+
+ dev_set_drvdata(&ofdev->dev, host);
+
+ res = nand_scan(nand_chip, 1);
+ if (res)
+ goto out;
+
+ res = mtd_device_register(mtd, NULL, 0);
+ if (!res)
+ return res;
+
+ nand_cleanup(nand_chip);
+
+out:
+ iounmap(host->io_base);
+ return res;
+}
+
+/*
+ * Remove a NAND device.
+ */
+static int socrates_nand_remove(struct platform_device *ofdev)
+{
+ struct socrates_nand_host *host = dev_get_drvdata(&ofdev->dev);
+ struct nand_chip *chip = &host->nand_chip;
+ int ret;
+
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+
+ iounmap(host->io_base);
+
+ return 0;
+}
+
+static const struct of_device_id socrates_nand_match[] = {
+ {
+ .compatible = "abb,socrates-nand",
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, socrates_nand_match);
+
+static struct platform_driver socrates_nand_driver = {
+ .driver = {
+ .name = "socrates_nand",
+ .of_match_table = socrates_nand_match,
+ },
+ .probe = socrates_nand_probe,
+ .remove = socrates_nand_remove,
+};
+
+module_platform_driver(socrates_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ilya Yanok");
+MODULE_DESCRIPTION("NAND driver for Socrates board");
diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
new file mode 100644
index 000000000..c0c47f31c
--- /dev/null
+++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
@@ -0,0 +1,2084 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics 2018
+ * Author: Christophe Kerello <christophe.kerello@st.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/of_address.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+/* Bad block marker length */
+#define FMC2_BBM_LEN 2
+
+/* ECC step size */
+#define FMC2_ECC_STEP_SIZE 512
+
+/* BCHDSRx registers length */
+#define FMC2_BCHDSRS_LEN 20
+
+/* HECCR length */
+#define FMC2_HECCR_LEN 4
+
+/* Max requests done for an 8k NAND page size */
+#define FMC2_MAX_SG 16
+
+/* Max chip enable */
+#define FMC2_MAX_CE 2
+
+/* Max ECC buffer length */
+#define FMC2_MAX_ECC_BUF_LEN (FMC2_BCHDSRS_LEN * FMC2_MAX_SG)
+
+#define FMC2_TIMEOUT_MS 5000
+
+/* Timings */
+#define FMC2_THIZ 1
+#define FMC2_TIO 8000
+#define FMC2_TSYNC 3000
+#define FMC2_PCR_TIMING_MASK 0xf
+#define FMC2_PMEM_PATT_TIMING_MASK 0xff
+
+/* FMC2 Controller Registers */
+#define FMC2_BCR1 0x0
+#define FMC2_PCR 0x80
+#define FMC2_SR 0x84
+#define FMC2_PMEM 0x88
+#define FMC2_PATT 0x8c
+#define FMC2_HECCR 0x94
+#define FMC2_ISR 0x184
+#define FMC2_ICR 0x188
+#define FMC2_CSQCR 0x200
+#define FMC2_CSQCFGR1 0x204
+#define FMC2_CSQCFGR2 0x208
+#define FMC2_CSQCFGR3 0x20c
+#define FMC2_CSQAR1 0x210
+#define FMC2_CSQAR2 0x214
+#define FMC2_CSQIER 0x220
+#define FMC2_CSQISR 0x224
+#define FMC2_CSQICR 0x228
+#define FMC2_CSQEMSR 0x230
+#define FMC2_BCHIER 0x250
+#define FMC2_BCHISR 0x254
+#define FMC2_BCHICR 0x258
+#define FMC2_BCHPBR1 0x260
+#define FMC2_BCHPBR2 0x264
+#define FMC2_BCHPBR3 0x268
+#define FMC2_BCHPBR4 0x26c
+#define FMC2_BCHDSR0 0x27c
+#define FMC2_BCHDSR1 0x280
+#define FMC2_BCHDSR2 0x284
+#define FMC2_BCHDSR3 0x288
+#define FMC2_BCHDSR4 0x28c
+
+/* Register: FMC2_BCR1 */
+#define FMC2_BCR1_FMC2EN BIT(31)
+
+/* Register: FMC2_PCR */
+#define FMC2_PCR_PWAITEN BIT(1)
+#define FMC2_PCR_PBKEN BIT(2)
+#define FMC2_PCR_PWID GENMASK(5, 4)
+#define FMC2_PCR_PWID_BUSWIDTH_8 0
+#define FMC2_PCR_PWID_BUSWIDTH_16 1
+#define FMC2_PCR_ECCEN BIT(6)
+#define FMC2_PCR_ECCALG BIT(8)
+#define FMC2_PCR_TCLR GENMASK(12, 9)
+#define FMC2_PCR_TCLR_DEFAULT 0xf
+#define FMC2_PCR_TAR GENMASK(16, 13)
+#define FMC2_PCR_TAR_DEFAULT 0xf
+#define FMC2_PCR_ECCSS GENMASK(19, 17)
+#define FMC2_PCR_ECCSS_512 1
+#define FMC2_PCR_ECCSS_2048 3
+#define FMC2_PCR_BCHECC BIT(24)
+#define FMC2_PCR_WEN BIT(25)
+
+/* Register: FMC2_SR */
+#define FMC2_SR_NWRF BIT(6)
+
+/* Register: FMC2_PMEM */
+#define FMC2_PMEM_MEMSET GENMASK(7, 0)
+#define FMC2_PMEM_MEMWAIT GENMASK(15, 8)
+#define FMC2_PMEM_MEMHOLD GENMASK(23, 16)
+#define FMC2_PMEM_MEMHIZ GENMASK(31, 24)
+#define FMC2_PMEM_DEFAULT 0x0a0a0a0a
+
+/* Register: FMC2_PATT */
+#define FMC2_PATT_ATTSET GENMASK(7, 0)
+#define FMC2_PATT_ATTWAIT GENMASK(15, 8)
+#define FMC2_PATT_ATTHOLD GENMASK(23, 16)
+#define FMC2_PATT_ATTHIZ GENMASK(31, 24)
+#define FMC2_PATT_DEFAULT 0x0a0a0a0a
+
+/* Register: FMC2_ISR */
+#define FMC2_ISR_IHLF BIT(1)
+
+/* Register: FMC2_ICR */
+#define FMC2_ICR_CIHLF BIT(1)
+
+/* Register: FMC2_CSQCR */
+#define FMC2_CSQCR_CSQSTART BIT(0)
+
+/* Register: FMC2_CSQCFGR1 */
+#define FMC2_CSQCFGR1_CMD2EN BIT(1)
+#define FMC2_CSQCFGR1_DMADEN BIT(2)
+#define FMC2_CSQCFGR1_ACYNBR GENMASK(6, 4)
+#define FMC2_CSQCFGR1_CMD1 GENMASK(15, 8)
+#define FMC2_CSQCFGR1_CMD2 GENMASK(23, 16)
+#define FMC2_CSQCFGR1_CMD1T BIT(24)
+#define FMC2_CSQCFGR1_CMD2T BIT(25)
+
+/* Register: FMC2_CSQCFGR2 */
+#define FMC2_CSQCFGR2_SQSDTEN BIT(0)
+#define FMC2_CSQCFGR2_RCMD2EN BIT(1)
+#define FMC2_CSQCFGR2_DMASEN BIT(2)
+#define FMC2_CSQCFGR2_RCMD1 GENMASK(15, 8)
+#define FMC2_CSQCFGR2_RCMD2 GENMASK(23, 16)
+#define FMC2_CSQCFGR2_RCMD1T BIT(24)
+#define FMC2_CSQCFGR2_RCMD2T BIT(25)
+
+/* Register: FMC2_CSQCFGR3 */
+#define FMC2_CSQCFGR3_SNBR GENMASK(13, 8)
+#define FMC2_CSQCFGR3_AC1T BIT(16)
+#define FMC2_CSQCFGR3_AC2T BIT(17)
+#define FMC2_CSQCFGR3_AC3T BIT(18)
+#define FMC2_CSQCFGR3_AC4T BIT(19)
+#define FMC2_CSQCFGR3_AC5T BIT(20)
+#define FMC2_CSQCFGR3_SDT BIT(21)
+#define FMC2_CSQCFGR3_RAC1T BIT(22)
+#define FMC2_CSQCFGR3_RAC2T BIT(23)
+
+/* Register: FMC2_CSQCAR1 */
+#define FMC2_CSQCAR1_ADDC1 GENMASK(7, 0)
+#define FMC2_CSQCAR1_ADDC2 GENMASK(15, 8)
+#define FMC2_CSQCAR1_ADDC3 GENMASK(23, 16)
+#define FMC2_CSQCAR1_ADDC4 GENMASK(31, 24)
+
+/* Register: FMC2_CSQCAR2 */
+#define FMC2_CSQCAR2_ADDC5 GENMASK(7, 0)
+#define FMC2_CSQCAR2_NANDCEN GENMASK(11, 10)
+#define FMC2_CSQCAR2_SAO GENMASK(31, 16)
+
+/* Register: FMC2_CSQIER */
+#define FMC2_CSQIER_TCIE BIT(0)
+
+/* Register: FMC2_CSQICR */
+#define FMC2_CSQICR_CLEAR_IRQ GENMASK(4, 0)
+
+/* Register: FMC2_CSQEMSR */
+#define FMC2_CSQEMSR_SEM GENMASK(15, 0)
+
+/* Register: FMC2_BCHIER */
+#define FMC2_BCHIER_DERIE BIT(1)
+#define FMC2_BCHIER_EPBRIE BIT(4)
+
+/* Register: FMC2_BCHICR */
+#define FMC2_BCHICR_CLEAR_IRQ GENMASK(4, 0)
+
+/* Register: FMC2_BCHDSR0 */
+#define FMC2_BCHDSR0_DUE BIT(0)
+#define FMC2_BCHDSR0_DEF BIT(1)
+#define FMC2_BCHDSR0_DEN GENMASK(7, 4)
+
+/* Register: FMC2_BCHDSR1 */
+#define FMC2_BCHDSR1_EBP1 GENMASK(12, 0)
+#define FMC2_BCHDSR1_EBP2 GENMASK(28, 16)
+
+/* Register: FMC2_BCHDSR2 */
+#define FMC2_BCHDSR2_EBP3 GENMASK(12, 0)
+#define FMC2_BCHDSR2_EBP4 GENMASK(28, 16)
+
+/* Register: FMC2_BCHDSR3 */
+#define FMC2_BCHDSR3_EBP5 GENMASK(12, 0)
+#define FMC2_BCHDSR3_EBP6 GENMASK(28, 16)
+
+/* Register: FMC2_BCHDSR4 */
+#define FMC2_BCHDSR4_EBP7 GENMASK(12, 0)
+#define FMC2_BCHDSR4_EBP8 GENMASK(28, 16)
+
+enum stm32_fmc2_ecc {
+ FMC2_ECC_HAM = 1,
+ FMC2_ECC_BCH4 = 4,
+ FMC2_ECC_BCH8 = 8
+};
+
+enum stm32_fmc2_irq_state {
+ FMC2_IRQ_UNKNOWN = 0,
+ FMC2_IRQ_BCH,
+ FMC2_IRQ_SEQ
+};
+
+struct stm32_fmc2_timings {
+ u8 tclr;
+ u8 tar;
+ u8 thiz;
+ u8 twait;
+ u8 thold_mem;
+ u8 tset_mem;
+ u8 thold_att;
+ u8 tset_att;
+};
+
+struct stm32_fmc2_nand {
+ struct nand_chip chip;
+ struct stm32_fmc2_timings timings;
+ int ncs;
+ int cs_used[FMC2_MAX_CE];
+};
+
+static inline struct stm32_fmc2_nand *to_fmc2_nand(struct nand_chip *chip)
+{
+ return container_of(chip, struct stm32_fmc2_nand, chip);
+}
+
+struct stm32_fmc2_nfc {
+ struct nand_controller base;
+ struct stm32_fmc2_nand nand;
+ struct device *dev;
+ struct device *cdev;
+ struct regmap *regmap;
+ void __iomem *data_base[FMC2_MAX_CE];
+ void __iomem *cmd_base[FMC2_MAX_CE];
+ void __iomem *addr_base[FMC2_MAX_CE];
+ phys_addr_t io_phys_addr;
+ phys_addr_t data_phys_addr[FMC2_MAX_CE];
+ struct clk *clk;
+ u8 irq_state;
+
+ struct dma_chan *dma_tx_ch;
+ struct dma_chan *dma_rx_ch;
+ struct dma_chan *dma_ecc_ch;
+ struct sg_table dma_data_sg;
+ struct sg_table dma_ecc_sg;
+ u8 *ecc_buf;
+ int dma_ecc_len;
+
+ struct completion complete;
+ struct completion dma_data_complete;
+ struct completion dma_ecc_complete;
+
+ u8 cs_assigned;
+ int cs_sel;
+};
+
+static inline struct stm32_fmc2_nfc *to_stm32_nfc(struct nand_controller *base)
+{
+ return container_of(base, struct stm32_fmc2_nfc, base);
+}
+
+static void stm32_fmc2_nfc_timings_init(struct nand_chip *chip)
+{
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
+ struct stm32_fmc2_nand *nand = to_fmc2_nand(chip);
+ struct stm32_fmc2_timings *timings = &nand->timings;
+ u32 pmem, patt;
+
+ /* Set tclr/tar timings */
+ regmap_update_bits(nfc->regmap, FMC2_PCR,
+ FMC2_PCR_TCLR | FMC2_PCR_TAR,
+ FIELD_PREP(FMC2_PCR_TCLR, timings->tclr) |
+ FIELD_PREP(FMC2_PCR_TAR, timings->tar));
+
+ /* Set tset/twait/thold/thiz timings in common bank */
+ pmem = FIELD_PREP(FMC2_PMEM_MEMSET, timings->tset_mem);
+ pmem |= FIELD_PREP(FMC2_PMEM_MEMWAIT, timings->twait);
+ pmem |= FIELD_PREP(FMC2_PMEM_MEMHOLD, timings->thold_mem);
+ pmem |= FIELD_PREP(FMC2_PMEM_MEMHIZ, timings->thiz);
+ regmap_write(nfc->regmap, FMC2_PMEM, pmem);
+
+ /* Set tset/twait/thold/thiz timings in attribute bank */
+ patt = FIELD_PREP(FMC2_PATT_ATTSET, timings->tset_att);
+ patt |= FIELD_PREP(FMC2_PATT_ATTWAIT, timings->twait);
+ patt |= FIELD_PREP(FMC2_PATT_ATTHOLD, timings->thold_att);
+ patt |= FIELD_PREP(FMC2_PATT_ATTHIZ, timings->thiz);
+ regmap_write(nfc->regmap, FMC2_PATT, patt);
+}
+
+static void stm32_fmc2_nfc_setup(struct nand_chip *chip)
+{
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
+ u32 pcr = 0, pcr_mask;
+
+ /* Configure ECC algorithm (default configuration is Hamming) */
+ pcr_mask = FMC2_PCR_ECCALG;
+ pcr_mask |= FMC2_PCR_BCHECC;
+ if (chip->ecc.strength == FMC2_ECC_BCH8) {
+ pcr |= FMC2_PCR_ECCALG;
+ pcr |= FMC2_PCR_BCHECC;
+ } else if (chip->ecc.strength == FMC2_ECC_BCH4) {
+ pcr |= FMC2_PCR_ECCALG;
+ }
+
+ /* Set buswidth */
+ pcr_mask |= FMC2_PCR_PWID;
+ if (chip->options & NAND_BUSWIDTH_16)
+ pcr |= FIELD_PREP(FMC2_PCR_PWID, FMC2_PCR_PWID_BUSWIDTH_16);
+
+ /* Set ECC sector size */
+ pcr_mask |= FMC2_PCR_ECCSS;
+ pcr |= FIELD_PREP(FMC2_PCR_ECCSS, FMC2_PCR_ECCSS_512);
+
+ regmap_update_bits(nfc->regmap, FMC2_PCR, pcr_mask, pcr);
+}
+
+static int stm32_fmc2_nfc_select_chip(struct nand_chip *chip, int chipnr)
+{
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
+ struct stm32_fmc2_nand *nand = to_fmc2_nand(chip);
+ struct dma_slave_config dma_cfg;
+ int ret;
+
+ if (nand->cs_used[chipnr] == nfc->cs_sel)
+ return 0;
+
+ nfc->cs_sel = nand->cs_used[chipnr];
+ stm32_fmc2_nfc_setup(chip);
+ stm32_fmc2_nfc_timings_init(chip);
+
+ if (nfc->dma_tx_ch && nfc->dma_rx_ch) {
+ memset(&dma_cfg, 0, sizeof(dma_cfg));
+ dma_cfg.src_addr = nfc->data_phys_addr[nfc->cs_sel];
+ dma_cfg.dst_addr = nfc->data_phys_addr[nfc->cs_sel];
+ dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dma_cfg.src_maxburst = 32;
+ dma_cfg.dst_maxburst = 32;
+
+ ret = dmaengine_slave_config(nfc->dma_tx_ch, &dma_cfg);
+ if (ret) {
+ dev_err(nfc->dev, "tx DMA engine slave config failed\n");
+ return ret;
+ }
+
+ ret = dmaengine_slave_config(nfc->dma_rx_ch, &dma_cfg);
+ if (ret) {
+ dev_err(nfc->dev, "rx DMA engine slave config failed\n");
+ return ret;
+ }
+ }
+
+ if (nfc->dma_ecc_ch) {
+ /*
+ * Hamming: we read HECCR register
+ * BCH4/BCH8: we read BCHDSRSx registers
+ */
+ memset(&dma_cfg, 0, sizeof(dma_cfg));
+ dma_cfg.src_addr = nfc->io_phys_addr;
+ dma_cfg.src_addr += chip->ecc.strength == FMC2_ECC_HAM ?
+ FMC2_HECCR : FMC2_BCHDSR0;
+ dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+
+ ret = dmaengine_slave_config(nfc->dma_ecc_ch, &dma_cfg);
+ if (ret) {
+ dev_err(nfc->dev, "ECC DMA engine slave config failed\n");
+ return ret;
+ }
+
+ /* Calculate ECC length needed for one sector */
+ nfc->dma_ecc_len = chip->ecc.strength == FMC2_ECC_HAM ?
+ FMC2_HECCR_LEN : FMC2_BCHDSRS_LEN;
+ }
+
+ return 0;
+}
+
+static void stm32_fmc2_nfc_set_buswidth_16(struct stm32_fmc2_nfc *nfc, bool set)
+{
+ u32 pcr;
+
+ pcr = set ? FIELD_PREP(FMC2_PCR_PWID, FMC2_PCR_PWID_BUSWIDTH_16) :
+ FIELD_PREP(FMC2_PCR_PWID, FMC2_PCR_PWID_BUSWIDTH_8);
+
+ regmap_update_bits(nfc->regmap, FMC2_PCR, FMC2_PCR_PWID, pcr);
+}
+
+static void stm32_fmc2_nfc_set_ecc(struct stm32_fmc2_nfc *nfc, bool enable)
+{
+ regmap_update_bits(nfc->regmap, FMC2_PCR, FMC2_PCR_ECCEN,
+ enable ? FMC2_PCR_ECCEN : 0);
+}
+
+static void stm32_fmc2_nfc_enable_seq_irq(struct stm32_fmc2_nfc *nfc)
+{
+ nfc->irq_state = FMC2_IRQ_SEQ;
+
+ regmap_update_bits(nfc->regmap, FMC2_CSQIER,
+ FMC2_CSQIER_TCIE, FMC2_CSQIER_TCIE);
+}
+
+static void stm32_fmc2_nfc_disable_seq_irq(struct stm32_fmc2_nfc *nfc)
+{
+ regmap_update_bits(nfc->regmap, FMC2_CSQIER, FMC2_CSQIER_TCIE, 0);
+
+ nfc->irq_state = FMC2_IRQ_UNKNOWN;
+}
+
+static void stm32_fmc2_nfc_clear_seq_irq(struct stm32_fmc2_nfc *nfc)
+{
+ regmap_write(nfc->regmap, FMC2_CSQICR, FMC2_CSQICR_CLEAR_IRQ);
+}
+
+static void stm32_fmc2_nfc_enable_bch_irq(struct stm32_fmc2_nfc *nfc, int mode)
+{
+ nfc->irq_state = FMC2_IRQ_BCH;
+
+ if (mode == NAND_ECC_WRITE)
+ regmap_update_bits(nfc->regmap, FMC2_BCHIER,
+ FMC2_BCHIER_EPBRIE, FMC2_BCHIER_EPBRIE);
+ else
+ regmap_update_bits(nfc->regmap, FMC2_BCHIER,
+ FMC2_BCHIER_DERIE, FMC2_BCHIER_DERIE);
+}
+
+static void stm32_fmc2_nfc_disable_bch_irq(struct stm32_fmc2_nfc *nfc)
+{
+ regmap_update_bits(nfc->regmap, FMC2_BCHIER,
+ FMC2_BCHIER_DERIE | FMC2_BCHIER_EPBRIE, 0);
+
+ nfc->irq_state = FMC2_IRQ_UNKNOWN;
+}
+
+static void stm32_fmc2_nfc_clear_bch_irq(struct stm32_fmc2_nfc *nfc)
+{
+ regmap_write(nfc->regmap, FMC2_BCHICR, FMC2_BCHICR_CLEAR_IRQ);
+}
+
+/*
+ * Enable the ECC logic and reset the previously calculated
+ * syndrome/parity bits; they are cleared by setting the ECCEN bit
+ * to 0.
+ */
+static void stm32_fmc2_nfc_hwctl(struct nand_chip *chip, int mode)
+{
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
+
+ stm32_fmc2_nfc_set_ecc(nfc, false);
+
+ if (chip->ecc.strength != FMC2_ECC_HAM) {
+ regmap_update_bits(nfc->regmap, FMC2_PCR, FMC2_PCR_WEN,
+ mode == NAND_ECC_WRITE ? FMC2_PCR_WEN : 0);
+
+ reinit_completion(&nfc->complete);
+ stm32_fmc2_nfc_clear_bch_irq(nfc);
+ stm32_fmc2_nfc_enable_bch_irq(nfc, mode);
+ }
+
+ stm32_fmc2_nfc_set_ecc(nfc, true);
+}
+
+/*
+ * ECC Hamming calculation
+ * The ECC is 3 bytes per 512 bytes of data and can correct at most
+ * a single bit error.
+ */
+static void stm32_fmc2_nfc_ham_set_ecc(const u32 ecc_sta, u8 *ecc)
+{
+ ecc[0] = ecc_sta;
+ ecc[1] = ecc_sta >> 8;
+ ecc[2] = ecc_sta >> 16;
+}
+
+static int stm32_fmc2_nfc_ham_calculate(struct nand_chip *chip, const u8 *data,
+ u8 *ecc)
+{
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
+ u32 sr, heccr;
+ int ret;
+
+ ret = regmap_read_poll_timeout(nfc->regmap, FMC2_SR, sr,
+ sr & FMC2_SR_NWRF, 1,
+ 1000 * FMC2_TIMEOUT_MS);
+ if (ret) {
+ dev_err(nfc->dev, "ham timeout\n");
+ return ret;
+ }
+
+ regmap_read(nfc->regmap, FMC2_HECCR, &heccr);
+ stm32_fmc2_nfc_ham_set_ecc(heccr, ecc);
+ stm32_fmc2_nfc_set_ecc(nfc, false);
+
+ return 0;
+}
+
+static int stm32_fmc2_nfc_ham_correct(struct nand_chip *chip, u8 *dat,
+ u8 *read_ecc, u8 *calc_ecc)
+{
+ u8 bit_position = 0, b0, b1, b2;
+ u32 byte_addr = 0, b;
+ u32 i, shifting = 1;
+
+ /* Determine which bit and byte are faulty (if any) */
+ b0 = read_ecc[0] ^ calc_ecc[0];
+ b1 = read_ecc[1] ^ calc_ecc[1];
+ b2 = read_ecc[2] ^ calc_ecc[2];
+ b = b0 | (b1 << 8) | (b2 << 16);
+
+ /* No errors */
+ if (likely(!b))
+ return 0;
+
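+ /*
+ * The 24-bit syndrome stores each position bit twice, in
+ * complementary form: every 2-bit group must be 01b (bit clear) or
+ * 10b (bit set); anything else means the syndrome itself is
+ * corrupt. The first 3 groups give the bit position within the
+ * byte, the next 9 groups the byte address.
+ */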
+ /* Calculate bit position */
+ for (i = 0; i < 3; i++) {
+ switch (b % 4) {
+ case 2:
+ bit_position += shifting;
+ break;
+ case 1:
+ break;
+ default:
+ return -EBADMSG;
+ }
+ shifting <<= 1;
+ b >>= 2;
+ }
+
+ /* Calculate byte position */
+ shifting = 1;
+ for (i = 0; i < 9; i++) {
+ switch (b % 4) {
+ case 2:
+ byte_addr += shifting;
+ break;
+ case 1:
+ break;
+ default:
+ return -EBADMSG;
+ }
+ shifting <<= 1;
+ b >>= 2;
+ }
+
+ /* Flip the bit */
+ dat[byte_addr] ^= (1 << bit_position);
+
+ return 1;
+}
+
+/*
+ * ECC BCH calculation and correction
+ * The ECC is 7/13 bytes per 512 bytes of data and can correct at
+ * most 4/8 bit errors.
+ */
+static int stm32_fmc2_nfc_bch_calculate(struct nand_chip *chip, const u8 *data,
+ u8 *ecc)
+{
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
+ u32 bchpbr;
+
+ /* Wait until the BCH code is ready */
+ if (!wait_for_completion_timeout(&nfc->complete,
+ msecs_to_jiffies(FMC2_TIMEOUT_MS))) {
+ dev_err(nfc->dev, "bch timeout\n");
+ stm32_fmc2_nfc_disable_bch_irq(nfc);
+ return -ETIMEDOUT;
+ }
+
+ /* Read parity bits */
+ regmap_read(nfc->regmap, FMC2_BCHPBR1, &bchpbr);
+ ecc[0] = bchpbr;
+ ecc[1] = bchpbr >> 8;
+ ecc[2] = bchpbr >> 16;
+ ecc[3] = bchpbr >> 24;
+
+ regmap_read(nfc->regmap, FMC2_BCHPBR2, &bchpbr);
+ ecc[4] = bchpbr;
+ ecc[5] = bchpbr >> 8;
+ ecc[6] = bchpbr >> 16;
+
+ if (chip->ecc.strength == FMC2_ECC_BCH8) {
+ ecc[7] = bchpbr >> 24;
+
+ regmap_read(nfc->regmap, FMC2_BCHPBR3, &bchpbr);
+ ecc[8] = bchpbr;
+ ecc[9] = bchpbr >> 8;
+ ecc[10] = bchpbr >> 16;
+ ecc[11] = bchpbr >> 24;
+
+ regmap_read(nfc->regmap, FMC2_BCHPBR4, &bchpbr);
+ ecc[12] = bchpbr;
+ }
+
+ stm32_fmc2_nfc_set_ecc(nfc, false);
+
+ return 0;
+}
+
+static int stm32_fmc2_nfc_bch_decode(int eccsize, u8 *dat, u32 *ecc_sta)
+{
+ u32 bchdsr0 = ecc_sta[0];
+ u32 bchdsr1 = ecc_sta[1];
+ u32 bchdsr2 = ecc_sta[2];
+ u32 bchdsr3 = ecc_sta[3];
+ u32 bchdsr4 = ecc_sta[4];
+ u16 pos[8];
+ int i, den;
+ unsigned int nb_errs = 0;
+
+ /* No errors found */
+ if (likely(!(bchdsr0 & FMC2_BCHDSR0_DEF)))
+ return 0;
+
+ /* Too many errors detected */
+ if (unlikely(bchdsr0 & FMC2_BCHDSR0_DUE))
+ return -EBADMSG;
+
+ pos[0] = FIELD_GET(FMC2_BCHDSR1_EBP1, bchdsr1);
+ pos[1] = FIELD_GET(FMC2_BCHDSR1_EBP2, bchdsr1);
+ pos[2] = FIELD_GET(FMC2_BCHDSR2_EBP3, bchdsr2);
+ pos[3] = FIELD_GET(FMC2_BCHDSR2_EBP4, bchdsr2);
+ pos[4] = FIELD_GET(FMC2_BCHDSR3_EBP5, bchdsr3);
+ pos[5] = FIELD_GET(FMC2_BCHDSR3_EBP6, bchdsr3);
+ pos[6] = FIELD_GET(FMC2_BCHDSR4_EBP7, bchdsr4);
+ pos[7] = FIELD_GET(FMC2_BCHDSR4_EBP8, bchdsr4);
+
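+ /*
+ * DEN gives the number of valid EBPx fields. Positions are bit
+ * offsets within the sector; anything at or beyond eccsize * 8
+ * falls in the ECC bytes themselves and is left alone, since only
+ * the data buffer is corrected.
+ */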
+ den = FIELD_GET(FMC2_BCHDSR0_DEN, bchdsr0);
+ for (i = 0; i < den; i++) {
+ if (pos[i] < eccsize * 8) {
+ change_bit(pos[i], (unsigned long *)dat);
+ nb_errs++;
+ }
+ }
+
+ return nb_errs;
+}
+
+static int stm32_fmc2_nfc_bch_correct(struct nand_chip *chip, u8 *dat,
+ u8 *read_ecc, u8 *calc_ecc)
+{
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
+ u32 ecc_sta[5];
+
+ /* Wait until the decoding error is ready */
+ if (!wait_for_completion_timeout(&nfc->complete,
+ msecs_to_jiffies(FMC2_TIMEOUT_MS))) {
+ dev_err(nfc->dev, "bch timeout\n");
+ stm32_fmc2_nfc_disable_bch_irq(nfc);
+ return -ETIMEDOUT;
+ }
+
+ regmap_bulk_read(nfc->regmap, FMC2_BCHDSR0, ecc_sta, 5);
+
+ stm32_fmc2_nfc_set_ecc(nfc, false);
+
+ return stm32_fmc2_nfc_bch_decode(chip->ecc.size, dat, ecc_sta);
+}
+
+static int stm32_fmc2_nfc_read_page(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret, i, s, stat, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ int eccstrength = chip->ecc.strength;
+ u8 *p = buf;
+ u8 *ecc_calc = chip->ecc.calc_buf;
+ u8 *ecc_code = chip->ecc.code_buf;
+ unsigned int max_bitflips = 0;
+
+ ret = nand_read_page_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+
+ for (i = mtd->writesize + FMC2_BBM_LEN, s = 0; s < eccsteps;
+ s++, i += eccbytes, p += eccsize) {
+ chip->ecc.hwctl(chip, NAND_ECC_READ);
+
+ /* Read the nand page sector (512 bytes) */
+ ret = nand_change_read_column_op(chip, s * eccsize, p,
+ eccsize, false);
+ if (ret)
+ return ret;
+
+ /* Read the corresponding ECC bytes */
+ ret = nand_change_read_column_op(chip, i, ecc_code,
+ eccbytes, false);
+ if (ret)
+ return ret;
+
+ /* Correct the data */
+ stat = chip->ecc.correct(chip, p, ecc_code, ecc_calc);
+ if (stat == -EBADMSG)
+ /* Check for empty pages with bitflips */
+ stat = nand_check_erased_ecc_chunk(p, eccsize,
+ ecc_code, eccbytes,
+ NULL, 0,
+ eccstrength);
+
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+ }
+
+ /* Read oob */
+ if (oob_required) {
+ ret = nand_change_read_column_op(chip, mtd->writesize,
+ chip->oob_poi, mtd->oobsize,
+ false);
+ if (ret)
+ return ret;
+ }
+
+ return max_bitflips;
+}
+
+/* Sequencer read/write configuration */
+static void stm32_fmc2_nfc_rw_page_init(struct nand_chip *chip, int page,
+ int raw, bool write_data)
+{
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u32 ecc_offset = mtd->writesize + FMC2_BBM_LEN;
+ /*
+ * cfg[0] => csqcfgr1, cfg[1] => csqcfgr2, cfg[2] => csqcfgr3
+ * cfg[3] => csqar1, cfg[4] => csqar2
+ */
+ u32 cfg[5];
+
+ regmap_update_bits(nfc->regmap, FMC2_PCR, FMC2_PCR_WEN,
+ write_data ? FMC2_PCR_WEN : 0);
+
+ /*
+ * - Set Program Page/Page Read command
+ * - Enable DMA request data
+ * - Set timings
+ */
+ cfg[0] = FMC2_CSQCFGR1_DMADEN | FMC2_CSQCFGR1_CMD1T;
+ if (write_data)
+ cfg[0] |= FIELD_PREP(FMC2_CSQCFGR1_CMD1, NAND_CMD_SEQIN);
+ else
+ cfg[0] |= FIELD_PREP(FMC2_CSQCFGR1_CMD1, NAND_CMD_READ0) |
+ FMC2_CSQCFGR1_CMD2EN |
+ FIELD_PREP(FMC2_CSQCFGR1_CMD2, NAND_CMD_READSTART) |
+ FMC2_CSQCFGR1_CMD2T;
+
+ /*
+ * - Set Random Data Input/Random Data Read command
+ * - Enable the sequencer to access the Spare data area
+ * - Enable DMA request status decoding for read
+ * - Set timings
+ */
+ if (write_data)
+ cfg[1] = FIELD_PREP(FMC2_CSQCFGR2_RCMD1, NAND_CMD_RNDIN);
+ else
+ cfg[1] = FIELD_PREP(FMC2_CSQCFGR2_RCMD1, NAND_CMD_RNDOUT) |
+ FMC2_CSQCFGR2_RCMD2EN |
+ FIELD_PREP(FMC2_CSQCFGR2_RCMD2, NAND_CMD_RNDOUTSTART) |
+ FMC2_CSQCFGR2_RCMD1T |
+ FMC2_CSQCFGR2_RCMD2T;
+ if (!raw) {
+ cfg[1] |= write_data ? 0 : FMC2_CSQCFGR2_DMASEN;
+ cfg[1] |= FMC2_CSQCFGR2_SQSDTEN;
+ }
+
+ /*
+ * - Set the number of sectors to be written
+ * - Set timings
+ */
+ cfg[2] = FIELD_PREP(FMC2_CSQCFGR3_SNBR, chip->ecc.steps - 1);
+ if (write_data) {
+ cfg[2] |= FMC2_CSQCFGR3_RAC2T;
+ if (chip->options & NAND_ROW_ADDR_3)
+ cfg[2] |= FMC2_CSQCFGR3_AC5T;
+ else
+ cfg[2] |= FMC2_CSQCFGR3_AC4T;
+ }
+
+ /*
+ * Set the first four address cycles:
+ * Byte 1 and byte 2 => column, we start at 0x0
+ * Byte 3 and byte 4 => page
+ */
+ cfg[3] = FIELD_PREP(FMC2_CSQCAR1_ADDC3, page);
+ cfg[3] |= FIELD_PREP(FMC2_CSQCAR1_ADDC4, page >> 8);
+
+ /*
+ * - Set chip enable number
+ * - Set ECC byte offset in the spare area
+ * - Calculate the number of address cycles to be issued
+ * - Set byte 5 of address cycle if needed
+ */
+ cfg[4] = FIELD_PREP(FMC2_CSQCAR2_NANDCEN, nfc->cs_sel);
+ if (chip->options & NAND_BUSWIDTH_16)
+ cfg[4] |= FIELD_PREP(FMC2_CSQCAR2_SAO, ecc_offset >> 1);
+ else
+ cfg[4] |= FIELD_PREP(FMC2_CSQCAR2_SAO, ecc_offset);
+ if (chip->options & NAND_ROW_ADDR_3) {
+ cfg[0] |= FIELD_PREP(FMC2_CSQCFGR1_ACYNBR, 5);
+ cfg[4] |= FIELD_PREP(FMC2_CSQCAR2_ADDC5, page >> 16);
+ } else {
+ cfg[0] |= FIELD_PREP(FMC2_CSQCFGR1_ACYNBR, 4);
+ }
+
+ regmap_bulk_write(nfc->regmap, FMC2_CSQCFGR1, cfg, 5);
+}
+
+static void stm32_fmc2_nfc_dma_callback(void *arg)
+{
+ complete((struct completion *)arg);
+}
+
+/* Read/write data from/to a page */
+static int stm32_fmc2_nfc_xfer(struct nand_chip *chip, const u8 *buf,
+ int raw, bool write_data)
+{
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
+ struct dma_async_tx_descriptor *desc_data, *desc_ecc;
+ struct scatterlist *sg;
+ struct dma_chan *dma_ch = nfc->dma_rx_ch;
+ enum dma_data_direction dma_data_dir = DMA_FROM_DEVICE;
+ enum dma_transfer_direction dma_transfer_dir = DMA_DEV_TO_MEM;
+ int eccsteps = chip->ecc.steps;
+ int eccsize = chip->ecc.size;
+ unsigned long timeout = msecs_to_jiffies(FMC2_TIMEOUT_MS);
+ const u8 *p = buf;
+ int s, ret;
+
+ /* Configure DMA data */
+ if (write_data) {
+ dma_data_dir = DMA_TO_DEVICE;
+ dma_transfer_dir = DMA_MEM_TO_DEV;
+ dma_ch = nfc->dma_tx_ch;
+ }
+
+ for_each_sg(nfc->dma_data_sg.sgl, sg, eccsteps, s) {
+ sg_set_buf(sg, p, eccsize);
+ p += eccsize;
+ }
+
+ ret = dma_map_sg(nfc->dev, nfc->dma_data_sg.sgl,
+ eccsteps, dma_data_dir);
+ if (ret < 0)
+ return ret;
+
+ desc_data = dmaengine_prep_slave_sg(dma_ch, nfc->dma_data_sg.sgl,
+ eccsteps, dma_transfer_dir,
+ DMA_PREP_INTERRUPT);
+ if (!desc_data) {
+ ret = -ENOMEM;
+ goto err_unmap_data;
+ }
+
+ reinit_completion(&nfc->dma_data_complete);
+ reinit_completion(&nfc->complete);
+ desc_data->callback = stm32_fmc2_nfc_dma_callback;
+ desc_data->callback_param = &nfc->dma_data_complete;
+ ret = dma_submit_error(dmaengine_submit(desc_data));
+ if (ret)
+ goto err_unmap_data;
+
+ dma_async_issue_pending(dma_ch);
+
+ if (!write_data && !raw) {
+ /* Configure DMA ECC status */
+ p = nfc->ecc_buf;
+ for_each_sg(nfc->dma_ecc_sg.sgl, sg, eccsteps, s) {
+ sg_set_buf(sg, p, nfc->dma_ecc_len);
+ p += nfc->dma_ecc_len;
+ }
+
+ ret = dma_map_sg(nfc->dev, nfc->dma_ecc_sg.sgl,
+ eccsteps, dma_data_dir);
+ if (ret < 0)
+ goto err_unmap_data;
+
+ desc_ecc = dmaengine_prep_slave_sg(nfc->dma_ecc_ch,
+ nfc->dma_ecc_sg.sgl,
+ eccsteps, dma_transfer_dir,
+ DMA_PREP_INTERRUPT);
+ if (!desc_ecc) {
+ ret = -ENOMEM;
+ goto err_unmap_ecc;
+ }
+
+ reinit_completion(&nfc->dma_ecc_complete);
+ desc_ecc->callback = stm32_fmc2_nfc_dma_callback;
+ desc_ecc->callback_param = &nfc->dma_ecc_complete;
+ ret = dma_submit_error(dmaengine_submit(desc_ecc));
+ if (ret)
+ goto err_unmap_ecc;
+
+ dma_async_issue_pending(nfc->dma_ecc_ch);
+ }
+
+ stm32_fmc2_nfc_clear_seq_irq(nfc);
+ stm32_fmc2_nfc_enable_seq_irq(nfc);
+
+ /* Start the transfer */
+ regmap_update_bits(nfc->regmap, FMC2_CSQCR,
+ FMC2_CSQCR_CSQSTART, FMC2_CSQCR_CSQSTART);
+
+ /* Wait for the end of the sequencer transfer */
+ if (!wait_for_completion_timeout(&nfc->complete, timeout)) {
+ dev_err(nfc->dev, "seq timeout\n");
+ stm32_fmc2_nfc_disable_seq_irq(nfc);
+ dmaengine_terminate_all(dma_ch);
+ if (!write_data && !raw)
+ dmaengine_terminate_all(nfc->dma_ecc_ch);
+ ret = -ETIMEDOUT;
+ goto err_unmap_ecc;
+ }
+
+ /* Wait for DMA data transfer completion */
+ if (!wait_for_completion_timeout(&nfc->dma_data_complete, timeout)) {
+ dev_err(nfc->dev, "data DMA timeout\n");
+ dmaengine_terminate_all(dma_ch);
+ ret = -ETIMEDOUT;
+ }
+
+ /* Wait for DMA ECC transfer completion */
+ if (!write_data && !raw) {
+ if (!wait_for_completion_timeout(&nfc->dma_ecc_complete,
+ timeout)) {
+ dev_err(nfc->dev, "ECC DMA timeout\n");
+ dmaengine_terminate_all(nfc->dma_ecc_ch);
+ ret = -ETIMEDOUT;
+ }
+ }
+
+err_unmap_ecc:
+ if (!write_data && !raw)
+ dma_unmap_sg(nfc->dev, nfc->dma_ecc_sg.sgl,
+ eccsteps, dma_data_dir);
+
+err_unmap_data:
+ dma_unmap_sg(nfc->dev, nfc->dma_data_sg.sgl, eccsteps, dma_data_dir);
+
+ return ret;
+}
+
+static int stm32_fmc2_nfc_seq_write(struct nand_chip *chip, const u8 *buf,
+ int oob_required, int page, int raw)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ /* Configure the sequencer */
+ stm32_fmc2_nfc_rw_page_init(chip, page, raw, true);
+
+ /* Write the page */
+ ret = stm32_fmc2_nfc_xfer(chip, buf, raw, true);
+ if (ret)
+ return ret;
+
+ /* Write oob */
+ if (oob_required) {
+ ret = nand_change_write_column_op(chip, mtd->writesize,
+ chip->oob_poi, mtd->oobsize,
+ false);
+ if (ret)
+ return ret;
+ }
+
+ return nand_prog_page_end_op(chip);
+}
+
+static int stm32_fmc2_nfc_seq_write_page(struct nand_chip *chip, const u8 *buf,
+ int oob_required, int page)
+{
+ int ret;
+
+ ret = stm32_fmc2_nfc_select_chip(chip, chip->cur_cs);
+ if (ret)
+ return ret;
+
+ return stm32_fmc2_nfc_seq_write(chip, buf, oob_required, page, false);
+}
+
+static int stm32_fmc2_nfc_seq_write_page_raw(struct nand_chip *chip,
+ const u8 *buf, int oob_required,
+ int page)
+{
+ int ret;
+
+ ret = stm32_fmc2_nfc_select_chip(chip, chip->cur_cs);
+ if (ret)
+ return ret;
+
+ return stm32_fmc2_nfc_seq_write(chip, buf, oob_required, page, true);
+}
+
+/* Get a status indicating which sectors have errors */
+static u16 stm32_fmc2_nfc_get_mapping_status(struct stm32_fmc2_nfc *nfc)
+{
+ u32 csqemsr;
+
+ regmap_read(nfc->regmap, FMC2_CSQEMSR, &csqemsr);
+
+ return FIELD_GET(FMC2_CSQEMSR_SEM, csqemsr);
+}
+
+static int stm32_fmc2_nfc_seq_correct(struct nand_chip *chip, u8 *dat,
+ u8 *read_ecc, u8 *calc_ecc)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ int eccstrength = chip->ecc.strength;
+ int i, s, eccsize = chip->ecc.size;
+ u32 *ecc_sta = (u32 *)nfc->ecc_buf;
+ u16 sta_map = stm32_fmc2_nfc_get_mapping_status(nfc);
+ unsigned int max_bitflips = 0;
+
+ for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, dat += eccsize) {
+ int stat = 0;
+
+ if (eccstrength == FMC2_ECC_HAM) {
+ /* Ecc_sta = FMC2_HECCR */
+ if (sta_map & BIT(s)) {
+ stm32_fmc2_nfc_ham_set_ecc(*ecc_sta,
+ &calc_ecc[i]);
+ stat = stm32_fmc2_nfc_ham_correct(chip, dat,
+ &read_ecc[i],
+ &calc_ecc[i]);
+ }
+ ecc_sta++;
+ } else {
+ /*
+ * Ecc_sta[0] = FMC2_BCHDSR0
+ * Ecc_sta[1] = FMC2_BCHDSR1
+ * Ecc_sta[2] = FMC2_BCHDSR2
+ * Ecc_sta[3] = FMC2_BCHDSR3
+ * Ecc_sta[4] = FMC2_BCHDSR4
+ */
+ if (sta_map & BIT(s))
+ stat = stm32_fmc2_nfc_bch_decode(eccsize, dat,
+ ecc_sta);
+ ecc_sta += 5;
+ }
+
+ if (stat == -EBADMSG)
+ /* Check for empty pages with bitflips */
+ stat = nand_check_erased_ecc_chunk(dat, eccsize,
+ &read_ecc[i],
+ eccbytes,
+ NULL, 0,
+ eccstrength);
+
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+ }
+
+ return max_bitflips;
+}
+
+static int stm32_fmc2_nfc_seq_read_page(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
+ u8 *ecc_calc = chip->ecc.calc_buf;
+ u8 *ecc_code = chip->ecc.code_buf;
+ u16 sta_map;
+ int ret;
+
+ ret = stm32_fmc2_nfc_select_chip(chip, chip->cur_cs);
+ if (ret)
+ return ret;
+
+ /* Configure the sequencer */
+ stm32_fmc2_nfc_rw_page_init(chip, page, 0, false);
+
+ /* Read the page */
+ ret = stm32_fmc2_nfc_xfer(chip, buf, 0, false);
+ if (ret)
+ return ret;
+
+ sta_map = stm32_fmc2_nfc_get_mapping_status(nfc);
+
+ /* Check whether any errors occurred */
+ if (likely(!sta_map)) {
+ if (oob_required)
+ return nand_change_read_column_op(chip, mtd->writesize,
+ chip->oob_poi,
+ mtd->oobsize, false);
+
+ return 0;
+ }
+
+ /* Read oob */
+ ret = nand_change_read_column_op(chip, mtd->writesize,
+ chip->oob_poi, mtd->oobsize, false);
+ if (ret)
+ return ret;
+
+ ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
+ chip->ecc.total);
+ if (ret)
+ return ret;
+
+ /* Correct data */
+ return chip->ecc.correct(chip, buf, ecc_code, ecc_calc);
+}
+
+static int stm32_fmc2_nfc_seq_read_page_raw(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ ret = stm32_fmc2_nfc_select_chip(chip, chip->cur_cs);
+ if (ret)
+ return ret;
+
+ /* Configure the sequencer */
+ stm32_fmc2_nfc_rw_page_init(chip, page, 1, false);
+
+ /* Read the page */
+ ret = stm32_fmc2_nfc_xfer(chip, buf, 1, false);
+ if (ret)
+ return ret;
+
+ /* Read oob */
+ if (oob_required)
+ return nand_change_read_column_op(chip, mtd->writesize,
+ chip->oob_poi, mtd->oobsize,
+ false);
+
+ return 0;
+}
+
+static irqreturn_t stm32_fmc2_nfc_irq(int irq, void *dev_id)
+{
+ struct stm32_fmc2_nfc *nfc = (struct stm32_fmc2_nfc *)dev_id;
+
+ if (nfc->irq_state == FMC2_IRQ_SEQ)
+ /* Sequencer is used */
+ stm32_fmc2_nfc_disable_seq_irq(nfc);
+ else if (nfc->irq_state == FMC2_IRQ_BCH)
+ /* BCH is used */
+ stm32_fmc2_nfc_disable_bch_irq(nfc);
+
+ complete(&nfc->complete);
+
+ return IRQ_HANDLED;
+}
+
+static void stm32_fmc2_nfc_read_data(struct nand_chip *chip, void *buf,
+ unsigned int len, bool force_8bit)
+{
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
+ void __iomem *io_addr_r = nfc->data_base[nfc->cs_sel];
+
+ if (force_8bit && chip->options & NAND_BUSWIDTH_16)
+ /* Reconfigure bus width to 8-bit */
+ stm32_fmc2_nfc_set_buswidth_16(nfc, false);
+
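+ /*
+ * Align the buffer pointer first with byte/half-word accesses,
+ * stream the bulk of the transfer as 32-bit reads, then mop up the
+ * remaining bytes.
+ */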
+ if (!IS_ALIGNED((uintptr_t)buf, sizeof(u32))) {
+ if (!IS_ALIGNED((uintptr_t)buf, sizeof(u16)) && len) {
+ *(u8 *)buf = readb_relaxed(io_addr_r);
+ buf += sizeof(u8);
+ len -= sizeof(u8);
+ }
+
+ if (!IS_ALIGNED((uintptr_t)buf, sizeof(u32)) &&
+ len >= sizeof(u16)) {
+ *(u16 *)buf = readw_relaxed(io_addr_r);
+ buf += sizeof(u16);
+ len -= sizeof(u16);
+ }
+ }
+
+ /* Buf is aligned */
+ while (len >= sizeof(u32)) {
+ *(u32 *)buf = readl_relaxed(io_addr_r);
+ buf += sizeof(u32);
+ len -= sizeof(u32);
+ }
+
+ /* Read remaining bytes */
+ if (len >= sizeof(u16)) {
+ *(u16 *)buf = readw_relaxed(io_addr_r);
+ buf += sizeof(u16);
+ len -= sizeof(u16);
+ }
+
+ if (len)
+ *(u8 *)buf = readb_relaxed(io_addr_r);
+
+ if (force_8bit && chip->options & NAND_BUSWIDTH_16)
+ /* Reconfigure bus width to 16-bit */
+ stm32_fmc2_nfc_set_buswidth_16(nfc, true);
+}
+
+static void stm32_fmc2_nfc_write_data(struct nand_chip *chip, const void *buf,
+ unsigned int len, bool force_8bit)
+{
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
+ void __iomem *io_addr_w = nfc->data_base[nfc->cs_sel];
+
+ if (force_8bit && chip->options & NAND_BUSWIDTH_16)
+ /* Reconfigure bus width to 8-bit */
+ stm32_fmc2_nfc_set_buswidth_16(nfc, false);
+
+ if (!IS_ALIGNED((uintptr_t)buf, sizeof(u32))) {
+ if (!IS_ALIGNED((uintptr_t)buf, sizeof(u16)) && len) {
+ writeb_relaxed(*(u8 *)buf, io_addr_w);
+ buf += sizeof(u8);
+ len -= sizeof(u8);
+ }
+
+ if (!IS_ALIGNED((uintptr_t)buf, sizeof(u32)) &&
+ len >= sizeof(u16)) {
+ writew_relaxed(*(u16 *)buf, io_addr_w);
+ buf += sizeof(u16);
+ len -= sizeof(u16);
+ }
+ }
+
+ /* Buf is aligned */
+ while (len >= sizeof(u32)) {
+ writel_relaxed(*(u32 *)buf, io_addr_w);
+ buf += sizeof(u32);
+ len -= sizeof(u32);
+ }
+
+ /* Write remaining bytes */
+ if (len >= sizeof(u16)) {
+ writew_relaxed(*(u16 *)buf, io_addr_w);
+ buf += sizeof(u16);
+ len -= sizeof(u16);
+ }
+
+ if (len)
+ writeb_relaxed(*(u8 *)buf, io_addr_w);
+
+ if (force_8bit && chip->options & NAND_BUSWIDTH_16)
+ /* Reconfigure bus width to 16-bit */
+ stm32_fmc2_nfc_set_buswidth_16(nfc, true);
+}
+
+static int stm32_fmc2_nfc_waitrdy(struct nand_chip *chip,
+ unsigned long timeout_ms)
+{
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
+ const struct nand_sdr_timings *timings;
+ u32 isr, sr;
+
+ /* Check that there are no pending requests to the NAND flash */
+ if (regmap_read_poll_timeout(nfc->regmap, FMC2_SR, sr,
+ sr & FMC2_SR_NWRF, 1,
+ 1000 * FMC2_TIMEOUT_MS))
+ dev_warn(nfc->dev, "Waitrdy timeout\n");
+
+ /* Wait tWB before the R/B# signal goes low */
+ timings = nand_get_sdr_timings(nand_get_interface_config(chip));
+ ndelay(PSEC_TO_NSEC(timings->tWB_max));
+
+ /* R/B# signal is low, clear high level flag */
+ regmap_write(nfc->regmap, FMC2_ICR, FMC2_ICR_CIHLF);
+
+ /* Wait until the R/B# signal is high */
+ return regmap_read_poll_timeout(nfc->regmap, FMC2_ISR, isr,
+ isr & FMC2_ISR_IHLF, 5,
+ 1000 * FMC2_TIMEOUT_MS);
+}
+
+static int stm32_fmc2_nfc_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
+ const struct nand_op_instr *instr = NULL;
+ unsigned int op_id, i, timeout;
+ int ret;
+
+ if (check_only)
+ return 0;
+
+ ret = stm32_fmc2_nfc_select_chip(chip, op->cs);
+ if (ret)
+ return ret;
+
+ for (op_id = 0; op_id < op->ninstrs; op_id++) {
+ instr = &op->instrs[op_id];
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ writeb_relaxed(instr->ctx.cmd.opcode,
+ nfc->cmd_base[nfc->cs_sel]);
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ for (i = 0; i < instr->ctx.addr.naddrs; i++)
+ writeb_relaxed(instr->ctx.addr.addrs[i],
+ nfc->addr_base[nfc->cs_sel]);
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ stm32_fmc2_nfc_read_data(chip, instr->ctx.data.buf.in,
+ instr->ctx.data.len,
+ instr->ctx.data.force_8bit);
+ break;
+
+ case NAND_OP_DATA_OUT_INSTR:
+ stm32_fmc2_nfc_write_data(chip, instr->ctx.data.buf.out,
+ instr->ctx.data.len,
+ instr->ctx.data.force_8bit);
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ timeout = instr->ctx.waitrdy.timeout_ms;
+ ret = stm32_fmc2_nfc_waitrdy(chip, timeout);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static void stm32_fmc2_nfc_init(struct stm32_fmc2_nfc *nfc)
+{
+ u32 pcr;
+
+ regmap_read(nfc->regmap, FMC2_PCR, &pcr);
+
+ /* Set CS used to undefined */
+ nfc->cs_sel = -1;
+
+ /* Enable wait feature and nand flash memory bank */
+ pcr |= FMC2_PCR_PWAITEN;
+ pcr |= FMC2_PCR_PBKEN;
+
+ /* Set buswidth to 8-bit mode for identification */
+ pcr &= ~FMC2_PCR_PWID;
+
+ /* ECC logic is disabled */
+ pcr &= ~FMC2_PCR_ECCEN;
+
+ /* Default mode */
+ pcr &= ~FMC2_PCR_ECCALG;
+ pcr &= ~FMC2_PCR_BCHECC;
+ pcr &= ~FMC2_PCR_WEN;
+
+ /* Set default ECC sector size */
+ pcr &= ~FMC2_PCR_ECCSS;
+ pcr |= FIELD_PREP(FMC2_PCR_ECCSS, FMC2_PCR_ECCSS_2048);
+
+ /* Set default tclr/tar timings */
+ pcr &= ~FMC2_PCR_TCLR;
+ pcr |= FIELD_PREP(FMC2_PCR_TCLR, FMC2_PCR_TCLR_DEFAULT);
+ pcr &= ~FMC2_PCR_TAR;
+ pcr |= FIELD_PREP(FMC2_PCR_TAR, FMC2_PCR_TAR_DEFAULT);
+
+ /* Enable FMC2 controller */
+ if (nfc->dev == nfc->cdev)
+ regmap_update_bits(nfc->regmap, FMC2_BCR1,
+ FMC2_BCR1_FMC2EN, FMC2_BCR1_FMC2EN);
+
+ regmap_write(nfc->regmap, FMC2_PCR, pcr);
+ regmap_write(nfc->regmap, FMC2_PMEM, FMC2_PMEM_DEFAULT);
+ regmap_write(nfc->regmap, FMC2_PATT, FMC2_PATT_DEFAULT);
+}
+
+static void stm32_fmc2_nfc_calc_timings(struct nand_chip *chip,
+ const struct nand_sdr_timings *sdrt)
+{
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
+ struct stm32_fmc2_nand *nand = to_fmc2_nand(chip);
+ struct stm32_fmc2_timings *tims = &nand->timings;
+ unsigned long hclk = clk_get_rate(nfc->clk);
+ unsigned long hclkp = NSEC_PER_SEC / (hclk / 1000);
+ unsigned long timing, tar, tclr, thiz, twait;
+ unsigned long tset_mem, tset_att, thold_mem, thold_att;
+
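+ /*
+ * hclkp is the FMC2 clock period in picoseconds, the same unit as
+ * the nand_sdr_timings values, so each constraint is converted to
+ * clock cycles with DIV_ROUND_UP(t, hclkp).
+ */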
+ tar = max_t(unsigned long, hclkp, sdrt->tAR_min);
+ timing = DIV_ROUND_UP(tar, hclkp) - 1;
+ tims->tar = min_t(unsigned long, timing, FMC2_PCR_TIMING_MASK);
+
+ tclr = max_t(unsigned long, hclkp, sdrt->tCLR_min);
+ timing = DIV_ROUND_UP(tclr, hclkp) - 1;
+ tims->tclr = min_t(unsigned long, timing, FMC2_PCR_TIMING_MASK);
+
+ tims->thiz = FMC2_THIZ;
+ thiz = (tims->thiz + 1) * hclkp;
+
+ /*
+ * tWAIT > tRP
+ * tWAIT > tWP
+ * tWAIT > tREA + tIO
+ */
+ twait = max_t(unsigned long, hclkp, sdrt->tRP_min);
+ twait = max_t(unsigned long, twait, sdrt->tWP_min);
+ twait = max_t(unsigned long, twait, sdrt->tREA_max + FMC2_TIO);
+ timing = DIV_ROUND_UP(twait, hclkp);
+ tims->twait = clamp_val(timing, 1, FMC2_PMEM_PATT_TIMING_MASK);
+
+ /*
+ * tSETUP_MEM > tCS - tWAIT
+ * tSETUP_MEM > tALS - tWAIT
+ * tSETUP_MEM > tDS - (tWAIT - tHIZ)
+ */
+ tset_mem = hclkp;
+ if (sdrt->tCS_min > twait && (tset_mem < sdrt->tCS_min - twait))
+ tset_mem = sdrt->tCS_min - twait;
+ if (sdrt->tALS_min > twait && (tset_mem < sdrt->tALS_min - twait))
+ tset_mem = sdrt->tALS_min - twait;
+ if (twait > thiz && (sdrt->tDS_min > twait - thiz) &&
+ (tset_mem < sdrt->tDS_min - (twait - thiz)))
+ tset_mem = sdrt->tDS_min - (twait - thiz);
+ timing = DIV_ROUND_UP(tset_mem, hclkp);
+ tims->tset_mem = clamp_val(timing, 1, FMC2_PMEM_PATT_TIMING_MASK);
+
+ /*
+ * tHOLD_MEM > tCH
+ * tHOLD_MEM > tREH - tSETUP_MEM
+ * tHOLD_MEM > max(tRC, tWC) - (tSETUP_MEM + tWAIT)
+ */
+ thold_mem = max_t(unsigned long, hclkp, sdrt->tCH_min);
+ if (sdrt->tREH_min > tset_mem &&
+ (thold_mem < sdrt->tREH_min - tset_mem))
+ thold_mem = sdrt->tREH_min - tset_mem;
+ if ((sdrt->tRC_min > tset_mem + twait) &&
+ (thold_mem < sdrt->tRC_min - (tset_mem + twait)))
+ thold_mem = sdrt->tRC_min - (tset_mem + twait);
+ if ((sdrt->tWC_min > tset_mem + twait) &&
+ (thold_mem < sdrt->tWC_min - (tset_mem + twait)))
+ thold_mem = sdrt->tWC_min - (tset_mem + twait);
+ timing = DIV_ROUND_UP(thold_mem, hclkp);
+ tims->thold_mem = clamp_val(timing, 1, FMC2_PMEM_PATT_TIMING_MASK);
+
+ /*
+ * tSETUP_ATT > tCS - tWAIT
+ * tSETUP_ATT > tCLS - tWAIT
+ * tSETUP_ATT > tALS - tWAIT
+ * tSETUP_ATT > tRHW - tHOLD_MEM
+ * tSETUP_ATT > tDS - (tWAIT - tHIZ)
+ */
+ tset_att = hclkp;
+ if (sdrt->tCS_min > twait && (tset_att < sdrt->tCS_min - twait))
+ tset_att = sdrt->tCS_min - twait;
+ if (sdrt->tCLS_min > twait && (tset_att < sdrt->tCLS_min - twait))
+ tset_att = sdrt->tCLS_min - twait;
+ if (sdrt->tALS_min > twait && (tset_att < sdrt->tALS_min - twait))
+ tset_att = sdrt->tALS_min - twait;
+ if (sdrt->tRHW_min > thold_mem &&
+ (tset_att < sdrt->tRHW_min - thold_mem))
+ tset_att = sdrt->tRHW_min - thold_mem;
+ if (twait > thiz && (sdrt->tDS_min > twait - thiz) &&
+ (tset_att < sdrt->tDS_min - (twait - thiz)))
+ tset_att = sdrt->tDS_min - (twait - thiz);
+ timing = DIV_ROUND_UP(tset_att, hclkp);
+ tims->tset_att = clamp_val(timing, 1, FMC2_PMEM_PATT_TIMING_MASK);
+
+ /*
+ * tHOLD_ATT > tALH
+ * tHOLD_ATT > tCH
+ * tHOLD_ATT > tCLH
+ * tHOLD_ATT > tCOH
+ * tHOLD_ATT > tDH
+ * tHOLD_ATT > tWB + tIO + tSYNC - tSETUP_MEM
+ * tHOLD_ATT > tADL - tSETUP_MEM
+ * tHOLD_ATT > tWH - tSETUP_MEM
+ * tHOLD_ATT > tWHR - tSETUP_MEM
+ * tHOLD_ATT > tRC - (tSETUP_ATT + tWAIT)
+ * tHOLD_ATT > tWC - (tSETUP_ATT + tWAIT)
+ */
+ thold_att = max_t(unsigned long, hclkp, sdrt->tALH_min);
+ thold_att = max_t(unsigned long, thold_att, sdrt->tCH_min);
+ thold_att = max_t(unsigned long, thold_att, sdrt->tCLH_min);
+ thold_att = max_t(unsigned long, thold_att, sdrt->tCOH_min);
+ thold_att = max_t(unsigned long, thold_att, sdrt->tDH_min);
+ if ((sdrt->tWB_max + FMC2_TIO + FMC2_TSYNC > tset_mem) &&
+ (thold_att < sdrt->tWB_max + FMC2_TIO + FMC2_TSYNC - tset_mem))
+ thold_att = sdrt->tWB_max + FMC2_TIO + FMC2_TSYNC - tset_mem;
+ if (sdrt->tADL_min > tset_mem &&
+ (thold_att < sdrt->tADL_min - tset_mem))
+ thold_att = sdrt->tADL_min - tset_mem;
+ if (sdrt->tWH_min > tset_mem &&
+ (thold_att < sdrt->tWH_min - tset_mem))
+ thold_att = sdrt->tWH_min - tset_mem;
+ if (sdrt->tWHR_min > tset_mem &&
+ (thold_att < sdrt->tWHR_min - tset_mem))
+ thold_att = sdrt->tWHR_min - tset_mem;
+ if ((sdrt->tRC_min > tset_att + twait) &&
+ (thold_att < sdrt->tRC_min - (tset_att + twait)))
+ thold_att = sdrt->tRC_min - (tset_att + twait);
+ if ((sdrt->tWC_min > tset_att + twait) &&
+ (thold_att < sdrt->tWC_min - (tset_att + twait)))
+ thold_att = sdrt->tWC_min - (tset_att + twait);
+ timing = DIV_ROUND_UP(thold_att, hclkp);
+ tims->thold_att = clamp_val(timing, 1, FMC2_PMEM_PATT_TIMING_MASK);
+}
+
+static int stm32_fmc2_nfc_setup_interface(struct nand_chip *chip, int chipnr,
+ const struct nand_interface_config *conf)
+{
+ const struct nand_sdr_timings *sdrt;
+
+ sdrt = nand_get_sdr_timings(conf);
+ if (IS_ERR(sdrt))
+ return PTR_ERR(sdrt);
+
+ if (conf->timings.mode > 3)
+ return -EOPNOTSUPP;
+
+ if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
+ return 0;
+
+ stm32_fmc2_nfc_calc_timings(chip, sdrt);
+ stm32_fmc2_nfc_timings_init(chip);
+
+ return 0;
+}
+
+static int stm32_fmc2_nfc_dma_setup(struct stm32_fmc2_nfc *nfc)
+{
+ int ret = 0;
+
+ nfc->dma_tx_ch = dma_request_chan(nfc->dev, "tx");
+ if (IS_ERR(nfc->dma_tx_ch)) {
+ ret = PTR_ERR(nfc->dma_tx_ch);
+ if (ret != -ENODEV && ret != -EPROBE_DEFER)
+ dev_err(nfc->dev,
+ "failed to request tx DMA channel: %d\n", ret);
+ nfc->dma_tx_ch = NULL;
+ goto err_dma;
+ }
+
+ nfc->dma_rx_ch = dma_request_chan(nfc->dev, "rx");
+ if (IS_ERR(nfc->dma_rx_ch)) {
+ ret = PTR_ERR(nfc->dma_rx_ch);
+ if (ret != -ENODEV && ret != -EPROBE_DEFER)
+ dev_err(nfc->dev,
+ "failed to request rx DMA channel: %d\n", ret);
+ nfc->dma_rx_ch = NULL;
+ goto err_dma;
+ }
+
+ nfc->dma_ecc_ch = dma_request_chan(nfc->dev, "ecc");
+ if (IS_ERR(nfc->dma_ecc_ch)) {
+ ret = PTR_ERR(nfc->dma_ecc_ch);
+ if (ret != -ENODEV && ret != -EPROBE_DEFER)
+ dev_err(nfc->dev,
+ "failed to request ecc DMA channel: %d\n", ret);
+ nfc->dma_ecc_ch = NULL;
+ goto err_dma;
+ }
+
+ ret = sg_alloc_table(&nfc->dma_ecc_sg, FMC2_MAX_SG, GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ /* Allocate a buffer to store ECC status registers */
+ nfc->ecc_buf = devm_kzalloc(nfc->dev, FMC2_MAX_ECC_BUF_LEN, GFP_KERNEL);
+ if (!nfc->ecc_buf)
+ return -ENOMEM;
+
+ ret = sg_alloc_table(&nfc->dma_data_sg, FMC2_MAX_SG, GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ init_completion(&nfc->dma_data_complete);
+ init_completion(&nfc->dma_ecc_complete);
+
+ return 0;
+
+err_dma:
+ if (ret == -ENODEV) {
+ dev_warn(nfc->dev,
+ "DMAs not defined in the DT, polling mode is used\n");
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static void stm32_fmc2_nfc_nand_callbacks_setup(struct nand_chip *chip)
+{
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
+
+ /*
+ * Specific callbacks to read/write a page depending on
+ * the mode (polling/sequencer) and the algo used (Hamming, BCH).
+ */
+ if (nfc->dma_tx_ch && nfc->dma_rx_ch && nfc->dma_ecc_ch) {
+ /* DMA => use sequencer mode callbacks */
+ chip->ecc.correct = stm32_fmc2_nfc_seq_correct;
+ chip->ecc.write_page = stm32_fmc2_nfc_seq_write_page;
+ chip->ecc.read_page = stm32_fmc2_nfc_seq_read_page;
+ chip->ecc.write_page_raw = stm32_fmc2_nfc_seq_write_page_raw;
+ chip->ecc.read_page_raw = stm32_fmc2_nfc_seq_read_page_raw;
+ } else {
+ /* No DMA => use polling mode callbacks */
+ chip->ecc.hwctl = stm32_fmc2_nfc_hwctl;
+ if (chip->ecc.strength == FMC2_ECC_HAM) {
+ /* Hamming is used */
+ chip->ecc.calculate = stm32_fmc2_nfc_ham_calculate;
+ chip->ecc.correct = stm32_fmc2_nfc_ham_correct;
+ chip->ecc.options |= NAND_ECC_GENERIC_ERASED_CHECK;
+ } else {
+ /* BCH is used */
+ chip->ecc.calculate = stm32_fmc2_nfc_bch_calculate;
+ chip->ecc.correct = stm32_fmc2_nfc_bch_correct;
+ chip->ecc.read_page = stm32_fmc2_nfc_read_page;
+ }
+ }
+
+ /* Specific configurations depending on the algo used */
+ if (chip->ecc.strength == FMC2_ECC_HAM)
+ chip->ecc.bytes = chip->options & NAND_BUSWIDTH_16 ? 4 : 3;
+ else if (chip->ecc.strength == FMC2_ECC_BCH8)
+ chip->ecc.bytes = chip->options & NAND_BUSWIDTH_16 ? 14 : 13;
+ else
+ chip->ecc.bytes = chip->options & NAND_BUSWIDTH_16 ? 8 : 7;
+}
+
+static int stm32_fmc2_nfc_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->length = ecc->total;
+ oobregion->offset = FMC2_BBM_LEN;
+
+ return 0;
+}
+
+static int stm32_fmc2_nfc_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->length = mtd->oobsize - ecc->total - FMC2_BBM_LEN;
+ oobregion->offset = ecc->total + FMC2_BBM_LEN;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops stm32_fmc2_nfc_ooblayout_ops = {
+ .ecc = stm32_fmc2_nfc_ooblayout_ecc,
+ .free = stm32_fmc2_nfc_ooblayout_free,
+};
+
+static int stm32_fmc2_nfc_calc_ecc_bytes(int step_size, int strength)
+{
+ /* Hamming */
+ if (strength == FMC2_ECC_HAM)
+ return 4;
+
+ /* BCH8 */
+ if (strength == FMC2_ECC_BCH8)
+ return 14;
+
+ /* BCH4 */
+ return 8;
+}
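+
+/*
+ * These are the worst-case (16-bit bus) byte counts used to choose a
+ * valid configuration; stm32_fmc2_nfc_nand_callbacks_setup() trims
+ * them to 3/7/13 bytes when the chip sits on an 8-bit bus.
+ */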
+
+NAND_ECC_CAPS_SINGLE(stm32_fmc2_nfc_ecc_caps, stm32_fmc2_nfc_calc_ecc_bytes,
+ FMC2_ECC_STEP_SIZE,
+ FMC2_ECC_HAM, FMC2_ECC_BCH4, FMC2_ECC_BCH8);
+
+static int stm32_fmc2_nfc_attach_chip(struct nand_chip *chip)
+{
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ /*
+ * Only NAND_ECC_ENGINE_TYPE_ON_HOST mode is actually supported
+ * Hamming => ecc.strength = 1
+ * BCH4 => ecc.strength = 4
+ * BCH8 => ecc.strength = 8
+ * ECC sector size = 512
+ */
+ if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST) {
+ dev_err(nfc->dev,
+ "nand_ecc_engine_type is not well defined in the DT\n");
+ return -EINVAL;
+ }
+
+ /* Default ECC settings in case they are not set in the device tree */
+ if (!chip->ecc.size)
+ chip->ecc.size = FMC2_ECC_STEP_SIZE;
+
+ if (!chip->ecc.strength)
+ chip->ecc.strength = FMC2_ECC_BCH8;
+
+ ret = nand_ecc_choose_conf(chip, &stm32_fmc2_nfc_ecc_caps,
+ mtd->oobsize - FMC2_BBM_LEN);
+ if (ret) {
+ dev_err(nfc->dev, "no valid ECC settings set\n");
+ return ret;
+ }
+
+ if (mtd->writesize / chip->ecc.size > FMC2_MAX_SG) {
+ dev_err(nfc->dev, "nand page size is not supported\n");
+ return -EINVAL;
+ }
+
+ if (chip->bbt_options & NAND_BBT_USE_FLASH)
+ chip->bbt_options |= NAND_BBT_NO_OOB;
+
+ stm32_fmc2_nfc_nand_callbacks_setup(chip);
+
+ mtd_set_ooblayout(mtd, &stm32_fmc2_nfc_ooblayout_ops);
+
+ stm32_fmc2_nfc_setup(chip);
+
+ return 0;
+}
+
+static const struct nand_controller_ops stm32_fmc2_nfc_controller_ops = {
+ .attach_chip = stm32_fmc2_nfc_attach_chip,
+ .exec_op = stm32_fmc2_nfc_exec_op,
+ .setup_interface = stm32_fmc2_nfc_setup_interface,
+};
+
+static int stm32_fmc2_nfc_parse_child(struct stm32_fmc2_nfc *nfc,
+ struct device_node *dn)
+{
+ struct stm32_fmc2_nand *nand = &nfc->nand;
+ u32 cs;
+ int ret, i;
+
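+ /*
+ * of_get_property() returns the "reg" property length in bytes
+ * through its last argument; each u32 cell is one chip select.
+ */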
+ if (!of_get_property(dn, "reg", &nand->ncs))
+ return -EINVAL;
+
+ nand->ncs /= sizeof(u32);
+ if (!nand->ncs) {
+ dev_err(nfc->dev, "invalid reg property size\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < nand->ncs; i++) {
+ ret = of_property_read_u32_index(dn, "reg", i, &cs);
+ if (ret) {
+ dev_err(nfc->dev, "could not retrieve reg property: %d\n",
+ ret);
+ return ret;
+ }
+
+ if (cs >= FMC2_MAX_CE) {
+ dev_err(nfc->dev, "invalid reg value: %d\n", cs);
+ return -EINVAL;
+ }
+
+ if (nfc->cs_assigned & BIT(cs)) {
+ dev_err(nfc->dev, "cs already assigned: %d\n", cs);
+ return -EINVAL;
+ }
+
+ nfc->cs_assigned |= BIT(cs);
+ nand->cs_used[i] = cs;
+ }
+
+ nand_set_flash_node(&nand->chip, dn);
+
+ return 0;
+}
+
+static int stm32_fmc2_nfc_parse_dt(struct stm32_fmc2_nfc *nfc)
+{
+ struct device_node *dn = nfc->dev->of_node;
+ struct device_node *child;
+ int nchips = of_get_child_count(dn);
+ int ret = 0;
+
+ if (!nchips) {
+ dev_err(nfc->dev, "NAND chip not defined\n");
+ return -EINVAL;
+ }
+
+ if (nchips > 1) {
+ dev_err(nfc->dev, "too many NAND chips defined\n");
+ return -EINVAL;
+ }
+
+ for_each_child_of_node(dn, child) {
+ ret = stm32_fmc2_nfc_parse_child(nfc, child);
+ if (ret < 0) {
+ of_node_put(child);
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static int stm32_fmc2_nfc_set_cdev(struct stm32_fmc2_nfc *nfc)
+{
+ struct device *dev = nfc->dev;
+ bool ebi_found = false;
+
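+ /*
+ * When the NFC sits behind the stm32mp1 FMC2 EBI bus, the common
+ * registers and clock belong to the parent device (nfc->cdev);
+ * otherwise this device owns them directly.
+ */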
+ if (dev->parent && of_device_is_compatible(dev->parent->of_node,
+ "st,stm32mp1-fmc2-ebi"))
+ ebi_found = true;
+
+ if (of_device_is_compatible(dev->of_node, "st,stm32mp1-fmc2-nfc")) {
+ if (ebi_found) {
+ nfc->cdev = dev->parent;
+
+ return 0;
+ }
+
+ return -EINVAL;
+ }
+
+ if (ebi_found)
+ return -EINVAL;
+
+ nfc->cdev = dev;
+
+ return 0;
+}
+
+static int stm32_fmc2_nfc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct reset_control *rstc;
+ struct stm32_fmc2_nfc *nfc;
+ struct stm32_fmc2_nand *nand;
+ struct resource *res;
+ struct mtd_info *mtd;
+ struct nand_chip *chip;
+ struct resource cres;
+ int chip_cs, mem_region, ret, irq;
+ int start_region = 0;
+
+ nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL);
+ if (!nfc)
+ return -ENOMEM;
+
+ nfc->dev = dev;
+ nand_controller_init(&nfc->base);
+ nfc->base.ops = &stm32_fmc2_nfc_controller_ops;
+
+ ret = stm32_fmc2_nfc_set_cdev(nfc);
+ if (ret)
+ return ret;
+
+ ret = stm32_fmc2_nfc_parse_dt(nfc);
+ if (ret)
+ return ret;
+
+ ret = of_address_to_resource(nfc->cdev->of_node, 0, &cres);
+ if (ret)
+ return ret;
+
+ nfc->io_phys_addr = cres.start;
+
+ nfc->regmap = device_node_to_regmap(nfc->cdev->of_node);
+ if (IS_ERR(nfc->regmap))
+ return PTR_ERR(nfc->regmap);
+
+ if (nfc->dev == nfc->cdev)
+ start_region = 1;
+
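+	/*
+	 * Each assigned CS needs three memory regions (data, command and
+	 * address spaces). When the FMC2 has no EBI parent, region 0 of
+	 * the node maps the controller registers, hence start_region = 1.
+	 */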
+ for (chip_cs = 0, mem_region = start_region; chip_cs < FMC2_MAX_CE;
+ chip_cs++, mem_region += 3) {
+ if (!(nfc->cs_assigned & BIT(chip_cs)))
+ continue;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, mem_region);
+ nfc->data_base[chip_cs] = devm_ioremap_resource(dev, res);
+ if (IS_ERR(nfc->data_base[chip_cs]))
+ return PTR_ERR(nfc->data_base[chip_cs]);
+
+ nfc->data_phys_addr[chip_cs] = res->start;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM,
+ mem_region + 1);
+ nfc->cmd_base[chip_cs] = devm_ioremap_resource(dev, res);
+ if (IS_ERR(nfc->cmd_base[chip_cs]))
+ return PTR_ERR(nfc->cmd_base[chip_cs]);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM,
+ mem_region + 2);
+ nfc->addr_base[chip_cs] = devm_ioremap_resource(dev, res);
+ if (IS_ERR(nfc->addr_base[chip_cs]))
+ return PTR_ERR(nfc->addr_base[chip_cs]);
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(dev, irq, stm32_fmc2_nfc_irq, 0,
+ dev_name(dev), nfc);
+ if (ret) {
+ dev_err(dev, "failed to request irq\n");
+ return ret;
+ }
+
+ init_completion(&nfc->complete);
+
+ nfc->clk = devm_clk_get(nfc->cdev, NULL);
+ if (IS_ERR(nfc->clk))
+ return PTR_ERR(nfc->clk);
+
+ ret = clk_prepare_enable(nfc->clk);
+ if (ret) {
+ dev_err(dev, "can not enable the clock\n");
+ return ret;
+ }
+
+ rstc = devm_reset_control_get(dev, NULL);
+ if (IS_ERR(rstc)) {
+ ret = PTR_ERR(rstc);
+ if (ret == -EPROBE_DEFER)
+ goto err_clk_disable;
+ } else {
+ reset_control_assert(rstc);
+ reset_control_deassert(rstc);
+ }
+
+ ret = stm32_fmc2_nfc_dma_setup(nfc);
+ if (ret)
+ goto err_release_dma;
+
+ stm32_fmc2_nfc_init(nfc);
+
+ nand = &nfc->nand;
+ chip = &nand->chip;
+ mtd = nand_to_mtd(chip);
+ mtd->dev.parent = dev;
+
+ chip->controller = &nfc->base;
+ chip->options |= NAND_BUSWIDTH_AUTO | NAND_NO_SUBPAGE_WRITE |
+ NAND_USES_DMA;
+
+ /* Scan to find existence of the device */
+ ret = nand_scan(chip, nand->ncs);
+ if (ret)
+ goto err_release_dma;
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret)
+ goto err_nand_cleanup;
+
+ platform_set_drvdata(pdev, nfc);
+
+ return 0;
+
+err_nand_cleanup:
+ nand_cleanup(chip);
+
+err_release_dma:
+ if (nfc->dma_ecc_ch)
+ dma_release_channel(nfc->dma_ecc_ch);
+ if (nfc->dma_tx_ch)
+ dma_release_channel(nfc->dma_tx_ch);
+ if (nfc->dma_rx_ch)
+ dma_release_channel(nfc->dma_rx_ch);
+
+ sg_free_table(&nfc->dma_data_sg);
+ sg_free_table(&nfc->dma_ecc_sg);
+
+err_clk_disable:
+ clk_disable_unprepare(nfc->clk);
+
+ return ret;
+}
+
+static int stm32_fmc2_nfc_remove(struct platform_device *pdev)
+{
+ struct stm32_fmc2_nfc *nfc = platform_get_drvdata(pdev);
+ struct stm32_fmc2_nand *nand = &nfc->nand;
+ struct nand_chip *chip = &nand->chip;
+ int ret;
+
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+
+ if (nfc->dma_ecc_ch)
+ dma_release_channel(nfc->dma_ecc_ch);
+ if (nfc->dma_tx_ch)
+ dma_release_channel(nfc->dma_tx_ch);
+ if (nfc->dma_rx_ch)
+ dma_release_channel(nfc->dma_rx_ch);
+
+ sg_free_table(&nfc->dma_data_sg);
+ sg_free_table(&nfc->dma_ecc_sg);
+
+ clk_disable_unprepare(nfc->clk);
+
+ return 0;
+}
+
+static int __maybe_unused stm32_fmc2_nfc_suspend(struct device *dev)
+{
+ struct stm32_fmc2_nfc *nfc = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(nfc->clk);
+
+ pinctrl_pm_select_sleep_state(dev);
+
+ return 0;
+}
+
+static int __maybe_unused stm32_fmc2_nfc_resume(struct device *dev)
+{
+ struct stm32_fmc2_nfc *nfc = dev_get_drvdata(dev);
+ struct stm32_fmc2_nand *nand = &nfc->nand;
+ int chip_cs, ret;
+
+ pinctrl_pm_select_default_state(dev);
+
+ ret = clk_prepare_enable(nfc->clk);
+ if (ret) {
+ dev_err(dev, "can not enable the clock\n");
+ return ret;
+ }
+
+ stm32_fmc2_nfc_init(nfc);
+
+ for (chip_cs = 0; chip_cs < FMC2_MAX_CE; chip_cs++) {
+ if (!(nfc->cs_assigned & BIT(chip_cs)))
+ continue;
+
+ nand_reset(&nand->chip, chip_cs);
+ }
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(stm32_fmc2_nfc_pm_ops, stm32_fmc2_nfc_suspend,
+ stm32_fmc2_nfc_resume);
+
+static const struct of_device_id stm32_fmc2_nfc_match[] = {
+ {.compatible = "st,stm32mp15-fmc2"},
+ {.compatible = "st,stm32mp1-fmc2-nfc"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, stm32_fmc2_nfc_match);
+
+static struct platform_driver stm32_fmc2_nfc_driver = {
+ .probe = stm32_fmc2_nfc_probe,
+ .remove = stm32_fmc2_nfc_remove,
+ .driver = {
+ .name = "stm32_fmc2_nfc",
+ .of_match_table = stm32_fmc2_nfc_match,
+ .pm = &stm32_fmc2_nfc_pm_ops,
+ },
+};
+module_platform_driver(stm32_fmc2_nfc_driver);
+
+MODULE_ALIAS("platform:stm32_fmc2_nfc");
+MODULE_AUTHOR("Christophe Kerello <christophe.kerello@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics STM32 FMC2 NFC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c
new file mode 100644
index 000000000..52eb28f32
--- /dev/null
+++ b/drivers/mtd/nand/raw/sunxi_nand.c
@@ -0,0 +1,2236 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon.dev@gmail.com>
+ *
+ * Derived from:
+ * https://github.com/yuq/sunxi-nfc-mtd
+ * Copyright (C) 2013 Qiang Yu <yuq825@gmail.com>
+ *
+ * https://github.com/hno/Allwinner-Info
+ * Copyright (C) 2013 Henrik Nordström <Henrik Nordström>
+ *
+ * Copyright (C) 2013 Dmitriy B. <rzk333@gmail.com>
+ * Copyright (C) 2013 Sergey Lapin <slapin@ossfans.org>
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/reset.h>
+
+#define NFC_REG_CTL 0x0000
+#define NFC_REG_ST 0x0004
+#define NFC_REG_INT 0x0008
+#define NFC_REG_TIMING_CTL 0x000C
+#define NFC_REG_TIMING_CFG 0x0010
+#define NFC_REG_ADDR_LOW 0x0014
+#define NFC_REG_ADDR_HIGH 0x0018
+#define NFC_REG_SECTOR_NUM 0x001C
+#define NFC_REG_CNT 0x0020
+#define NFC_REG_CMD 0x0024
+#define NFC_REG_RCMD_SET 0x0028
+#define NFC_REG_WCMD_SET 0x002C
+#define NFC_REG_A10_IO_DATA 0x0030
+#define NFC_REG_A23_IO_DATA 0x0300
+#define NFC_REG_ECC_CTL 0x0034
+#define NFC_REG_ECC_ST 0x0038
+#define NFC_REG_DEBUG 0x003C
+#define NFC_REG_ECC_ERR_CNT(x) ((0x0040 + (x)) & ~0x3)
+#define NFC_REG_USER_DATA(x) (0x0050 + ((x) * 4))
+#define NFC_REG_SPARE_AREA 0x00A0
+#define NFC_REG_PAT_ID 0x00A4
+#define NFC_REG_MDMA_CNT 0x00C4
+#define NFC_RAM0_BASE 0x0400
+#define NFC_RAM1_BASE 0x0800
+
+/* bits used in NFC_CTL */
+#define NFC_EN BIT(0)
+#define NFC_RESET BIT(1)
+#define NFC_BUS_WIDTH_MSK BIT(2)
+#define NFC_BUS_WIDTH_8 (0 << 2)
+#define NFC_BUS_WIDTH_16 (1 << 2)
+#define NFC_RB_SEL_MSK BIT(3)
+#define NFC_RB_SEL(x) ((x) << 3)
+#define NFC_CE_SEL_MSK GENMASK(26, 24)
+#define NFC_CE_SEL(x) ((x) << 24)
+#define NFC_CE_CTL BIT(6)
+#define NFC_PAGE_SHIFT_MSK GENMASK(11, 8)
+#define NFC_PAGE_SHIFT(x) (((x) < 10 ? 0 : (x) - 10) << 8)
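+/* The page size is encoded as page_shift - 10, i.e. 0 for 1 KiB pages */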
+#define NFC_SAM BIT(12)
+#define NFC_RAM_METHOD BIT(14)
+#define NFC_DMA_TYPE_NORMAL BIT(15)
+#define NFC_DEBUG_CTL BIT(31)
+
+/* bits used in NFC_ST */
+#define NFC_RB_B2R BIT(0)
+#define NFC_CMD_INT_FLAG BIT(1)
+#define NFC_DMA_INT_FLAG BIT(2)
+#define NFC_CMD_FIFO_STATUS BIT(3)
+#define NFC_STA BIT(4)
+#define NFC_NATCH_INT_FLAG BIT(5)
+#define NFC_RB_STATE(x)		BIT((x) + 8)
+
+/* bits used in NFC_INT */
+#define NFC_B2R_INT_ENABLE BIT(0)
+#define NFC_CMD_INT_ENABLE BIT(1)
+#define NFC_DMA_INT_ENABLE BIT(2)
+#define NFC_INT_MASK (NFC_B2R_INT_ENABLE | \
+ NFC_CMD_INT_ENABLE | \
+ NFC_DMA_INT_ENABLE)
+
+/* bits used in NFC_TIMING_CTL */
+#define NFC_TIMING_CTL_EDO BIT(8)
+
+/* define NFC_TIMING_CFG register layout */
+#define NFC_TIMING_CFG(tWB, tADL, tWHR, tRHW, tCAD) \
+ (((tWB) & 0x3) | (((tADL) & 0x3) << 2) | \
+ (((tWHR) & 0x3) << 4) | (((tRHW) & 0x3) << 6) | \
+ (((tCAD) & 0x7) << 8))
+
+/* bits used in NFC_CMD */
+#define NFC_CMD_LOW_BYTE_MSK GENMASK(7, 0)
+#define NFC_CMD_HIGH_BYTE_MSK GENMASK(15, 8)
+#define NFC_CMD(x) (x)
+#define NFC_ADR_NUM_MSK GENMASK(18, 16)
+#define NFC_ADR_NUM(x) (((x) - 1) << 16)
+#define NFC_SEND_ADR BIT(19)
+#define NFC_ACCESS_DIR BIT(20)
+#define NFC_DATA_TRANS BIT(21)
+#define NFC_SEND_CMD1 BIT(22)
+#define NFC_WAIT_FLAG BIT(23)
+#define NFC_SEND_CMD2 BIT(24)
+#define NFC_SEQ BIT(25)
+#define NFC_DATA_SWAP_METHOD BIT(26)
+#define NFC_ROW_AUTO_INC BIT(27)
+#define NFC_SEND_CMD3 BIT(28)
+#define NFC_SEND_CMD4 BIT(29)
+#define NFC_CMD_TYPE_MSK GENMASK(31, 30)
+#define NFC_NORMAL_OP (0 << 30)
+#define NFC_ECC_OP (1 << 30)
+#define NFC_PAGE_OP (2U << 30)
+
+/* bits used in NFC_RCMD_SET */
+#define NFC_READ_CMD_MSK GENMASK(7, 0)
+#define NFC_RND_READ_CMD0_MSK GENMASK(15, 8)
+#define NFC_RND_READ_CMD1_MSK GENMASK(23, 16)
+
+/* bits used in NFC_WCMD_SET */
+#define NFC_PROGRAM_CMD_MSK GENMASK(7, 0)
+#define NFC_RND_WRITE_CMD_MSK GENMASK(15, 8)
+#define NFC_READ_CMD0_MSK GENMASK(23, 16)
+#define NFC_READ_CMD1_MSK GENMASK(31, 24)
+
+/* bits used in NFC_ECC_CTL */
+#define NFC_ECC_EN BIT(0)
+#define NFC_ECC_PIPELINE BIT(3)
+#define NFC_ECC_EXCEPTION BIT(4)
+#define NFC_ECC_BLOCK_SIZE_MSK BIT(5)
+#define NFC_ECC_BLOCK_512 BIT(5)
+#define NFC_RANDOM_EN BIT(9)
+#define NFC_RANDOM_DIRECTION BIT(10)
+#define NFC_ECC_MODE_MSK GENMASK(15, 12)
+#define NFC_ECC_MODE(x) ((x) << 12)
+#define NFC_RANDOM_SEED_MSK GENMASK(30, 16)
+#define NFC_RANDOM_SEED(x) ((x) << 16)
+
+/* bits used in NFC_ECC_ST */
+#define NFC_ECC_ERR(x) BIT(x)
+#define NFC_ECC_ERR_MSK GENMASK(15, 0)
+#define NFC_ECC_PAT_FOUND(x)	BIT((x) + 16)
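+/* Four 8-bit per-chunk error counts are packed into each 32-bit register */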
+#define NFC_ECC_ERR_CNT(b, x) (((x) >> (((b) % 4) * 8)) & 0xff)
+
+#define NFC_DEFAULT_TIMEOUT_MS 1000
+
+#define NFC_SRAM_SIZE 1024
+
+#define NFC_MAX_CS 7
+
+/**
+ * struct sunxi_nand_chip_sel - stores information related to NAND Chip Select
+ *
+ * @cs: the NAND CS id used to communicate with a NAND Chip
+ * @rb: the Ready/Busy pin ID. -1 means no R/B pin connected to the NFC
+ */
+struct sunxi_nand_chip_sel {
+ u8 cs;
+ s8 rb;
+};
+
+/**
+ * struct sunxi_nand_hw_ecc - stores information related to HW ECC support
+ *
+ * @mode: the sunxi ECC mode field deduced from ECC requirements
+ */
+struct sunxi_nand_hw_ecc {
+ int mode;
+};
+
+/**
+ * struct sunxi_nand_chip - stores NAND chip device related information
+ *
+ * @node: used to store NAND chips into a list
+ * @nand: base NAND chip structure
+ * @clk_rate: clk_rate required for this NAND chip
+ * @timing_cfg: TIMING_CFG register value for this NAND chip
+ * @timing_ctl: TIMING_CTL register value for this NAND chip
+ * @nsels: number of CS lines required by the NAND chip
+ * @sels: array of CS lines descriptions
+ */
+struct sunxi_nand_chip {
+ struct list_head node;
+ struct nand_chip nand;
+ unsigned long clk_rate;
+ u32 timing_cfg;
+ u32 timing_ctl;
+ int nsels;
+ struct sunxi_nand_chip_sel sels[];
+};
+
+static inline struct sunxi_nand_chip *to_sunxi_nand(struct nand_chip *nand)
+{
+ return container_of(nand, struct sunxi_nand_chip, nand);
+}
+
+/**
+ * struct sunxi_nfc_caps - NAND controller capabilities, used to distinguish
+ *			   between compatible strings
+ *
+ * @extra_mbus_conf: contrary to A10, A10s and A13, accessing internal RAM
+ *		     through MBUS on A23/A33 needs extra configuration
+ * @reg_io_data: I/O data register
+ * @dma_maxburst: DMA maxburst
+ */
+struct sunxi_nfc_caps {
+ bool extra_mbus_conf;
+ unsigned int reg_io_data;
+ unsigned int dma_maxburst;
+};
+
+/**
+ * struct sunxi_nfc - stores sunxi NAND controller information
+ *
+ * @controller: base controller structure
+ * @dev: parent device (used to print error messages)
+ * @regs: NAND controller registers
+ * @ahb_clk: NAND controller AHB clock
+ * @mod_clk: NAND controller mod clock
+ * @reset: NAND controller reset line
+ * @assigned_cs: bitmask describing already assigned CS lines
+ * @clk_rate: NAND controller current clock rate
+ * @chips: a list containing all the NAND chips attached to this NAND
+ * controller
+ * @complete: a completion object used to wait for NAND controller events
+ * @dmac: the DMA channel attached to the NAND controller
+ * @caps: NAND controller capabilities
+ */
+struct sunxi_nfc {
+ struct nand_controller controller;
+ struct device *dev;
+ void __iomem *regs;
+ struct clk *ahb_clk;
+ struct clk *mod_clk;
+ struct reset_control *reset;
+ unsigned long assigned_cs;
+ unsigned long clk_rate;
+ struct list_head chips;
+ struct completion complete;
+ struct dma_chan *dmac;
+ const struct sunxi_nfc_caps *caps;
+};
+
+static inline struct sunxi_nfc *to_sunxi_nfc(struct nand_controller *ctrl)
+{
+ return container_of(ctrl, struct sunxi_nfc, controller);
+}
+
+static irqreturn_t sunxi_nfc_interrupt(int irq, void *dev_id)
+{
+ struct sunxi_nfc *nfc = dev_id;
+ u32 st = readl(nfc->regs + NFC_REG_ST);
+ u32 ien = readl(nfc->regs + NFC_REG_INT);
+
+ if (!(ien & st))
+ return IRQ_NONE;
+
+ if ((ien & st) == ien)
+ complete(&nfc->complete);
+
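+	/* Ack the events that fired and mask them until a waiter rearms them */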
+ writel(st & NFC_INT_MASK, nfc->regs + NFC_REG_ST);
+ writel(~st & ien & NFC_INT_MASK, nfc->regs + NFC_REG_INT);
+
+ return IRQ_HANDLED;
+}
+
+static int sunxi_nfc_wait_events(struct sunxi_nfc *nfc, u32 events,
+ bool use_polling, unsigned int timeout_ms)
+{
+ int ret;
+
+ if (events & ~NFC_INT_MASK)
+ return -EINVAL;
+
+ if (!timeout_ms)
+ timeout_ms = NFC_DEFAULT_TIMEOUT_MS;
+
+ if (!use_polling) {
+ init_completion(&nfc->complete);
+
+ writel(events, nfc->regs + NFC_REG_INT);
+
+ ret = wait_for_completion_timeout(&nfc->complete,
+ msecs_to_jiffies(timeout_ms));
+ if (!ret)
+ ret = -ETIMEDOUT;
+ else
+ ret = 0;
+
+ writel(0, nfc->regs + NFC_REG_INT);
+ } else {
+ u32 status;
+
+ ret = readl_poll_timeout(nfc->regs + NFC_REG_ST, status,
+ (status & events) == events, 1,
+ timeout_ms * 1000);
+ }
+
+ writel(events & NFC_INT_MASK, nfc->regs + NFC_REG_ST);
+
+ if (ret)
+ dev_err(nfc->dev, "wait interrupt timedout\n");
+
+ return ret;
+}
+
+static int sunxi_nfc_wait_cmd_fifo_empty(struct sunxi_nfc *nfc)
+{
+ u32 status;
+ int ret;
+
+ ret = readl_poll_timeout(nfc->regs + NFC_REG_ST, status,
+ !(status & NFC_CMD_FIFO_STATUS), 1,
+ NFC_DEFAULT_TIMEOUT_MS * 1000);
+ if (ret)
+ dev_err(nfc->dev, "wait for empty cmd FIFO timedout\n");
+
+ return ret;
+}
+
+static int sunxi_nfc_rst(struct sunxi_nfc *nfc)
+{
+ u32 ctl;
+ int ret;
+
+ writel(0, nfc->regs + NFC_REG_ECC_CTL);
+ writel(NFC_RESET, nfc->regs + NFC_REG_CTL);
+
+ ret = readl_poll_timeout(nfc->regs + NFC_REG_CTL, ctl,
+ !(ctl & NFC_RESET), 1,
+ NFC_DEFAULT_TIMEOUT_MS * 1000);
+ if (ret)
+ dev_err(nfc->dev, "wait for NAND controller reset timedout\n");
+
+ return ret;
+}
+
+static int sunxi_nfc_dma_op_prepare(struct sunxi_nfc *nfc, const void *buf,
+ int chunksize, int nchunks,
+ enum dma_data_direction ddir,
+ struct scatterlist *sg)
+{
+ struct dma_async_tx_descriptor *dmad;
+ enum dma_transfer_direction tdir;
+ dma_cookie_t dmat;
+ int ret;
+
+ if (ddir == DMA_FROM_DEVICE)
+ tdir = DMA_DEV_TO_MEM;
+ else
+ tdir = DMA_MEM_TO_DEV;
+
+ sg_init_one(sg, buf, nchunks * chunksize);
+ ret = dma_map_sg(nfc->dev, sg, 1, ddir);
+ if (!ret)
+ return -ENOMEM;
+
+ dmad = dmaengine_prep_slave_sg(nfc->dmac, sg, 1, tdir, DMA_CTRL_ACK);
+ if (!dmad) {
+ ret = -EINVAL;
+ goto err_unmap_buf;
+ }
+
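+	/*
+	 * Switch the internal SRAM to DMA access mode and program the
+	 * transfer geometry (number of chunks and chunk size).
+	 */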
+ writel(readl(nfc->regs + NFC_REG_CTL) | NFC_RAM_METHOD,
+ nfc->regs + NFC_REG_CTL);
+ writel(nchunks, nfc->regs + NFC_REG_SECTOR_NUM);
+ writel(chunksize, nfc->regs + NFC_REG_CNT);
+ if (nfc->caps->extra_mbus_conf)
+ writel(chunksize * nchunks, nfc->regs + NFC_REG_MDMA_CNT);
+
+ dmat = dmaengine_submit(dmad);
+
+ ret = dma_submit_error(dmat);
+ if (ret)
+ goto err_clr_dma_flag;
+
+ return 0;
+
+err_clr_dma_flag:
+ writel(readl(nfc->regs + NFC_REG_CTL) & ~NFC_RAM_METHOD,
+ nfc->regs + NFC_REG_CTL);
+
+err_unmap_buf:
+ dma_unmap_sg(nfc->dev, sg, 1, ddir);
+ return ret;
+}
+
+static void sunxi_nfc_dma_op_cleanup(struct sunxi_nfc *nfc,
+ enum dma_data_direction ddir,
+ struct scatterlist *sg)
+{
+ dma_unmap_sg(nfc->dev, sg, 1, ddir);
+ writel(readl(nfc->regs + NFC_REG_CTL) & ~NFC_RAM_METHOD,
+ nfc->regs + NFC_REG_CTL);
+}
+
+static void sunxi_nfc_select_chip(struct nand_chip *nand, unsigned int cs)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
+ struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
+ struct sunxi_nand_chip_sel *sel;
+ u32 ctl;
+
+ if (cs > 0 && cs >= sunxi_nand->nsels)
+ return;
+
+ ctl = readl(nfc->regs + NFC_REG_CTL) &
+ ~(NFC_PAGE_SHIFT_MSK | NFC_CE_SEL_MSK | NFC_RB_SEL_MSK | NFC_EN);
+
+ sel = &sunxi_nand->sels[cs];
+ ctl |= NFC_CE_SEL(sel->cs) | NFC_EN | NFC_PAGE_SHIFT(nand->page_shift);
+ if (sel->rb >= 0)
+ ctl |= NFC_RB_SEL(sel->rb);
+
+ writel(mtd->writesize, nfc->regs + NFC_REG_SPARE_AREA);
+
+ if (nfc->clk_rate != sunxi_nand->clk_rate) {
+ clk_set_rate(nfc->mod_clk, sunxi_nand->clk_rate);
+ nfc->clk_rate = sunxi_nand->clk_rate;
+ }
+
+ writel(sunxi_nand->timing_ctl, nfc->regs + NFC_REG_TIMING_CTL);
+ writel(sunxi_nand->timing_cfg, nfc->regs + NFC_REG_TIMING_CFG);
+ writel(ctl, nfc->regs + NFC_REG_CTL);
+}
+
+static void sunxi_nfc_read_buf(struct nand_chip *nand, uint8_t *buf, int len)
+{
+ struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
+ struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
+ int ret;
+ int cnt;
+ int offs = 0;
+ u32 tmp;
+
+ while (len > offs) {
+ bool poll = false;
+
+ cnt = min(len - offs, NFC_SRAM_SIZE);
+
+ ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
+ if (ret)
+ break;
+
+ writel(cnt, nfc->regs + NFC_REG_CNT);
+ tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD;
+ writel(tmp, nfc->regs + NFC_REG_CMD);
+
+ /* Arbitrary limit for polling mode */
+ if (cnt < 64)
+ poll = true;
+
+ ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, poll, 0);
+ if (ret)
+ break;
+
+ if (buf)
+ memcpy_fromio(buf + offs, nfc->regs + NFC_RAM0_BASE,
+ cnt);
+ offs += cnt;
+ }
+}
+
+static void sunxi_nfc_write_buf(struct nand_chip *nand, const uint8_t *buf,
+ int len)
+{
+ struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
+ struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
+ int ret;
+ int cnt;
+ int offs = 0;
+ u32 tmp;
+
+ while (len > offs) {
+ bool poll = false;
+
+ cnt = min(len - offs, NFC_SRAM_SIZE);
+
+ ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
+ if (ret)
+ break;
+
+ writel(cnt, nfc->regs + NFC_REG_CNT);
+ memcpy_toio(nfc->regs + NFC_RAM0_BASE, buf + offs, cnt);
+ tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD |
+ NFC_ACCESS_DIR;
+ writel(tmp, nfc->regs + NFC_REG_CMD);
+
+ /* Arbitrary limit for polling mode */
+ if (cnt < 64)
+ poll = true;
+
+ ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, poll, 0);
+ if (ret)
+ break;
+
+ offs += cnt;
+ }
+}
+
+/* These seed values have been extracted from Allwinner's BSP */
+static const u16 sunxi_nfc_randomizer_page_seeds[] = {
+ 0x2b75, 0x0bd0, 0x5ca3, 0x62d1, 0x1c93, 0x07e9, 0x2162, 0x3a72,
+ 0x0d67, 0x67f9, 0x1be7, 0x077d, 0x032f, 0x0dac, 0x2716, 0x2436,
+ 0x7922, 0x1510, 0x3860, 0x5287, 0x480f, 0x4252, 0x1789, 0x5a2d,
+ 0x2a49, 0x5e10, 0x437f, 0x4b4e, 0x2f45, 0x216e, 0x5cb7, 0x7130,
+ 0x2a3f, 0x60e4, 0x4dc9, 0x0ef0, 0x0f52, 0x1bb9, 0x6211, 0x7a56,
+ 0x226d, 0x4ea7, 0x6f36, 0x3692, 0x38bf, 0x0c62, 0x05eb, 0x4c55,
+ 0x60f4, 0x728c, 0x3b6f, 0x2037, 0x7f69, 0x0936, 0x651a, 0x4ceb,
+ 0x6218, 0x79f3, 0x383f, 0x18d9, 0x4f05, 0x5c82, 0x2912, 0x6f17,
+ 0x6856, 0x5938, 0x1007, 0x61ab, 0x3e7f, 0x57c2, 0x542f, 0x4f62,
+ 0x7454, 0x2eac, 0x7739, 0x42d4, 0x2f90, 0x435a, 0x2e52, 0x2064,
+ 0x637c, 0x66ad, 0x2c90, 0x0bad, 0x759c, 0x0029, 0x0986, 0x7126,
+ 0x1ca7, 0x1605, 0x386a, 0x27f5, 0x1380, 0x6d75, 0x24c3, 0x0f8e,
+ 0x2b7a, 0x1418, 0x1fd1, 0x7dc1, 0x2d8e, 0x43af, 0x2267, 0x7da3,
+ 0x4e3d, 0x1338, 0x50db, 0x454d, 0x764d, 0x40a3, 0x42e6, 0x262b,
+ 0x2d2e, 0x1aea, 0x2e17, 0x173d, 0x3a6e, 0x71bf, 0x25f9, 0x0a5d,
+ 0x7c57, 0x0fbe, 0x46ce, 0x4939, 0x6b17, 0x37bb, 0x3e91, 0x76db,
+};
+
+/*
+ * sunxi_nfc_randomizer_ecc512_seeds and sunxi_nfc_randomizer_ecc1024_seeds
+ * have been generated using
+ * sunxi_nfc_randomizer_step(seed, (step_size * 8) + 15), which is what
+ * the randomizer engine does internally before de/scrambling OOB data.
+ *
+ * Those tables are statically defined to avoid calculating randomizer state
+ * at runtime.
+ */
+static const u16 sunxi_nfc_randomizer_ecc512_seeds[] = {
+ 0x3346, 0x367f, 0x1f18, 0x769a, 0x4f64, 0x068c, 0x2ef1, 0x6b64,
+ 0x28a9, 0x15d7, 0x30f8, 0x3659, 0x53db, 0x7c5f, 0x71d4, 0x4409,
+ 0x26eb, 0x03cc, 0x655d, 0x47d4, 0x4daa, 0x0877, 0x712d, 0x3617,
+ 0x3264, 0x49aa, 0x7f9e, 0x588e, 0x4fbc, 0x7176, 0x7f91, 0x6c6d,
+ 0x4b95, 0x5fb7, 0x3844, 0x4037, 0x0184, 0x081b, 0x0ee8, 0x5b91,
+ 0x293d, 0x1f71, 0x0e6f, 0x402b, 0x5122, 0x1e52, 0x22be, 0x3d2d,
+ 0x75bc, 0x7c60, 0x6291, 0x1a2f, 0x61d4, 0x74aa, 0x4140, 0x29ab,
+ 0x472d, 0x2852, 0x017e, 0x15e8, 0x5ec2, 0x17cf, 0x7d0f, 0x06b8,
+ 0x117a, 0x6b94, 0x789b, 0x3126, 0x6ac5, 0x5be7, 0x150f, 0x51f8,
+ 0x7889, 0x0aa5, 0x663d, 0x77e8, 0x0b87, 0x3dcb, 0x360d, 0x218b,
+ 0x512f, 0x7dc9, 0x6a4d, 0x630a, 0x3547, 0x1dd2, 0x5aea, 0x69a5,
+ 0x7bfa, 0x5e4f, 0x1519, 0x6430, 0x3a0e, 0x5eb3, 0x5425, 0x0c7a,
+ 0x5540, 0x3670, 0x63c1, 0x31e9, 0x5a39, 0x2de7, 0x5979, 0x2891,
+ 0x1562, 0x014b, 0x5b05, 0x2756, 0x5a34, 0x13aa, 0x6cb5, 0x2c36,
+ 0x5e72, 0x1306, 0x0861, 0x15ef, 0x1ee8, 0x5a37, 0x7ac4, 0x45dd,
+ 0x44c4, 0x7266, 0x2f41, 0x3ccc, 0x045e, 0x7d40, 0x7c66, 0x0fa0,
+};
+
+static const u16 sunxi_nfc_randomizer_ecc1024_seeds[] = {
+ 0x2cf5, 0x35f1, 0x63a4, 0x5274, 0x2bd2, 0x778b, 0x7285, 0x32b6,
+ 0x6a5c, 0x70d6, 0x757d, 0x6769, 0x5375, 0x1e81, 0x0cf3, 0x3982,
+ 0x6787, 0x042a, 0x6c49, 0x1925, 0x56a8, 0x40a9, 0x063e, 0x7bd9,
+ 0x4dbf, 0x55ec, 0x672e, 0x7334, 0x5185, 0x4d00, 0x232a, 0x7e07,
+ 0x445d, 0x6b92, 0x528f, 0x4255, 0x53ba, 0x7d82, 0x2a2e, 0x3a4e,
+ 0x75eb, 0x450c, 0x6844, 0x1b5d, 0x581a, 0x4cc6, 0x0379, 0x37b2,
+ 0x419f, 0x0e92, 0x6b27, 0x5624, 0x01e3, 0x07c1, 0x44a5, 0x130c,
+ 0x13e8, 0x5910, 0x0876, 0x60c5, 0x54e3, 0x5b7f, 0x2269, 0x509f,
+ 0x7665, 0x36fd, 0x3e9a, 0x0579, 0x6295, 0x14ef, 0x0a81, 0x1bcc,
+ 0x4b16, 0x64db, 0x0514, 0x4f07, 0x0591, 0x3576, 0x6853, 0x0d9e,
+ 0x259f, 0x38b7, 0x64fb, 0x3094, 0x4693, 0x6ddd, 0x29bb, 0x0bc8,
+ 0x3f47, 0x490e, 0x0c0e, 0x7933, 0x3c9e, 0x5840, 0x398d, 0x3e68,
+ 0x4af1, 0x71f5, 0x57cf, 0x1121, 0x64eb, 0x3579, 0x15ac, 0x584d,
+ 0x5f2a, 0x47e2, 0x6528, 0x6eac, 0x196e, 0x6b96, 0x0450, 0x0179,
+ 0x609c, 0x06e1, 0x4626, 0x42c7, 0x273e, 0x486f, 0x0705, 0x1601,
+ 0x145b, 0x407e, 0x062b, 0x57a5, 0x53f9, 0x5659, 0x4410, 0x3ccd,
+};
+
+static u16 sunxi_nfc_randomizer_step(u16 state, int count)
+{
+ state &= 0x7fff;
+
+ /*
+ * This loop is just a simple implementation of a Fibonacci LFSR using
+ * the x16 + x15 + 1 polynomial.
+ */
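+	/* For example, a single step turns state 0x2b75 into 0x55ba. */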
+ while (count--)
+ state = ((state >> 1) |
+ (((state ^ (state >> 1)) & 1) << 14)) & 0x7fff;
+
+ return state;
+}
+
+static u16 sunxi_nfc_randomizer_state(struct nand_chip *nand, int page,
+ bool ecc)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ const u16 *seeds = sunxi_nfc_randomizer_page_seeds;
+ int mod = mtd_div_by_ws(mtd->erasesize, mtd);
+
+ if (mod > ARRAY_SIZE(sunxi_nfc_randomizer_page_seeds))
+ mod = ARRAY_SIZE(sunxi_nfc_randomizer_page_seeds);
+
+ if (ecc) {
+ if (mtd->ecc_step_size == 512)
+ seeds = sunxi_nfc_randomizer_ecc512_seeds;
+ else
+ seeds = sunxi_nfc_randomizer_ecc1024_seeds;
+ }
+
+ return seeds[page % mod];
+}
+
+static void sunxi_nfc_randomizer_config(struct nand_chip *nand, int page,
+ bool ecc)
+{
+ struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+	u32 ecc_ctl;
+ u16 state;
+
+ if (!(nand->options & NAND_NEED_SCRAMBLING))
+ return;
+
+ state = sunxi_nfc_randomizer_state(nand, page, ecc);
+ ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_RANDOM_SEED_MSK;
+ writel(ecc_ctl | NFC_RANDOM_SEED(state), nfc->regs + NFC_REG_ECC_CTL);
+}
+
+static void sunxi_nfc_randomizer_enable(struct nand_chip *nand)
+{
+ struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+
+ if (!(nand->options & NAND_NEED_SCRAMBLING))
+ return;
+
+ writel(readl(nfc->regs + NFC_REG_ECC_CTL) | NFC_RANDOM_EN,
+ nfc->regs + NFC_REG_ECC_CTL);
+}
+
+static void sunxi_nfc_randomizer_disable(struct nand_chip *nand)
+{
+ struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+
+ if (!(nand->options & NAND_NEED_SCRAMBLING))
+ return;
+
+ writel(readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_RANDOM_EN,
+ nfc->regs + NFC_REG_ECC_CTL);
+}
+
+static void sunxi_nfc_randomize_bbm(struct nand_chip *nand, int page, u8 *bbm)
+{
+ u16 state = sunxi_nfc_randomizer_state(nand, page, true);
+
+ bbm[0] ^= state;
+ bbm[1] ^= sunxi_nfc_randomizer_step(state, 8);
+}
+
+static void sunxi_nfc_randomizer_write_buf(struct nand_chip *nand,
+ const uint8_t *buf, int len,
+ bool ecc, int page)
+{
+ sunxi_nfc_randomizer_config(nand, page, ecc);
+ sunxi_nfc_randomizer_enable(nand);
+ sunxi_nfc_write_buf(nand, buf, len);
+ sunxi_nfc_randomizer_disable(nand);
+}
+
+static void sunxi_nfc_randomizer_read_buf(struct nand_chip *nand, uint8_t *buf,
+ int len, bool ecc, int page)
+{
+ sunxi_nfc_randomizer_config(nand, page, ecc);
+ sunxi_nfc_randomizer_enable(nand);
+ sunxi_nfc_read_buf(nand, buf, len);
+ sunxi_nfc_randomizer_disable(nand);
+}
+
+static void sunxi_nfc_hw_ecc_enable(struct nand_chip *nand)
+{
+ struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+ struct sunxi_nand_hw_ecc *data = nand->ecc.priv;
+ u32 ecc_ctl;
+
+ ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL);
+ ecc_ctl &= ~(NFC_ECC_MODE_MSK | NFC_ECC_PIPELINE |
+ NFC_ECC_BLOCK_SIZE_MSK);
+ ecc_ctl |= NFC_ECC_EN | NFC_ECC_MODE(data->mode) | NFC_ECC_EXCEPTION |
+ NFC_ECC_PIPELINE;
+
+ if (nand->ecc.size == 512)
+ ecc_ctl |= NFC_ECC_BLOCK_512;
+
+ writel(ecc_ctl, nfc->regs + NFC_REG_ECC_CTL);
+}
+
+static void sunxi_nfc_hw_ecc_disable(struct nand_chip *nand)
+{
+ struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+
+ writel(readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_ECC_EN,
+ nfc->regs + NFC_REG_ECC_CTL);
+}
+
+static inline void sunxi_nfc_user_data_to_buf(u32 user_data, u8 *buf)
+{
+ buf[0] = user_data;
+ buf[1] = user_data >> 8;
+ buf[2] = user_data >> 16;
+ buf[3] = user_data >> 24;
+}
+
+static inline u32 sunxi_nfc_buf_to_user_data(const u8 *buf)
+{
+ return buf[0] | (buf[1] << 8) | (buf[2] << 16) | (buf[3] << 24);
+}
+
+static void sunxi_nfc_hw_ecc_get_prot_oob_bytes(struct nand_chip *nand, u8 *oob,
+ int step, bool bbm, int page)
+{
+ struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+
+ sunxi_nfc_user_data_to_buf(readl(nfc->regs + NFC_REG_USER_DATA(step)),
+ oob);
+
+ /* De-randomize the Bad Block Marker. */
+ if (bbm && (nand->options & NAND_NEED_SCRAMBLING))
+ sunxi_nfc_randomize_bbm(nand, page, oob);
+}
+
+static void sunxi_nfc_hw_ecc_set_prot_oob_bytes(struct nand_chip *nand,
+ const u8 *oob, int step,
+ bool bbm, int page)
+{
+ struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+ u8 user_data[4];
+
+ /* Randomize the Bad Block Marker. */
+ if (bbm && (nand->options & NAND_NEED_SCRAMBLING)) {
+ memcpy(user_data, oob, sizeof(user_data));
+ sunxi_nfc_randomize_bbm(nand, page, user_data);
+ oob = user_data;
+ }
+
+ writel(sunxi_nfc_buf_to_user_data(oob),
+ nfc->regs + NFC_REG_USER_DATA(step));
+}
+
+static void sunxi_nfc_hw_ecc_update_stats(struct nand_chip *nand,
+ unsigned int *max_bitflips, int ret)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand);
+
+ if (ret < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += ret;
+ *max_bitflips = max_t(unsigned int, *max_bitflips, ret);
+ }
+}
+
+static int sunxi_nfc_hw_ecc_correct(struct nand_chip *nand, u8 *data, u8 *oob,
+ int step, u32 status, bool *erased)
+{
+ struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+ struct nand_ecc_ctrl *ecc = &nand->ecc;
+ u32 tmp;
+
+ *erased = false;
+
+ if (status & NFC_ECC_ERR(step))
+ return -EBADMSG;
+
+ if (status & NFC_ECC_PAT_FOUND(step)) {
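+		/*
+		 * The controller recognized a constant pattern: bit 0 of the
+		 * PAT_ID register tells whether it was all-0x00 or all-0xff
+		 * (an erased chunk).
+		 */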
+ u8 pattern;
+
+ if (unlikely(!(readl(nfc->regs + NFC_REG_PAT_ID) & 0x1))) {
+ pattern = 0x0;
+ } else {
+ pattern = 0xff;
+ *erased = true;
+ }
+
+ if (data)
+ memset(data, pattern, ecc->size);
+
+ if (oob)
+ memset(oob, pattern, ecc->bytes + 4);
+
+ return 0;
+ }
+
+ tmp = readl(nfc->regs + NFC_REG_ECC_ERR_CNT(step));
+
+ return NFC_ECC_ERR_CNT(step, tmp);
+}
+
+static int sunxi_nfc_hw_ecc_read_chunk(struct nand_chip *nand,
+ u8 *data, int data_off,
+ u8 *oob, int oob_off,
+ int *cur_off,
+ unsigned int *max_bitflips,
+ bool bbm, bool oob_required, int page)
+{
+ struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+ struct nand_ecc_ctrl *ecc = &nand->ecc;
+ int raw_mode = 0;
+ bool erased;
+ int ret;
+
+ if (*cur_off != data_off)
+ nand_change_read_column_op(nand, data_off, NULL, 0, false);
+
+ sunxi_nfc_randomizer_read_buf(nand, NULL, ecc->size, false, page);
+
+ if (data_off + ecc->size != oob_off)
+ nand_change_read_column_op(nand, oob_off, NULL, 0, false);
+
+ ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
+ if (ret)
+ return ret;
+
+ sunxi_nfc_randomizer_enable(nand);
+ writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ECC_OP,
+ nfc->regs + NFC_REG_CMD);
+
+ ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, false, 0);
+ sunxi_nfc_randomizer_disable(nand);
+ if (ret)
+ return ret;
+
+ *cur_off = oob_off + ecc->bytes + 4;
+
+ ret = sunxi_nfc_hw_ecc_correct(nand, data, oob_required ? oob : NULL, 0,
+ readl(nfc->regs + NFC_REG_ECC_ST),
+ &erased);
+ if (erased)
+ return 1;
+
+ if (ret < 0) {
+ /*
+ * Re-read the data with the randomizer disabled to identify
+ * bitflips in erased pages.
+ */
+ if (nand->options & NAND_NEED_SCRAMBLING)
+ nand_change_read_column_op(nand, data_off, data,
+ ecc->size, false);
+ else
+ memcpy_fromio(data, nfc->regs + NFC_RAM0_BASE,
+ ecc->size);
+
+ nand_change_read_column_op(nand, oob_off, oob, ecc->bytes + 4,
+ false);
+
+ ret = nand_check_erased_ecc_chunk(data, ecc->size,
+ oob, ecc->bytes + 4,
+ NULL, 0, ecc->strength);
+ if (ret >= 0)
+ raw_mode = 1;
+ } else {
+ memcpy_fromio(data, nfc->regs + NFC_RAM0_BASE, ecc->size);
+
+ if (oob_required) {
+ nand_change_read_column_op(nand, oob_off, NULL, 0,
+ false);
+ sunxi_nfc_randomizer_read_buf(nand, oob, ecc->bytes + 4,
+ true, page);
+
+ sunxi_nfc_hw_ecc_get_prot_oob_bytes(nand, oob, 0,
+ bbm, page);
+ }
+ }
+
+ sunxi_nfc_hw_ecc_update_stats(nand, max_bitflips, ret);
+
+ return raw_mode;
+}
+
+static void sunxi_nfc_hw_ecc_read_extra_oob(struct nand_chip *nand,
+ u8 *oob, int *cur_off,
+ bool randomize, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct nand_ecc_ctrl *ecc = &nand->ecc;
+ int offset = ((ecc->bytes + 4) * ecc->steps);
+ int len = mtd->oobsize - offset;
+
+ if (len <= 0)
+ return;
+
+ if (!cur_off || *cur_off != offset)
+ nand_change_read_column_op(nand, mtd->writesize, NULL, 0,
+ false);
+
+ if (!randomize)
+ sunxi_nfc_read_buf(nand, oob + offset, len);
+ else
+ sunxi_nfc_randomizer_read_buf(nand, oob + offset, len,
+ false, page);
+
+ if (cur_off)
+ *cur_off = mtd->oobsize + mtd->writesize;
+}
+
+static int sunxi_nfc_hw_ecc_read_chunks_dma(struct nand_chip *nand, uint8_t *buf,
+ int oob_required, int page,
+ int nchunks)
+{
+ bool randomized = nand->options & NAND_NEED_SCRAMBLING;
+ struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct nand_ecc_ctrl *ecc = &nand->ecc;
+ unsigned int max_bitflips = 0;
+ int ret, i, raw_mode = 0;
+ struct scatterlist sg;
+ u32 status;
+
+ ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
+ if (ret)
+ return ret;
+
+ ret = sunxi_nfc_dma_op_prepare(nfc, buf, ecc->size, nchunks,
+ DMA_FROM_DEVICE, &sg);
+ if (ret)
+ return ret;
+
+ sunxi_nfc_hw_ecc_enable(nand);
+ sunxi_nfc_randomizer_config(nand, page, false);
+ sunxi_nfc_randomizer_enable(nand);
+
+ writel((NAND_CMD_RNDOUTSTART << 16) | (NAND_CMD_RNDOUT << 8) |
+ NAND_CMD_READSTART, nfc->regs + NFC_REG_RCMD_SET);
+
+ dma_async_issue_pending(nfc->dmac);
+
+ writel(NFC_PAGE_OP | NFC_DATA_SWAP_METHOD | NFC_DATA_TRANS,
+ nfc->regs + NFC_REG_CMD);
+
+ ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, false, 0);
+ if (ret)
+ dmaengine_terminate_all(nfc->dmac);
+
+ sunxi_nfc_randomizer_disable(nand);
+ sunxi_nfc_hw_ecc_disable(nand);
+
+ sunxi_nfc_dma_op_cleanup(nfc, DMA_FROM_DEVICE, &sg);
+
+ if (ret)
+ return ret;
+
+ status = readl(nfc->regs + NFC_REG_ECC_ST);
+
+ for (i = 0; i < nchunks; i++) {
+ int data_off = i * ecc->size;
+ int oob_off = i * (ecc->bytes + 4);
+ u8 *data = buf + data_off;
+ u8 *oob = nand->oob_poi + oob_off;
+ bool erased;
+
+ ret = sunxi_nfc_hw_ecc_correct(nand, randomized ? data : NULL,
+ oob_required ? oob : NULL,
+ i, status, &erased);
+
+ /* ECC errors are handled in the second loop. */
+ if (ret < 0)
+ continue;
+
+ if (oob_required && !erased) {
+ /* TODO: use DMA to retrieve OOB */
+ nand_change_read_column_op(nand,
+ mtd->writesize + oob_off,
+ oob, ecc->bytes + 4, false);
+
+ sunxi_nfc_hw_ecc_get_prot_oob_bytes(nand, oob, i,
+ !i, page);
+ }
+
+ if (erased)
+ raw_mode = 1;
+
+ sunxi_nfc_hw_ecc_update_stats(nand, &max_bitflips, ret);
+ }
+
+ if (status & NFC_ECC_ERR_MSK) {
+ for (i = 0; i < nchunks; i++) {
+ int data_off = i * ecc->size;
+ int oob_off = i * (ecc->bytes + 4);
+ u8 *data = buf + data_off;
+ u8 *oob = nand->oob_poi + oob_off;
+
+ if (!(status & NFC_ECC_ERR(i)))
+ continue;
+
+ /*
+ * Re-read the data with the randomizer disabled to
+ * identify bitflips in erased pages.
+ * TODO: use DMA to read page in raw mode
+ */
+ if (randomized)
+ nand_change_read_column_op(nand, data_off,
+ data, ecc->size,
+ false);
+
+ /* TODO: use DMA to retrieve OOB */
+ nand_change_read_column_op(nand,
+ mtd->writesize + oob_off,
+ oob, ecc->bytes + 4, false);
+
+ ret = nand_check_erased_ecc_chunk(data, ecc->size,
+ oob, ecc->bytes + 4,
+ NULL, 0,
+ ecc->strength);
+ if (ret >= 0)
+ raw_mode = 1;
+
+ sunxi_nfc_hw_ecc_update_stats(nand, &max_bitflips, ret);
+ }
+ }
+
+ if (oob_required)
+ sunxi_nfc_hw_ecc_read_extra_oob(nand, nand->oob_poi,
+ NULL, !raw_mode,
+ page);
+
+ return max_bitflips;
+}
+
+static int sunxi_nfc_hw_ecc_write_chunk(struct nand_chip *nand,
+ const u8 *data, int data_off,
+ const u8 *oob, int oob_off,
+ int *cur_off, bool bbm,
+ int page)
+{
+ struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+ struct nand_ecc_ctrl *ecc = &nand->ecc;
+ int ret;
+
+ if (data_off != *cur_off)
+ nand_change_write_column_op(nand, data_off, NULL, 0, false);
+
+ sunxi_nfc_randomizer_write_buf(nand, data, ecc->size, false, page);
+
+ if (data_off + ecc->size != oob_off)
+ nand_change_write_column_op(nand, oob_off, NULL, 0, false);
+
+ ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
+ if (ret)
+ return ret;
+
+ sunxi_nfc_randomizer_enable(nand);
+ sunxi_nfc_hw_ecc_set_prot_oob_bytes(nand, oob, 0, bbm, page);
+
+ writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD |
+ NFC_ACCESS_DIR | NFC_ECC_OP,
+ nfc->regs + NFC_REG_CMD);
+
+ ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, false, 0);
+ sunxi_nfc_randomizer_disable(nand);
+ if (ret)
+ return ret;
+
+ *cur_off = oob_off + ecc->bytes + 4;
+
+ return 0;
+}
+
+static void sunxi_nfc_hw_ecc_write_extra_oob(struct nand_chip *nand,
+ u8 *oob, int *cur_off,
+ int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct nand_ecc_ctrl *ecc = &nand->ecc;
+ int offset = ((ecc->bytes + 4) * ecc->steps);
+ int len = mtd->oobsize - offset;
+
+ if (len <= 0)
+ return;
+
+ if (!cur_off || *cur_off != offset)
+ nand_change_write_column_op(nand, offset + mtd->writesize,
+ NULL, 0, false);
+
+ sunxi_nfc_randomizer_write_buf(nand, oob + offset, len, false, page);
+
+ if (cur_off)
+ *cur_off = mtd->oobsize + mtd->writesize;
+}
+
+static int sunxi_nfc_hw_ecc_read_page(struct nand_chip *nand, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct nand_ecc_ctrl *ecc = &nand->ecc;
+ unsigned int max_bitflips = 0;
+ int ret, i, cur_off = 0;
+ bool raw_mode = false;
+
+ sunxi_nfc_select_chip(nand, nand->cur_cs);
+
+ nand_read_page_op(nand, page, 0, NULL, 0);
+
+ sunxi_nfc_hw_ecc_enable(nand);
+
+ for (i = 0; i < ecc->steps; i++) {
+ int data_off = i * ecc->size;
+ int oob_off = i * (ecc->bytes + 4);
+ u8 *data = buf + data_off;
+ u8 *oob = nand->oob_poi + oob_off;
+
+ ret = sunxi_nfc_hw_ecc_read_chunk(nand, data, data_off, oob,
+ oob_off + mtd->writesize,
+ &cur_off, &max_bitflips,
+ !i, oob_required, page);
+ if (ret < 0)
+ return ret;
+ else if (ret)
+ raw_mode = true;
+ }
+
+ if (oob_required)
+ sunxi_nfc_hw_ecc_read_extra_oob(nand, nand->oob_poi, &cur_off,
+ !raw_mode, page);
+
+ sunxi_nfc_hw_ecc_disable(nand);
+
+ return max_bitflips;
+}
+
+static int sunxi_nfc_hw_ecc_read_page_dma(struct nand_chip *nand, u8 *buf,
+ int oob_required, int page)
+{
+ int ret;
+
+ sunxi_nfc_select_chip(nand, nand->cur_cs);
+
+ nand_read_page_op(nand, page, 0, NULL, 0);
+
+ ret = sunxi_nfc_hw_ecc_read_chunks_dma(nand, buf, oob_required, page,
+ nand->ecc.steps);
+ if (ret >= 0)
+ return ret;
+
+	/* Fall back to PIO mode */
+ return sunxi_nfc_hw_ecc_read_page(nand, buf, oob_required, page);
+}
+
+static int sunxi_nfc_hw_ecc_read_subpage(struct nand_chip *nand,
+ u32 data_offs, u32 readlen,
+ u8 *bufpoi, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct nand_ecc_ctrl *ecc = &nand->ecc;
+ int ret, i, cur_off = 0;
+ unsigned int max_bitflips = 0;
+
+ sunxi_nfc_select_chip(nand, nand->cur_cs);
+
+ nand_read_page_op(nand, page, 0, NULL, 0);
+
+ sunxi_nfc_hw_ecc_enable(nand);
+
+ for (i = data_offs / ecc->size;
+ i < DIV_ROUND_UP(data_offs + readlen, ecc->size); i++) {
+ int data_off = i * ecc->size;
+ int oob_off = i * (ecc->bytes + 4);
+ u8 *data = bufpoi + data_off;
+ u8 *oob = nand->oob_poi + oob_off;
+
+ ret = sunxi_nfc_hw_ecc_read_chunk(nand, data, data_off,
+ oob,
+ oob_off + mtd->writesize,
+ &cur_off, &max_bitflips, !i,
+ false, page);
+ if (ret < 0)
+ return ret;
+ }
+
+ sunxi_nfc_hw_ecc_disable(nand);
+
+ return max_bitflips;
+}
+
+static int sunxi_nfc_hw_ecc_read_subpage_dma(struct nand_chip *nand,
+ u32 data_offs, u32 readlen,
+ u8 *buf, int page)
+{
+ int nchunks = DIV_ROUND_UP(data_offs + readlen, nand->ecc.size);
+ int ret;
+
+ sunxi_nfc_select_chip(nand, nand->cur_cs);
+
+ nand_read_page_op(nand, page, 0, NULL, 0);
+
+ ret = sunxi_nfc_hw_ecc_read_chunks_dma(nand, buf, false, page, nchunks);
+ if (ret >= 0)
+ return ret;
+
+	/* Fall back to PIO mode */
+ return sunxi_nfc_hw_ecc_read_subpage(nand, data_offs, readlen,
+ buf, page);
+}
+
+static int sunxi_nfc_hw_ecc_write_page(struct nand_chip *nand,
+ const uint8_t *buf, int oob_required,
+ int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct nand_ecc_ctrl *ecc = &nand->ecc;
+ int ret, i, cur_off = 0;
+
+ sunxi_nfc_select_chip(nand, nand->cur_cs);
+
+ nand_prog_page_begin_op(nand, page, 0, NULL, 0);
+
+ sunxi_nfc_hw_ecc_enable(nand);
+
+ for (i = 0; i < ecc->steps; i++) {
+ int data_off = i * ecc->size;
+ int oob_off = i * (ecc->bytes + 4);
+ const u8 *data = buf + data_off;
+ const u8 *oob = nand->oob_poi + oob_off;
+
+ ret = sunxi_nfc_hw_ecc_write_chunk(nand, data, data_off, oob,
+ oob_off + mtd->writesize,
+ &cur_off, !i, page);
+ if (ret)
+ return ret;
+ }
+
+ if (oob_required || (nand->options & NAND_NEED_SCRAMBLING))
+ sunxi_nfc_hw_ecc_write_extra_oob(nand, nand->oob_poi,
+ &cur_off, page);
+
+ sunxi_nfc_hw_ecc_disable(nand);
+
+ return nand_prog_page_end_op(nand);
+}
+
+static int sunxi_nfc_hw_ecc_write_subpage(struct nand_chip *nand,
+ u32 data_offs, u32 data_len,
+ const u8 *buf, int oob_required,
+ int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct nand_ecc_ctrl *ecc = &nand->ecc;
+ int ret, i, cur_off = 0;
+
+ sunxi_nfc_select_chip(nand, nand->cur_cs);
+
+ nand_prog_page_begin_op(nand, page, 0, NULL, 0);
+
+ sunxi_nfc_hw_ecc_enable(nand);
+
+ for (i = data_offs / ecc->size;
+ i < DIV_ROUND_UP(data_offs + data_len, ecc->size); i++) {
+ int data_off = i * ecc->size;
+ int oob_off = i * (ecc->bytes + 4);
+ const u8 *data = buf + data_off;
+ const u8 *oob = nand->oob_poi + oob_off;
+
+ ret = sunxi_nfc_hw_ecc_write_chunk(nand, data, data_off, oob,
+ oob_off + mtd->writesize,
+ &cur_off, !i, page);
+ if (ret)
+ return ret;
+ }
+
+ sunxi_nfc_hw_ecc_disable(nand);
+
+ return nand_prog_page_end_op(nand);
+}
+
+static int sunxi_nfc_hw_ecc_write_page_dma(struct nand_chip *nand,
+ const u8 *buf,
+ int oob_required,
+ int page)
+{
+ struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+ struct nand_ecc_ctrl *ecc = &nand->ecc;
+ struct scatterlist sg;
+ int ret, i;
+
+ sunxi_nfc_select_chip(nand, nand->cur_cs);
+
+ ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
+ if (ret)
+ return ret;
+
+ ret = sunxi_nfc_dma_op_prepare(nfc, buf, ecc->size, ecc->steps,
+ DMA_TO_DEVICE, &sg);
+ if (ret)
+ goto pio_fallback;
+
+ for (i = 0; i < ecc->steps; i++) {
+ const u8 *oob = nand->oob_poi + (i * (ecc->bytes + 4));
+
+ sunxi_nfc_hw_ecc_set_prot_oob_bytes(nand, oob, i, !i, page);
+ }
+
+ nand_prog_page_begin_op(nand, page, 0, NULL, 0);
+
+ sunxi_nfc_hw_ecc_enable(nand);
+ sunxi_nfc_randomizer_config(nand, page, false);
+ sunxi_nfc_randomizer_enable(nand);
+
+ writel((NAND_CMD_RNDIN << 8) | NAND_CMD_PAGEPROG,
+ nfc->regs + NFC_REG_WCMD_SET);
+
+ dma_async_issue_pending(nfc->dmac);
+
+ writel(NFC_PAGE_OP | NFC_DATA_SWAP_METHOD |
+ NFC_DATA_TRANS | NFC_ACCESS_DIR,
+ nfc->regs + NFC_REG_CMD);
+
+ ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, false, 0);
+ if (ret)
+ dmaengine_terminate_all(nfc->dmac);
+
+ sunxi_nfc_randomizer_disable(nand);
+ sunxi_nfc_hw_ecc_disable(nand);
+
+ sunxi_nfc_dma_op_cleanup(nfc, DMA_TO_DEVICE, &sg);
+
+ if (ret)
+ return ret;
+
+ if (oob_required || (nand->options & NAND_NEED_SCRAMBLING))
+		/* TODO: use DMA to transfer extra OOB bytes? */
+ sunxi_nfc_hw_ecc_write_extra_oob(nand, nand->oob_poi,
+ NULL, page);
+
+ return nand_prog_page_end_op(nand);
+
+pio_fallback:
+ return sunxi_nfc_hw_ecc_write_page(nand, buf, oob_required, page);
+}
+
+static int sunxi_nfc_hw_ecc_read_oob(struct nand_chip *nand, int page)
+{
+ u8 *buf = nand_get_data_buf(nand);
+
+ return nand->ecc.read_page(nand, buf, 1, page);
+}
+
+static int sunxi_nfc_hw_ecc_write_oob(struct nand_chip *nand, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ u8 *buf = nand_get_data_buf(nand);
+ int ret;
+
+ memset(buf, 0xff, mtd->writesize);
+ ret = nand->ecc.write_page(nand, buf, 1, page);
+ if (ret)
+ return ret;
+
+ /* Send command to program the OOB data */
+ return nand_prog_page_end_op(nand);
+}
+
+static const s32 tWB_lut[] = {6, 12, 16, 20};
+static const s32 tRHW_lut[] = {4, 8, 12, 20};
+
+static int _sunxi_nand_lookup_timing(const s32 *lut, int lut_size, u32 duration,
+ u32 clk_period)
+{
+ u32 clk_cycles = DIV_ROUND_UP(duration, clk_period);
+ int i;
+
+ for (i = 0; i < lut_size; i++) {
+ if (clk_cycles <= lut[i])
+ return i;
+ }
+
+ /* Doesn't fit */
+ return -EINVAL;
+}
+
+#define sunxi_nand_lookup_timing(l, p, c) \
+ _sunxi_nand_lookup_timing(l, ARRAY_SIZE(l), p, c)
+
+static int sunxi_nfc_setup_interface(struct nand_chip *nand, int csline,
+ const struct nand_interface_config *conf)
+{
+ struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
+ struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
+ const struct nand_sdr_timings *timings;
+ u32 min_clk_period = 0;
+ s32 tWB, tADL, tWHR, tRHW, tCAD;
+ long real_clk_rate;
+
+ timings = nand_get_sdr_timings(conf);
+ if (IS_ERR(timings))
+ return -ENOTSUPP;
+
+ /* T1 <=> tCLS */
+ if (timings->tCLS_min > min_clk_period)
+ min_clk_period = timings->tCLS_min;
+
+ /* T2 <=> tCLH */
+ if (timings->tCLH_min > min_clk_period)
+ min_clk_period = timings->tCLH_min;
+
+ /* T3 <=> tCS */
+ if (timings->tCS_min > min_clk_period)
+ min_clk_period = timings->tCS_min;
+
+ /* T4 <=> tCH */
+ if (timings->tCH_min > min_clk_period)
+ min_clk_period = timings->tCH_min;
+
+ /* T5 <=> tWP */
+ if (timings->tWP_min > min_clk_period)
+ min_clk_period = timings->tWP_min;
+
+ /* T6 <=> tWH */
+ if (timings->tWH_min > min_clk_period)
+ min_clk_period = timings->tWH_min;
+
+ /* T7 <=> tALS */
+ if (timings->tALS_min > min_clk_period)
+ min_clk_period = timings->tALS_min;
+
+ /* T8 <=> tDS */
+ if (timings->tDS_min > min_clk_period)
+ min_clk_period = timings->tDS_min;
+
+ /* T9 <=> tDH */
+ if (timings->tDH_min > min_clk_period)
+ min_clk_period = timings->tDH_min;
+
+ /* T10 <=> tRR */
+ if (timings->tRR_min > (min_clk_period * 3))
+ min_clk_period = DIV_ROUND_UP(timings->tRR_min, 3);
+
+ /* T11 <=> tALH */
+ if (timings->tALH_min > min_clk_period)
+ min_clk_period = timings->tALH_min;
+
+ /* T12 <=> tRP */
+ if (timings->tRP_min > min_clk_period)
+ min_clk_period = timings->tRP_min;
+
+ /* T13 <=> tREH */
+ if (timings->tREH_min > min_clk_period)
+ min_clk_period = timings->tREH_min;
+
+ /* T14 <=> tRC */
+ if (timings->tRC_min > (min_clk_period * 2))
+ min_clk_period = DIV_ROUND_UP(timings->tRC_min, 2);
+
+ /* T15 <=> tWC */
+ if (timings->tWC_min > (min_clk_period * 2))
+ min_clk_period = DIV_ROUND_UP(timings->tWC_min, 2);
+
+ /* T16 - T19 + tCAD */
+ if (timings->tWB_max > (min_clk_period * 20))
+ min_clk_period = DIV_ROUND_UP(timings->tWB_max, 20);
+
+ if (timings->tADL_min > (min_clk_period * 32))
+ min_clk_period = DIV_ROUND_UP(timings->tADL_min, 32);
+
+ if (timings->tWHR_min > (min_clk_period * 32))
+ min_clk_period = DIV_ROUND_UP(timings->tWHR_min, 32);
+
+ if (timings->tRHW_min > (min_clk_period * 20))
+ min_clk_period = DIV_ROUND_UP(timings->tRHW_min, 20);
+
+ /*
+ * In non-EDO, tREA should be less than tRP to guarantee that the
+ * controller does not sample the IO lines too early. Unfortunately,
+ * the sunxi NAND controller does not allow us to have different
+ * values for tRP and tREH (tRP = tREH = tRW / 2).
+ *
+ * We have 2 options to overcome this limitation:
+ *
+ * 1/ Extend tRC to fulfil the tREA <= tRC / 2 constraint
+ * 2/ Use EDO mode (only works if timings->tRLOH > 0)
+ */
+ if (timings->tREA_max > min_clk_period && !timings->tRLOH_min)
+ min_clk_period = timings->tREA_max;
+
+ tWB = sunxi_nand_lookup_timing(tWB_lut, timings->tWB_max,
+ min_clk_period);
+ if (tWB < 0) {
+ dev_err(nfc->dev, "unsupported tWB\n");
+ return tWB;
+ }
+
+ tADL = DIV_ROUND_UP(timings->tADL_min, min_clk_period) >> 3;
+ if (tADL > 3) {
+ dev_err(nfc->dev, "unsupported tADL\n");
+ return -EINVAL;
+ }
+
+ tWHR = DIV_ROUND_UP(timings->tWHR_min, min_clk_period) >> 3;
+ if (tWHR > 3) {
+ dev_err(nfc->dev, "unsupported tWHR\n");
+ return -EINVAL;
+ }
+
+ tRHW = sunxi_nand_lookup_timing(tRHW_lut, timings->tRHW_min,
+ min_clk_period);
+ if (tRHW < 0) {
+ dev_err(nfc->dev, "unsupported tRHW\n");
+ return tRHW;
+ }
+
+ if (csline == NAND_DATA_IFACE_CHECK_ONLY)
+ return 0;
+
+ /*
+	 * TODO: according to ONFI specs this value only applies to DDR NAND,
+ * but Allwinner seems to set this to 0x7. Mimic them for now.
+ */
+ tCAD = 0x7;
+
+ /* TODO: A83 has some more bits for CDQSS, CS, CLHZ, CCS, WC */
+ sunxi_nand->timing_cfg = NFC_TIMING_CFG(tWB, tADL, tWHR, tRHW, tCAD);
+
+ /* Convert min_clk_period from picoseconds to nanoseconds */
+ min_clk_period = DIV_ROUND_UP(min_clk_period, 1000);
+
+ /*
+ * Unlike what is stated in Allwinner datasheet, the clk_rate should
+ * be set to (1 / min_clk_period), and not (2 / min_clk_period).
+ * This new formula was verified with a scope and validated by
+ * Allwinner engineers.
+ */
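+	/* For example, a 25 ns minimum period yields a 40 MHz target rate. */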
+ sunxi_nand->clk_rate = NSEC_PER_SEC / min_clk_period;
+ real_clk_rate = clk_round_rate(nfc->mod_clk, sunxi_nand->clk_rate);
+ if (real_clk_rate <= 0) {
+ dev_err(nfc->dev, "Unable to round clk %lu\n",
+ sunxi_nand->clk_rate);
+ return -EINVAL;
+ }
+
+ sunxi_nand->timing_ctl = 0;
+
+ /*
+ * ONFI specification 3.1, paragraph 4.15.2 dictates that EDO data
+ * output cycle timings shall be used if the host drives tRC less than
+ * 30 ns. We should also use EDO mode if tREA is bigger than tRP.
+ */
+ min_clk_period = NSEC_PER_SEC / real_clk_rate;
+ if (min_clk_period * 2 < 30 || min_clk_period * 1000 < timings->tREA_max)
+ sunxi_nand->timing_ctl = NFC_TIMING_CTL_EDO;
+
+ return 0;
+}
+
+static int sunxi_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct nand_ecc_ctrl *ecc = &nand->ecc;
+
+ if (section >= ecc->steps)
+ return -ERANGE;
+
+ oobregion->offset = section * (ecc->bytes + 4) + 4;
+ oobregion->length = ecc->bytes;
+
+ return 0;
+}
+
+static int sunxi_nand_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct nand_ecc_ctrl *ecc = &nand->ecc;
+
+ if (section > ecc->steps)
+ return -ERANGE;
+
+ /*
+ * The first 2 bytes are used for BB markers, hence we
+ * only have 2 bytes available in the first user data
+ * section.
+ */
+ if (!section && ecc->engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST) {
+ oobregion->offset = 2;
+ oobregion->length = 2;
+
+ return 0;
+ }
+
+ oobregion->offset = section * (ecc->bytes + 4);
+
+ if (section < ecc->steps)
+ oobregion->length = 4;
+ else
+ oobregion->length = mtd->oobsize - oobregion->offset;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops sunxi_nand_ooblayout_ops = {
+ .ecc = sunxi_nand_ooblayout_ecc,
+ .free = sunxi_nand_ooblayout_free,
+};
+
+static void sunxi_nand_hw_ecc_ctrl_cleanup(struct nand_ecc_ctrl *ecc)
+{
+ kfree(ecc->priv);
+}
+
+static int sunxi_nand_hw_ecc_ctrl_init(struct nand_chip *nand,
+ struct nand_ecc_ctrl *ecc,
+ struct device_node *np)
+{
+ static const u8 strengths[] = { 16, 24, 28, 32, 40, 48, 56, 60, 64 };
+ struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct nand_device *nanddev = mtd_to_nanddev(mtd);
+ struct sunxi_nand_hw_ecc *data;
+ int nsectors;
+ int ret;
+ int i;
+
+ if (nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH) {
+ int bytes;
+
+ ecc->size = 1024;
+ nsectors = mtd->writesize / ecc->size;
+
+ /* Reserve 2 bytes for the BBM */
+ bytes = (mtd->oobsize - 2) / nsectors;
+
+		/* 4 non-ECC bytes are added before each ECC byte section */
+ bytes -= 4;
+
+ /* and bytes has to be even. */
+ if (bytes % 2)
+ bytes--;
+
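+		/*
+		 * fls(8 * ecc->size) is the ECC cost per corrected bit
+		 * (14 bits for 1024-byte chunks). For example, a 4 KiB page
+		 * with 256 OOB bytes leaves 58 usable bytes per chunk, i.e.
+		 * a raw strength of 33, rounded down to 32 below.
+		 */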
+ ecc->strength = bytes * 8 / fls(8 * ecc->size);
+
+ for (i = 0; i < ARRAY_SIZE(strengths); i++) {
+ if (strengths[i] > ecc->strength)
+ break;
+ }
+
+ if (!i)
+ ecc->strength = 0;
+ else
+ ecc->strength = strengths[i - 1];
+ }
+
+ if (ecc->size != 512 && ecc->size != 1024)
+ return -EINVAL;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+	/* Prefer 1k ECC chunks over 512-byte ones */
+ if (ecc->size == 512 && mtd->writesize > 512) {
+ ecc->size = 1024;
+ ecc->strength *= 2;
+ }
+
+	/* Round the requested strength up to the closest supported one */
+ for (i = 0; i < ARRAY_SIZE(strengths); i++) {
+ if (ecc->strength <= strengths[i]) {
+ /*
+ * Update ecc->strength value with the actual strength
+ * that will be used by the ECC engine.
+ */
+ ecc->strength = strengths[i];
+ break;
+ }
+ }
+
+ if (i >= ARRAY_SIZE(strengths)) {
+ dev_err(nfc->dev, "unsupported strength\n");
+ ret = -ENOTSUPP;
+ goto err;
+ }
+
+ data->mode = i;
+
+	/* The HW ECC engine always requests ECC bytes for 1024-byte blocks */
+ ecc->bytes = DIV_ROUND_UP(ecc->strength * fls(8 * 1024), 8);
+
+	/* The HW ECC engine always works with an even number of ECC bytes */
+ ecc->bytes = ALIGN(ecc->bytes, 2);
+
+ nsectors = mtd->writesize / ecc->size;
+
+ if (mtd->oobsize < ((ecc->bytes + 4) * nsectors)) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ecc->read_oob = sunxi_nfc_hw_ecc_read_oob;
+ ecc->write_oob = sunxi_nfc_hw_ecc_write_oob;
+ mtd_set_ooblayout(mtd, &sunxi_nand_ooblayout_ops);
+ ecc->priv = data;
+
+ if (nfc->dmac) {
+ ecc->read_page = sunxi_nfc_hw_ecc_read_page_dma;
+ ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage_dma;
+ ecc->write_page = sunxi_nfc_hw_ecc_write_page_dma;
+ nand->options |= NAND_USES_DMA;
+ } else {
+ ecc->read_page = sunxi_nfc_hw_ecc_read_page;
+ ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage;
+ ecc->write_page = sunxi_nfc_hw_ecc_write_page;
+ }
+
+ /* TODO: support DMA for raw accesses and subpage write */
+ ecc->write_subpage = sunxi_nfc_hw_ecc_write_subpage;
+ ecc->read_oob_raw = nand_read_oob_std;
+ ecc->write_oob_raw = nand_write_oob_std;
+
+ return 0;
+
+err:
+ kfree(data);
+
+ return ret;
+}
+
+static void sunxi_nand_ecc_cleanup(struct nand_ecc_ctrl *ecc)
+{
+ switch (ecc->engine_type) {
+ case NAND_ECC_ENGINE_TYPE_ON_HOST:
+ sunxi_nand_hw_ecc_ctrl_cleanup(ecc);
+ break;
+ case NAND_ECC_ENGINE_TYPE_NONE:
+ default:
+ break;
+ }
+}
+
+static int sunxi_nand_attach_chip(struct nand_chip *nand)
+{
+ const struct nand_ecc_props *requirements =
+ nanddev_get_ecc_requirements(&nand->base);
+ struct nand_ecc_ctrl *ecc = &nand->ecc;
+ struct device_node *np = nand_get_flash_node(nand);
+ int ret;
+
+ if (nand->bbt_options & NAND_BBT_USE_FLASH)
+ nand->bbt_options |= NAND_BBT_NO_OOB;
+
+ if (nand->options & NAND_NEED_SCRAMBLING)
+ nand->options |= NAND_NO_SUBPAGE_WRITE;
+
+ nand->options |= NAND_SUBPAGE_READ;
+
+ if (!ecc->size) {
+ ecc->size = requirements->step_size;
+ ecc->strength = requirements->strength;
+ }
+
+ if (!ecc->size || !ecc->strength)
+ return -EINVAL;
+
+ switch (ecc->engine_type) {
+ case NAND_ECC_ENGINE_TYPE_ON_HOST:
+ ret = sunxi_nand_hw_ecc_ctrl_init(nand, ecc, np);
+ if (ret)
+ return ret;
+ break;
+ case NAND_ECC_ENGINE_TYPE_NONE:
+ case NAND_ECC_ENGINE_TYPE_SOFT:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int sunxi_nfc_exec_subop(struct nand_chip *nand,
+ const struct nand_subop *subop)
+{
+ struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+ u32 cmd = 0, extcmd = 0, cnt = 0, addrs[2] = { };
+ unsigned int i, j, remaining, start;
+ void *inbuf = NULL;
+ int ret;
+
+ for (i = 0; i < subop->ninstrs; i++) {
+ const struct nand_op_instr *instr = &subop->instrs[i];
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ if (cmd & NFC_SEND_CMD1) {
+ if (WARN_ON(cmd & NFC_SEND_CMD2))
+ return -EINVAL;
+
+ cmd |= NFC_SEND_CMD2;
+ extcmd |= instr->ctx.cmd.opcode;
+ } else {
+ cmd |= NFC_SEND_CMD1 |
+ NFC_CMD(instr->ctx.cmd.opcode);
+ }
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ remaining = nand_subop_get_num_addr_cyc(subop, i);
+ start = nand_subop_get_addr_start_off(subop, i);
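+			/*
+			 * Pack up to eight address cycles, LSB first, into
+			 * the two 32-bit ADDR registers.
+			 */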
+ for (j = 0; j < 8 && j + start < remaining; j++) {
+ u32 addr = instr->ctx.addr.addrs[j + start];
+
+ addrs[j / 4] |= addr << (j % 4) * 8;
+ }
+
+ if (j)
+ cmd |= NFC_SEND_ADR | NFC_ADR_NUM(j);
+
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ case NAND_OP_DATA_OUT_INSTR:
+ start = nand_subop_get_data_start_off(subop, i);
+ remaining = nand_subop_get_data_len(subop, i);
+ cnt = min_t(u32, remaining, NFC_SRAM_SIZE);
+ cmd |= NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD;
+
+ if (instr->type == NAND_OP_DATA_OUT_INSTR) {
+ cmd |= NFC_ACCESS_DIR;
+ memcpy_toio(nfc->regs + NFC_RAM0_BASE,
+ instr->ctx.data.buf.out + start,
+ cnt);
+ } else {
+ inbuf = instr->ctx.data.buf.in + start;
+ }
+
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ cmd |= NFC_WAIT_FLAG;
+ break;
+ }
+ }
+
+ ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
+ if (ret)
+ return ret;
+
+ if (cmd & NFC_SEND_ADR) {
+ writel(addrs[0], nfc->regs + NFC_REG_ADDR_LOW);
+ writel(addrs[1], nfc->regs + NFC_REG_ADDR_HIGH);
+ }
+
+ if (cmd & NFC_SEND_CMD2)
+ writel(extcmd,
+ nfc->regs +
+ (cmd & NFC_ACCESS_DIR ?
+ NFC_REG_WCMD_SET : NFC_REG_RCMD_SET));
+
+ if (cmd & NFC_DATA_TRANS)
+ writel(cnt, nfc->regs + NFC_REG_CNT);
+
+ writel(cmd, nfc->regs + NFC_REG_CMD);
+
+ ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG,
+ !(cmd & NFC_WAIT_FLAG) && cnt < 64,
+ 0);
+ if (ret)
+ return ret;
+
+ if (inbuf)
+ memcpy_fromio(inbuf, nfc->regs + NFC_RAM0_BASE, cnt);
+
+ return 0;
+}
+
+static int sunxi_nfc_soft_waitrdy(struct nand_chip *nand,
+ const struct nand_subop *subop)
+{
+ return nand_soft_waitrdy(nand,
+ subop->instrs[0].ctx.waitrdy.timeout_ms);
+}
+
+static const struct nand_op_parser sunxi_nfc_op_parser = NAND_OP_PARSER(
+ NAND_OP_PARSER_PATTERN(sunxi_nfc_exec_subop,
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 1024)),
+ NAND_OP_PARSER_PATTERN(sunxi_nfc_exec_subop,
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8),
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(true, 1024),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
+);
+
+static const struct nand_op_parser sunxi_nfc_norb_op_parser = NAND_OP_PARSER(
+ NAND_OP_PARSER_PATTERN(sunxi_nfc_exec_subop,
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 1024)),
+ NAND_OP_PARSER_PATTERN(sunxi_nfc_exec_subop,
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8),
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(true, 1024),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true)),
+ NAND_OP_PARSER_PATTERN(sunxi_nfc_soft_waitrdy,
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+);
+
+static int sunxi_nfc_exec_op(struct nand_chip *nand,
+ const struct nand_operation *op, bool check_only)
+{
+ struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
+ const struct nand_op_parser *parser;
+
+ if (!check_only)
+ sunxi_nfc_select_chip(nand, op->cs);
+
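+	/* Chips wired without a R/B pin must emulate wait-ready in software. */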
+ if (sunxi_nand->sels[op->cs].rb >= 0)
+ parser = &sunxi_nfc_op_parser;
+ else
+ parser = &sunxi_nfc_norb_op_parser;
+
+ return nand_op_parser_exec_op(nand, parser, op, check_only);
+}
+
+static const struct nand_controller_ops sunxi_nand_controller_ops = {
+ .attach_chip = sunxi_nand_attach_chip,
+ .setup_interface = sunxi_nfc_setup_interface,
+ .exec_op = sunxi_nfc_exec_op,
+};
+
+static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc,
+ struct device_node *np)
+{
+ struct sunxi_nand_chip *sunxi_nand;
+ struct mtd_info *mtd;
+ struct nand_chip *nand;
+ int nsels;
+ int ret;
+ int i;
+ u32 tmp;
+
+ if (!of_get_property(np, "reg", &nsels))
+ return -EINVAL;
+
+ nsels /= sizeof(u32);
+ if (!nsels) {
+ dev_err(dev, "invalid reg property size\n");
+ return -EINVAL;
+ }
+
+ sunxi_nand = devm_kzalloc(dev, struct_size(sunxi_nand, sels, nsels),
+ GFP_KERNEL);
+ if (!sunxi_nand) {
+ dev_err(dev, "could not allocate chip\n");
+ return -ENOMEM;
+ }
+
+ sunxi_nand->nsels = nsels;
+
+ for (i = 0; i < nsels; i++) {
+ ret = of_property_read_u32_index(np, "reg", i, &tmp);
+ if (ret) {
+ dev_err(dev, "could not retrieve reg property: %d\n",
+ ret);
+ return ret;
+ }
+
+ if (tmp > NFC_MAX_CS) {
+			dev_err(dev,
+				"invalid reg value: %u (max CS = %d)\n",
+				tmp, NFC_MAX_CS);
+ return -EINVAL;
+ }
+
+ if (test_and_set_bit(tmp, &nfc->assigned_cs)) {
+			dev_err(dev, "CS %u already assigned\n", tmp);
+ return -EINVAL;
+ }
+
+ sunxi_nand->sels[i].cs = tmp;
+
+ if (!of_property_read_u32_index(np, "allwinner,rb", i, &tmp) &&
+ tmp < 2)
+ sunxi_nand->sels[i].rb = tmp;
+ else
+ sunxi_nand->sels[i].rb = -1;
+ }
+
+ nand = &sunxi_nand->nand;
+ nand->controller = &nfc->controller;
+ nand->controller->ops = &sunxi_nand_controller_ops;
+
+ /*
+ * Set the ECC mode to the default value in case nothing is specified
+ * in the DT.
+ */
+ nand->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+ nand_set_flash_node(nand, np);
+
+ mtd = nand_to_mtd(nand);
+ mtd->dev.parent = dev;
+
+ ret = nand_scan(nand, nsels);
+ if (ret)
+ return ret;
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret) {
+ dev_err(dev, "failed to register mtd device: %d\n", ret);
+ nand_cleanup(nand);
+ return ret;
+ }
+
+ list_add_tail(&sunxi_nand->node, &nfc->chips);
+
+ return 0;
+}
+
+static int sunxi_nand_chips_init(struct device *dev, struct sunxi_nfc *nfc)
+{
+ struct device_node *np = dev->of_node;
+ struct device_node *nand_np;
+ int nchips = of_get_child_count(np);
+ int ret;
+
+ if (nchips > 8) {
+ dev_err(dev, "too many NAND chips: %d (max = 8)\n", nchips);
+ return -EINVAL;
+ }
+
+ for_each_child_of_node(np, nand_np) {
+ ret = sunxi_nand_chip_init(dev, nfc, nand_np);
+ if (ret) {
+ of_node_put(nand_np);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void sunxi_nand_chips_cleanup(struct sunxi_nfc *nfc)
+{
+ struct sunxi_nand_chip *sunxi_nand;
+ struct nand_chip *chip;
+ int ret;
+
+ while (!list_empty(&nfc->chips)) {
+ sunxi_nand = list_first_entry(&nfc->chips,
+ struct sunxi_nand_chip,
+ node);
+ chip = &sunxi_nand->nand;
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ sunxi_nand_ecc_cleanup(&chip->ecc);
+ list_del(&sunxi_nand->node);
+ }
+}
+
+static int sunxi_nfc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *r;
+ struct sunxi_nfc *nfc;
+ int irq;
+ int ret;
+
+ nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL);
+ if (!nfc)
+ return -ENOMEM;
+
+ nfc->dev = dev;
+ nand_controller_init(&nfc->controller);
+ INIT_LIST_HEAD(&nfc->chips);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ nfc->regs = devm_ioremap_resource(dev, r);
+ if (IS_ERR(nfc->regs))
+ return PTR_ERR(nfc->regs);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ nfc->ahb_clk = devm_clk_get(dev, "ahb");
+ if (IS_ERR(nfc->ahb_clk)) {
+ dev_err(dev, "failed to retrieve ahb clk\n");
+ return PTR_ERR(nfc->ahb_clk);
+ }
+
+ ret = clk_prepare_enable(nfc->ahb_clk);
+ if (ret)
+ return ret;
+
+ nfc->mod_clk = devm_clk_get(dev, "mod");
+ if (IS_ERR(nfc->mod_clk)) {
+ dev_err(dev, "failed to retrieve mod clk\n");
+ ret = PTR_ERR(nfc->mod_clk);
+ goto out_ahb_clk_unprepare;
+ }
+
+ ret = clk_prepare_enable(nfc->mod_clk);
+ if (ret)
+ goto out_ahb_clk_unprepare;
+
+ nfc->reset = devm_reset_control_get_optional_exclusive(dev, "ahb");
+ if (IS_ERR(nfc->reset)) {
+ ret = PTR_ERR(nfc->reset);
+ goto out_mod_clk_unprepare;
+ }
+
+ ret = reset_control_deassert(nfc->reset);
+ if (ret) {
+ dev_err(dev, "reset err %d\n", ret);
+ goto out_mod_clk_unprepare;
+ }
+
+ nfc->caps = of_device_get_match_data(&pdev->dev);
+ if (!nfc->caps) {
+ ret = -EINVAL;
+ goto out_ahb_reset_reassert;
+ }
+
+ ret = sunxi_nfc_rst(nfc);
+ if (ret)
+ goto out_ahb_reset_reassert;
+
+ writel(0, nfc->regs + NFC_REG_INT);
+ ret = devm_request_irq(dev, irq, sunxi_nfc_interrupt,
+ 0, "sunxi-nand", nfc);
+ if (ret)
+ goto out_ahb_reset_reassert;
+
+ nfc->dmac = dma_request_chan(dev, "rxtx");
+ if (IS_ERR(nfc->dmac)) {
+ ret = PTR_ERR(nfc->dmac);
+ if (ret == -EPROBE_DEFER)
+ goto out_ahb_reset_reassert;
+
+ /* Ignore errors to fall back to PIO mode */
+ dev_warn(dev, "failed to request rxtx DMA channel: %d\n", ret);
+ nfc->dmac = NULL;
+ } else {
+ struct dma_slave_config dmac_cfg = { };
+
+ dmac_cfg.src_addr = r->start + nfc->caps->reg_io_data;
+ dmac_cfg.dst_addr = dmac_cfg.src_addr;
+ dmac_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dmac_cfg.dst_addr_width = dmac_cfg.src_addr_width;
+ dmac_cfg.src_maxburst = nfc->caps->dma_maxburst;
+ dmac_cfg.dst_maxburst = nfc->caps->dma_maxburst;
+ dmaengine_slave_config(nfc->dmac, &dmac_cfg);
+
+ if (nfc->caps->extra_mbus_conf)
+ writel(readl(nfc->regs + NFC_REG_CTL) |
+ NFC_DMA_TYPE_NORMAL, nfc->regs + NFC_REG_CTL);
+ }
+
+ platform_set_drvdata(pdev, nfc);
+
+ ret = sunxi_nand_chips_init(dev, nfc);
+ if (ret) {
+ dev_err(dev, "failed to init nand chips\n");
+ goto out_release_dmac;
+ }
+
+ return 0;
+
+out_release_dmac:
+ if (nfc->dmac)
+ dma_release_channel(nfc->dmac);
+out_ahb_reset_reassert:
+ reset_control_assert(nfc->reset);
+out_mod_clk_unprepare:
+ clk_disable_unprepare(nfc->mod_clk);
+out_ahb_clk_unprepare:
+ clk_disable_unprepare(nfc->ahb_clk);
+
+ return ret;
+}
+
+static int sunxi_nfc_remove(struct platform_device *pdev)
+{
+ struct sunxi_nfc *nfc = platform_get_drvdata(pdev);
+
+ sunxi_nand_chips_cleanup(nfc);
+
+ reset_control_assert(nfc->reset);
+
+ if (nfc->dmac)
+ dma_release_channel(nfc->dmac);
+ clk_disable_unprepare(nfc->mod_clk);
+ clk_disable_unprepare(nfc->ahb_clk);
+
+ return 0;
+}
+
+static const struct sunxi_nfc_caps sunxi_nfc_a10_caps = {
+ .reg_io_data = NFC_REG_A10_IO_DATA,
+ .dma_maxburst = 4,
+};
+
+static const struct sunxi_nfc_caps sunxi_nfc_a23_caps = {
+ .extra_mbus_conf = true,
+ .reg_io_data = NFC_REG_A23_IO_DATA,
+ .dma_maxburst = 8,
+};
+
+static const struct of_device_id sunxi_nfc_ids[] = {
+ {
+ .compatible = "allwinner,sun4i-a10-nand",
+ .data = &sunxi_nfc_a10_caps,
+ },
+ {
+ .compatible = "allwinner,sun8i-a23-nand-controller",
+ .data = &sunxi_nfc_a23_caps,
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sunxi_nfc_ids);
+
+static struct platform_driver sunxi_nfc_driver = {
+ .driver = {
+ .name = "sunxi_nand",
+ .of_match_table = sunxi_nfc_ids,
+ },
+ .probe = sunxi_nfc_probe,
+ .remove = sunxi_nfc_remove,
+};
+module_platform_driver(sunxi_nfc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Boris BREZILLON");
+MODULE_DESCRIPTION("Allwinner NAND Flash Controller driver");
+MODULE_ALIAS("platform:sunxi_nand");
diff --git a/drivers/mtd/nand/raw/tango_nand.c b/drivers/mtd/nand/raw/tango_nand.c
new file mode 100644
index 000000000..359187b5a
--- /dev/null
+++ b/drivers/mtd/nand/raw/tango_nand.c
@@ -0,0 +1,727 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2016 Sigma Designs
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+
+/* Offsets relative to chip->base */
+#define PBUS_CMD 0
+#define PBUS_ADDR 4
+#define PBUS_DATA 8
+
+/* Offsets relative to reg_base */
+#define NFC_STATUS 0x00
+#define NFC_FLASH_CMD 0x04
+#define NFC_DEVICE_CFG 0x08
+#define NFC_TIMING1 0x0c
+#define NFC_TIMING2 0x10
+#define NFC_XFER_CFG 0x14
+#define NFC_PKT_0_CFG 0x18
+#define NFC_PKT_N_CFG 0x1c
+#define NFC_BB_CFG 0x20
+#define NFC_ADDR_PAGE 0x24
+#define NFC_ADDR_OFFSET 0x28
+#define NFC_XFER_STATUS 0x2c
+
+/* NFC_STATUS values */
+#define CMD_READY BIT(31)
+
+/* NFC_FLASH_CMD values */
+#define NFC_READ 1
+#define NFC_WRITE 2
+
+/* NFC_XFER_STATUS values */
+#define PAGE_IS_EMPTY BIT(16)
+
+/* Offsets relative to mem_base */
+#define METADATA 0x000
+#define ERROR_REPORT 0x1c0
+
+/*
+ * Error reports are split in two bytes:
+ * byte 0 for the first packet in the page (PKT_0)
+ * byte 1 for other packets in the page (PKT_N, for N > 0)
+ * ERR_COUNT_PKT_N is the max error count over all but the first packet.
+ */
+#define ERR_COUNT_PKT_0(v) (((v) >> 0) & 0x3f)
+#define ERR_COUNT_PKT_N(v) (((v) >> 8) & 0x3f)
+#define DECODE_FAIL_PKT_0(v) (((v) & BIT(7)) == 0)
+#define DECODE_FAIL_PKT_N(v) (((v) & BIT(15)) == 0)
+
+/* Offsets relative to pbus_base */
+#define PBUS_CS_CTRL 0x83c
+#define PBUS_PAD_MODE 0x8f0
+
+/* PBUS_CS_CTRL values */
+#define PBUS_IORDY BIT(31)
+
+/*
+ * PBUS_PAD_MODE values
+ * In raw mode, the driver communicates directly with the NAND chips.
+ * In NFC mode, the NAND Flash controller manages the communication.
+ * We use NFC mode for read and write; raw mode for everything else.
+ */
+#define MODE_RAW 0
+#define MODE_NFC BIT(31)
+
+#define METADATA_SIZE 4
+#define BBM_SIZE 6
+#define FIELD_ORDER 15
+
+#define MAX_CS 4
+
+struct tango_nfc {
+ struct nand_controller hw;
+ void __iomem *reg_base;
+ void __iomem *mem_base;
+ void __iomem *pbus_base;
+ struct tango_chip *chips[MAX_CS];
+ struct dma_chan *chan;
+ int freq_kHz;
+};
+
+#define to_tango_nfc(ptr) container_of(ptr, struct tango_nfc, hw)
+
+struct tango_chip {
+ struct nand_chip nand_chip;
+ void __iomem *base;
+ u32 timing1;
+ u32 timing2;
+ u32 xfer_cfg;
+ u32 pkt_0_cfg;
+ u32 pkt_n_cfg;
+ u32 bb_cfg;
+};
+
+#define to_tango_chip(ptr) container_of(ptr, struct tango_chip, nand_chip)
+
+#define XFER_CFG(cs, page_count, steps, metadata_size) \
+ ((cs) << 24 | (page_count) << 16 | (steps) << 8 | (metadata_size))
+
+#define PKT_CFG(size, strength) ((size) << 16 | (strength))
+
+#define BB_CFG(bb_offset, bb_size) ((bb_offset) << 16 | (bb_size))
+
+#define TIMING(t0, t1, t2, t3) ((t0) << 24 | (t1) << 16 | (t2) << 8 | (t3))
+
+static void tango_select_target(struct nand_chip *chip, unsigned int cs)
+{
+ struct tango_nfc *nfc = to_tango_nfc(chip->controller);
+ struct tango_chip *tchip = to_tango_chip(chip);
+
+ writel_relaxed(tchip->timing1, nfc->reg_base + NFC_TIMING1);
+ writel_relaxed(tchip->timing2, nfc->reg_base + NFC_TIMING2);
+ writel_relaxed(tchip->xfer_cfg, nfc->reg_base + NFC_XFER_CFG);
+ writel_relaxed(tchip->pkt_0_cfg, nfc->reg_base + NFC_PKT_0_CFG);
+ writel_relaxed(tchip->pkt_n_cfg, nfc->reg_base + NFC_PKT_N_CFG);
+ writel_relaxed(tchip->bb_cfg, nfc->reg_base + NFC_BB_CFG);
+}
+
+static int tango_waitrdy(struct nand_chip *chip, unsigned int timeout_ms)
+{
+ struct tango_nfc *nfc = to_tango_nfc(chip->controller);
+ u32 status;
+
+ return readl_relaxed_poll_timeout(nfc->pbus_base + PBUS_CS_CTRL,
+ status, status & PBUS_IORDY, 20,
+ timeout_ms);
+}
+
+static int tango_exec_instr(struct nand_chip *chip,
+ const struct nand_op_instr *instr)
+{
+ struct tango_chip *tchip = to_tango_chip(chip);
+ unsigned int i;
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ writeb_relaxed(instr->ctx.cmd.opcode, tchip->base + PBUS_CMD);
+ return 0;
+ case NAND_OP_ADDR_INSTR:
+ for (i = 0; i < instr->ctx.addr.naddrs; i++)
+ writeb_relaxed(instr->ctx.addr.addrs[i],
+ tchip->base + PBUS_ADDR);
+ return 0;
+ case NAND_OP_DATA_IN_INSTR:
+ ioread8_rep(tchip->base + PBUS_DATA, instr->ctx.data.buf.in,
+ instr->ctx.data.len);
+ return 0;
+ case NAND_OP_DATA_OUT_INSTR:
+ iowrite8_rep(tchip->base + PBUS_DATA, instr->ctx.data.buf.out,
+ instr->ctx.data.len);
+ return 0;
+ case NAND_OP_WAITRDY_INSTR:
+ return tango_waitrdy(chip,
+ instr->ctx.waitrdy.timeout_ms);
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int tango_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ unsigned int i;
+ int ret = 0;
+
+ if (check_only)
+ return 0;
+
+ tango_select_target(chip, op->cs);
+ for (i = 0; i < op->ninstrs; i++) {
+ ret = tango_exec_instr(chip, &op->instrs[i]);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * The controller does not check for bitflips in erased pages,
+ * therefore software must check instead.
+ */
+static int check_erased_page(struct nand_chip *chip, u8 *buf)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u8 *meta = chip->oob_poi + BBM_SIZE;
+ u8 *ecc = chip->oob_poi + BBM_SIZE + METADATA_SIZE;
+ const int ecc_size = chip->ecc.bytes;
+ const int pkt_size = chip->ecc.size;
+ int i, res, meta_len, bitflips = 0;
+
+ for (i = 0; i < chip->ecc.steps; ++i) {
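+		/* Only the first packet's ECC also covers the metadata bytes. */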
+ meta_len = i ? 0 : METADATA_SIZE;
+ res = nand_check_erased_ecc_chunk(buf, pkt_size, ecc, ecc_size,
+ meta, meta_len,
+ chip->ecc.strength);
+ if (res < 0)
+ mtd->ecc_stats.failed++;
+ else
+ mtd->ecc_stats.corrected += res;
+
+ bitflips = max(res, bitflips);
+ buf += pkt_size;
+ ecc += ecc_size;
+ }
+
+ return bitflips;
+}
+
+static int decode_error_report(struct nand_chip *chip)
+{
+ u32 status, res;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct tango_nfc *nfc = to_tango_nfc(chip->controller);
+
+ status = readl_relaxed(nfc->reg_base + NFC_XFER_STATUS);
+ if (status & PAGE_IS_EMPTY)
+ return 0;
+
+ res = readl_relaxed(nfc->mem_base + ERROR_REPORT);
+
+ if (DECODE_FAIL_PKT_0(res) || DECODE_FAIL_PKT_N(res))
+ return -EBADMSG;
+
+ /* ERR_COUNT_PKT_N is max, not sum, but that's all we have */
+ mtd->ecc_stats.corrected +=
+ ERR_COUNT_PKT_0(res) + ERR_COUNT_PKT_N(res);
+
+ return max(ERR_COUNT_PKT_0(res), ERR_COUNT_PKT_N(res));
+}
+
+static void tango_dma_callback(void *arg)
+{
+ complete(arg);
+}
+
+static int do_dma(struct tango_nfc *nfc, enum dma_data_direction dir, int cmd,
+ const void *buf, int len, int page)
+{
+ void __iomem *addr = nfc->reg_base + NFC_STATUS;
+ struct dma_chan *chan = nfc->chan;
+ struct dma_async_tx_descriptor *desc;
+ enum dma_transfer_direction tdir;
+ struct scatterlist sg;
+ struct completion tx_done;
+ int err = -EIO;
+ u32 res, val;
+
+ sg_init_one(&sg, buf, len);
+ if (dma_map_sg(chan->device->dev, &sg, 1, dir) != 1)
+ return -EIO;
+
+ tdir = dir == DMA_TO_DEVICE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
+ desc = dmaengine_prep_slave_sg(chan, &sg, 1, tdir, DMA_PREP_INTERRUPT);
+ if (!desc)
+ goto dma_unmap;
+
+ desc->callback = tango_dma_callback;
+ desc->callback_param = &tx_done;
+ init_completion(&tx_done);
+
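+	/* Hand the pad interface over to the NAND Flash controller for the DMA. */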
+ writel_relaxed(MODE_NFC, nfc->pbus_base + PBUS_PAD_MODE);
+
+ writel_relaxed(page, nfc->reg_base + NFC_ADDR_PAGE);
+ writel_relaxed(0, nfc->reg_base + NFC_ADDR_OFFSET);
+ writel_relaxed(cmd, nfc->reg_base + NFC_FLASH_CMD);
+
+ dmaengine_submit(desc);
+ dma_async_issue_pending(chan);
+
+ res = wait_for_completion_timeout(&tx_done, HZ);
+ if (res > 0)
+ err = readl_poll_timeout(addr, val, val & CMD_READY, 0, 1000);
+
+ writel_relaxed(MODE_RAW, nfc->pbus_base + PBUS_PAD_MODE);
+
+dma_unmap:
+ dma_unmap_sg(chan->device->dev, &sg, 1, dir);
+
+ return err;
+}
+
+static int tango_read_page(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct tango_nfc *nfc = to_tango_nfc(chip->controller);
+ int err, res, len = mtd->writesize;
+
+ tango_select_target(chip, chip->cur_cs);
+ if (oob_required)
+ chip->ecc.read_oob(chip, page);
+
+ err = do_dma(nfc, DMA_FROM_DEVICE, NFC_READ, buf, len, page);
+ if (err)
+ return err;
+
+ res = decode_error_report(chip);
+ if (res < 0) {
+ chip->ecc.read_oob_raw(chip, page);
+ res = check_erased_page(chip, buf);
+ }
+
+ return res;
+}
+
+static int tango_write_page(struct nand_chip *chip, const u8 *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct tango_nfc *nfc = to_tango_nfc(chip->controller);
+ const struct nand_sdr_timings *timings;
+ int err, len = mtd->writesize;
+ u8 status;
+
+ /* Calling tango_write_oob() would send PAGEPROG twice */
+ if (oob_required)
+ return -ENOTSUPP;
+
+ tango_select_target(chip, chip->cur_cs);
+ writel_relaxed(0xffffffff, nfc->mem_base + METADATA);
+ err = do_dma(nfc, DMA_TO_DEVICE, NFC_WRITE, buf, len, page);
+ if (err)
+ return err;
+
+ timings = nand_get_sdr_timings(nand_get_interface_config(chip));
+ err = tango_waitrdy(chip, PSEC_TO_MSEC(timings->tR_max));
+ if (err)
+ return err;
+
+ err = nand_status_op(chip, &status);
+ if (err)
+ return err;
+
+ return (status & NAND_STATUS_FAIL) ? -EIO : 0;
+}
+
+static void aux_read(struct nand_chip *chip, u8 **buf, int len, int *pos)
+{
+ *pos += len;
+
+ if (!*buf) {
+ /* skip over "len" bytes */
+ nand_change_read_column_op(chip, *pos, NULL, 0, false);
+ } else {
+ struct tango_chip *tchip = to_tango_chip(chip);
+
+ ioread8_rep(tchip->base + PBUS_DATA, *buf, len);
+ *buf += len;
+ }
+}
+
+static void aux_write(struct nand_chip *chip, const u8 **buf, int len, int *pos)
+{
+ *pos += len;
+
+ if (!*buf) {
+ /* skip over "len" bytes */
+ nand_change_write_column_op(chip, *pos, NULL, 0, false);
+ } else {
+ struct tango_chip *tchip = to_tango_chip(chip);
+
+ iowrite8_rep(tchip->base + PBUS_DATA, *buf, len);
+ *buf += len;
+ }
+}
+
+/*
+ * Physical page layout (not drawn to scale)
+ *
+ * NB: Bad Block Marker area splits PKT_N in two (N1, N2).
+ *
+ * +---+-----------------+-------+-----+-----------+-----+----+-------+
+ * | M | PKT_0 | ECC_0 | ... | N1 | BBM | N2 | ECC_N |
+ * +---+-----------------+-------+-----+-----------+-----+----+-------+
+ *
+ * Logical page layout:
+ *
+ * +-----+---+-------+-----+-------+
+ * oob = | BBM | M | ECC_0 | ... | ECC_N |
+ * +-----+---+-------+-----+-------+
+ *
+ * +-----------------+-----+-----------------+
+ * buf = | PKT_0 | ... | PKT_N |
+ * +-----------------+-----+-----------------+
+ */
+static void raw_read(struct nand_chip *chip, u8 *buf, u8 *oob)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u8 *oob_orig = oob;
+ const int page_size = mtd->writesize;
+ const int ecc_size = chip->ecc.bytes;
+ const int pkt_size = chip->ecc.size;
+ int pos = 0; /* position within physical page */
+ int rem = page_size; /* bytes remaining until BBM area */
+
+ if (oob)
+ oob += BBM_SIZE;
+
+ aux_read(chip, &oob, METADATA_SIZE, &pos);
+
+ while (rem > pkt_size) {
+ aux_read(chip, &buf, pkt_size, &pos);
+ aux_read(chip, &oob, ecc_size, &pos);
+ rem = page_size - pos;
+ }
+
+ aux_read(chip, &buf, rem, &pos);
+ aux_read(chip, &oob_orig, BBM_SIZE, &pos);
+ aux_read(chip, &buf, pkt_size - rem, &pos);
+ aux_read(chip, &oob, ecc_size, &pos);
+}
+
+static void raw_write(struct nand_chip *chip, const u8 *buf, const u8 *oob)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ const u8 *oob_orig = oob;
+ const int page_size = mtd->writesize;
+ const int ecc_size = chip->ecc.bytes;
+ const int pkt_size = chip->ecc.size;
+ int pos = 0; /* position within physical page */
+ int rem = page_size; /* bytes remaining until BBM area */
+
+ if (oob)
+ oob += BBM_SIZE;
+
+ aux_write(chip, &oob, METADATA_SIZE, &pos);
+
+ while (rem > pkt_size) {
+ aux_write(chip, &buf, pkt_size, &pos);
+ aux_write(chip, &oob, ecc_size, &pos);
+ rem = page_size - pos;
+ }
+
+ aux_write(chip, &buf, rem, &pos);
+ aux_write(chip, &oob_orig, BBM_SIZE, &pos);
+ aux_write(chip, &buf, pkt_size - rem, &pos);
+ aux_write(chip, &oob, ecc_size, &pos);
+}
+
+static int tango_read_page_raw(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ tango_select_target(chip, chip->cur_cs);
+ nand_read_page_op(chip, page, 0, NULL, 0);
+ raw_read(chip, buf, chip->oob_poi);
+ return 0;
+}
+
+static int tango_write_page_raw(struct nand_chip *chip, const u8 *buf,
+ int oob_required, int page)
+{
+ tango_select_target(chip, chip->cur_cs);
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ raw_write(chip, buf, chip->oob_poi);
+ return nand_prog_page_end_op(chip);
+}
+
+static int tango_read_oob(struct nand_chip *chip, int page)
+{
+ tango_select_target(chip, chip->cur_cs);
+ nand_read_page_op(chip, page, 0, NULL, 0);
+ raw_read(chip, NULL, chip->oob_poi);
+ return 0;
+}
+
+static int tango_write_oob(struct nand_chip *chip, int page)
+{
+ tango_select_target(chip, chip->cur_cs);
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ raw_write(chip, NULL, chip->oob_poi);
+ return nand_prog_page_end_op(chip);
+}
+
+static int oob_ecc(struct mtd_info *mtd, int idx, struct mtd_oob_region *res)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+ if (idx >= ecc->steps)
+ return -ERANGE;
+
+ res->offset = BBM_SIZE + METADATA_SIZE + ecc->bytes * idx;
+ res->length = ecc->bytes;
+
+ return 0;
+}
+
+static int oob_free(struct mtd_info *mtd, int idx, struct mtd_oob_region *res)
+{
+ return -ERANGE; /* no free space in spare area */
+}
+
+static const struct mtd_ooblayout_ops tango_nand_ooblayout_ops = {
+ .ecc = oob_ecc,
+ .free = oob_free,
+};
+
+static u32 to_ticks(int kHz, int ps)
+{
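+	/* ticks = f * t = (kHz * 1e3) * (ps * 1e-12) = kHz * ps / NSEC_PER_SEC */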
+ return DIV_ROUND_UP_ULL((u64)kHz * ps, NSEC_PER_SEC);
+}
+
+static int tango_set_timings(struct nand_chip *chip, int csline,
+ const struct nand_interface_config *conf)
+{
+ const struct nand_sdr_timings *sdr = nand_get_sdr_timings(conf);
+ struct tango_nfc *nfc = to_tango_nfc(chip->controller);
+ struct tango_chip *tchip = to_tango_chip(chip);
+ u32 Trdy, Textw, Twc, Twpw, Tacc, Thold, Trpw, Textr;
+ int kHz = nfc->freq_kHz;
+
+ if (IS_ERR(sdr))
+ return PTR_ERR(sdr);
+
+ if (csline == NAND_DATA_IFACE_CHECK_ONLY)
+ return 0;
+
+ Trdy = to_ticks(kHz, sdr->tCEA_max - sdr->tREA_max);
+ Textw = to_ticks(kHz, sdr->tWB_max);
+ Twc = to_ticks(kHz, sdr->tWC_min);
+ Twpw = to_ticks(kHz, sdr->tWC_min - sdr->tWP_min);
+
+ Tacc = to_ticks(kHz, sdr->tREA_max);
+ Thold = to_ticks(kHz, sdr->tREH_min);
+ Trpw = to_ticks(kHz, sdr->tRC_min - sdr->tREH_min);
+ Textr = to_ticks(kHz, sdr->tRHZ_max);
+
+ tchip->timing1 = TIMING(Trdy, Textw, Twc, Twpw);
+ tchip->timing2 = TIMING(Tacc, Thold, Trpw, Textr);
+
+ return 0;
+}
+
+static int tango_attach_chip(struct nand_chip *chip)
+{
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+ ecc->engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+ ecc->algo = NAND_ECC_ALGO_BCH;
+ ecc->bytes = DIV_ROUND_UP(ecc->strength * FIELD_ORDER, BITS_PER_BYTE);
+
+ ecc->read_page_raw = tango_read_page_raw;
+ ecc->write_page_raw = tango_write_page_raw;
+ ecc->read_page = tango_read_page;
+ ecc->write_page = tango_write_page;
+ ecc->read_oob = tango_read_oob;
+ ecc->write_oob = tango_write_oob;
+
+ return 0;
+}
+
+static const struct nand_controller_ops tango_controller_ops = {
+ .attach_chip = tango_attach_chip,
+ .setup_interface = tango_set_timings,
+ .exec_op = tango_exec_op,
+};
+
+static int chip_init(struct device *dev, struct device_node *np)
+{
+ u32 cs;
+ int err, res;
+ struct mtd_info *mtd;
+ struct nand_chip *chip;
+ struct tango_chip *tchip;
+ struct nand_ecc_ctrl *ecc;
+ struct tango_nfc *nfc = dev_get_drvdata(dev);
+
+ tchip = devm_kzalloc(dev, sizeof(*tchip), GFP_KERNEL);
+ if (!tchip)
+ return -ENOMEM;
+
+ res = of_property_count_u32_elems(np, "reg");
+ if (res < 0)
+ return res;
+
+ if (res != 1)
+ return -ENOTSUPP; /* Multi-CS chips are not supported */
+
+ err = of_property_read_u32_index(np, "reg", 0, &cs);
+ if (err)
+ return err;
+
+ if (cs >= MAX_CS)
+ return -EINVAL;
+
+ chip = &tchip->nand_chip;
+ ecc = &chip->ecc;
+ mtd = nand_to_mtd(chip);
+
+ chip->options = NAND_USES_DMA |
+ NAND_NO_SUBPAGE_WRITE |
+ NAND_WAIT_TCCS;
+ chip->controller = &nfc->hw;
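+	/* Each chip select appears to get its own 256-byte window on the peripheral bus. */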
+ tchip->base = nfc->pbus_base + (cs * 256);
+
+ nand_set_flash_node(chip, np);
+ mtd_set_ooblayout(mtd, &tango_nand_ooblayout_ops);
+ mtd->dev.parent = dev;
+
+ err = nand_scan(chip, 1);
+ if (err)
+ return err;
+
+ tchip->xfer_cfg = XFER_CFG(cs, 1, ecc->steps, METADATA_SIZE);
+ tchip->pkt_0_cfg = PKT_CFG(ecc->size + METADATA_SIZE, ecc->strength);
+ tchip->pkt_n_cfg = PKT_CFG(ecc->size, ecc->strength);
+ tchip->bb_cfg = BB_CFG(mtd->writesize, BBM_SIZE);
+
+ err = mtd_device_register(mtd, NULL, 0);
+ if (err) {
+ nand_cleanup(chip);
+ return err;
+ }
+
+ nfc->chips[cs] = tchip;
+
+ return 0;
+}
+
+static int tango_nand_remove(struct platform_device *pdev)
+{
+ struct tango_nfc *nfc = platform_get_drvdata(pdev);
+ struct nand_chip *chip;
+ int cs, ret;
+
+ dma_release_channel(nfc->chan);
+
+ for (cs = 0; cs < MAX_CS; ++cs) {
+ if (nfc->chips[cs]) {
+ chip = &nfc->chips[cs]->nand_chip;
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ }
+ }
+
+ return 0;
+}
+
+static int tango_nand_probe(struct platform_device *pdev)
+{
+ int err;
+ struct clk *clk;
+ struct resource *res;
+ struct tango_nfc *nfc;
+ struct device_node *np;
+
+ nfc = devm_kzalloc(&pdev->dev, sizeof(*nfc), GFP_KERNEL);
+ if (!nfc)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ nfc->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(nfc->reg_base))
+ return PTR_ERR(nfc->reg_base);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ nfc->mem_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(nfc->mem_base))
+ return PTR_ERR(nfc->mem_base);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ nfc->pbus_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(nfc->pbus_base))
+ return PTR_ERR(nfc->pbus_base);
+
+ writel_relaxed(MODE_RAW, nfc->pbus_base + PBUS_PAD_MODE);
+
+ clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ nfc->chan = dma_request_chan(&pdev->dev, "rxtx");
+ if (IS_ERR(nfc->chan))
+ return PTR_ERR(nfc->chan);
+
+ platform_set_drvdata(pdev, nfc);
+ nand_controller_init(&nfc->hw);
+ nfc->hw.ops = &tango_controller_ops;
+ nfc->freq_kHz = clk_get_rate(clk) / 1000;
+
+ for_each_child_of_node(pdev->dev.of_node, np) {
+ err = chip_init(&pdev->dev, np);
+ if (err) {
+ tango_nand_remove(pdev);
+ of_node_put(np);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static const struct of_device_id tango_nand_ids[] = {
+ { .compatible = "sigma,smp8758-nand" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, tango_nand_ids);
+
+static struct platform_driver tango_nand_driver = {
+ .probe = tango_nand_probe,
+ .remove = tango_nand_remove,
+ .driver = {
+ .name = "tango-nand",
+ .of_match_table = tango_nand_ids,
+ },
+};
+
+module_platform_driver(tango_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Sigma Designs");
+MODULE_DESCRIPTION("Tango4 NAND Flash controller driver");
diff --git a/drivers/mtd/nand/raw/tegra_nand.c b/drivers/mtd/nand/raw/tegra_nand.c
new file mode 100644
index 000000000..fbf67722a
--- /dev/null
+++ b/drivers/mtd/nand/raw/tegra_nand.c
@@ -0,0 +1,1249 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Stefan Agner <stefan@agner.ch>
+ * Copyright (C) 2014-2015 Lucas Stach <dev@lynxeye.de>
+ * Copyright (C) 2012 Avionic Design GmbH
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#define COMMAND 0x00
+#define COMMAND_GO BIT(31)
+#define COMMAND_CLE BIT(30)
+#define COMMAND_ALE BIT(29)
+#define COMMAND_PIO BIT(28)
+#define COMMAND_TX BIT(27)
+#define COMMAND_RX BIT(26)
+#define COMMAND_SEC_CMD BIT(25)
+#define COMMAND_AFT_DAT BIT(24)
+#define COMMAND_TRANS_SIZE(size) ((((size) - 1) & 0xf) << 20)
+#define COMMAND_A_VALID BIT(19)
+#define COMMAND_B_VALID BIT(18)
+#define COMMAND_RD_STATUS_CHK BIT(17)
+#define COMMAND_RBSY_CHK BIT(16)
+#define COMMAND_CE(x) BIT(8 + ((x) & 0x7))
+#define COMMAND_CLE_SIZE(size) ((((size) - 1) & 0x3) << 4)
+#define COMMAND_ALE_SIZE(size) ((((size) - 1) & 0xf) << 0)
+
+#define STATUS 0x04
+
+#define ISR 0x08
+#define ISR_CORRFAIL_ERR BIT(24)
+#define ISR_UND BIT(7)
+#define ISR_OVR BIT(6)
+#define ISR_CMD_DONE BIT(5)
+#define ISR_ECC_ERR BIT(4)
+
+#define IER 0x0c
+#define IER_ERR_TRIG_VAL(x) (((x) & 0xf) << 16)
+#define IER_UND BIT(7)
+#define IER_OVR BIT(6)
+#define IER_CMD_DONE BIT(5)
+#define IER_ECC_ERR BIT(4)
+#define IER_GIE BIT(0)
+
+#define CONFIG 0x10
+#define CONFIG_HW_ECC BIT(31)
+#define CONFIG_ECC_SEL BIT(30)
+#define CONFIG_ERR_COR BIT(29)
+#define CONFIG_PIPE_EN BIT(28)
+#define CONFIG_TVAL_4 (0 << 24)
+#define CONFIG_TVAL_6 (1 << 24)
+#define CONFIG_TVAL_8 (2 << 24)
+#define CONFIG_SKIP_SPARE BIT(23)
+#define CONFIG_BUS_WIDTH_16 BIT(21)
+#define CONFIG_COM_BSY BIT(20)
+#define CONFIG_PS_256 (0 << 16)
+#define CONFIG_PS_512 (1 << 16)
+#define CONFIG_PS_1024 (2 << 16)
+#define CONFIG_PS_2048 (3 << 16)
+#define CONFIG_PS_4096 (4 << 16)
+#define CONFIG_SKIP_SPARE_SIZE_4 (0 << 14)
+#define CONFIG_SKIP_SPARE_SIZE_8 (1 << 14)
+#define CONFIG_SKIP_SPARE_SIZE_12 (2 << 14)
+#define CONFIG_SKIP_SPARE_SIZE_16 (3 << 14)
+#define CONFIG_TAG_BYTE_SIZE(x) ((x) & 0xff)
+
+#define TIMING_1 0x14
+#define TIMING_TRP_RESP(x) (((x) & 0xf) << 28)
+#define TIMING_TWB(x) (((x) & 0xf) << 24)
+#define TIMING_TCR_TAR_TRR(x) (((x) & 0xf) << 20)
+#define TIMING_TWHR(x) (((x) & 0xf) << 16)
+#define TIMING_TCS(x) (((x) & 0x3) << 14)
+#define TIMING_TWH(x) (((x) & 0x3) << 12)
+#define TIMING_TWP(x) (((x) & 0xf) << 8)
+#define TIMING_TRH(x) (((x) & 0x3) << 4)
+#define TIMING_TRP(x) (((x) & 0xf) << 0)
+
+#define RESP 0x18
+
+#define TIMING_2 0x1c
+#define TIMING_TADL(x) ((x) & 0xf)
+
+#define CMD_REG1 0x20
+#define CMD_REG2 0x24
+#define ADDR_REG1 0x28
+#define ADDR_REG2 0x2c
+
+#define DMA_MST_CTRL 0x30
+#define DMA_MST_CTRL_GO BIT(31)
+#define DMA_MST_CTRL_IN (0 << 30)
+#define DMA_MST_CTRL_OUT BIT(30)
+#define DMA_MST_CTRL_PERF_EN BIT(29)
+#define DMA_MST_CTRL_IE_DONE BIT(28)
+#define DMA_MST_CTRL_REUSE BIT(27)
+#define DMA_MST_CTRL_BURST_1 (2 << 24)
+#define DMA_MST_CTRL_BURST_4 (3 << 24)
+#define DMA_MST_CTRL_BURST_8 (4 << 24)
+#define DMA_MST_CTRL_BURST_16 (5 << 24)
+#define DMA_MST_CTRL_IS_DONE BIT(20)
+#define DMA_MST_CTRL_EN_A BIT(2)
+#define DMA_MST_CTRL_EN_B BIT(1)
+
+#define DMA_CFG_A 0x34
+#define DMA_CFG_B 0x38
+
+#define FIFO_CTRL 0x3c
+#define FIFO_CTRL_CLR_ALL BIT(3)
+
+#define DATA_PTR 0x40
+#define TAG_PTR 0x44
+#define ECC_PTR 0x48
+
+#define DEC_STATUS 0x4c
+#define DEC_STATUS_A_ECC_FAIL BIT(1)
+#define DEC_STATUS_ERR_COUNT_MASK 0x00ff0000
+#define DEC_STATUS_ERR_COUNT_SHIFT 16
+
+#define HWSTATUS_CMD 0x50
+#define HWSTATUS_MASK 0x54
+#define HWSTATUS_RDSTATUS_MASK(x) (((x) & 0xff) << 24)
+#define HWSTATUS_RDSTATUS_VALUE(x) (((x) & 0xff) << 16)
+#define HWSTATUS_RBSY_MASK(x) (((x) & 0xff) << 8)
+#define HWSTATUS_RBSY_VALUE(x) (((x) & 0xff) << 0)
+
+#define BCH_CONFIG 0xcc
+#define BCH_ENABLE BIT(0)
+#define BCH_TVAL_4 (0 << 4)
+#define BCH_TVAL_8 (1 << 4)
+#define BCH_TVAL_14 (2 << 4)
+#define BCH_TVAL_16 (3 << 4)
+
+#define DEC_STAT_RESULT 0xd0
+#define DEC_STAT_BUF 0xd4
+#define DEC_STAT_BUF_FAIL_SEC_FLAG_MASK 0xff000000
+#define DEC_STAT_BUF_FAIL_SEC_FLAG_SHIFT 24
+#define DEC_STAT_BUF_CORR_SEC_FLAG_MASK 0x00ff0000
+#define DEC_STAT_BUF_CORR_SEC_FLAG_SHIFT 16
+#define DEC_STAT_BUF_MAX_CORR_CNT_MASK 0x00001f00
+#define DEC_STAT_BUF_MAX_CORR_CNT_SHIFT 8
+
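+/* Timing register fields seem to store (cycles - offset); clamp at zero below the offset. */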
+#define OFFSET(val, off) ((val) < (off) ? 0 : (val) - (off))
+
+#define SKIP_SPARE_BYTES 4
+#define BITS_PER_STEP_RS 18
+#define BITS_PER_STEP_BCH 13
+
+#define INT_MASK (IER_UND | IER_OVR | IER_CMD_DONE | IER_GIE)
+#define HWSTATUS_CMD_DEFAULT NAND_STATUS_READY
+#define HWSTATUS_MASK_DEFAULT (HWSTATUS_RDSTATUS_MASK(1) | \
+ HWSTATUS_RDSTATUS_VALUE(0) | \
+ HWSTATUS_RBSY_MASK(NAND_STATUS_READY) | \
+ HWSTATUS_RBSY_VALUE(NAND_STATUS_READY))
+
+struct tegra_nand_controller {
+ struct nand_controller controller;
+ struct device *dev;
+ void __iomem *regs;
+ int irq;
+ struct clk *clk;
+ struct completion command_complete;
+ struct completion dma_complete;
+ bool last_read_error;
+ int cur_cs;
+ struct nand_chip *chip;
+};
+
+struct tegra_nand_chip {
+ struct nand_chip chip;
+ struct gpio_desc *wp_gpio;
+ struct mtd_oob_region ecc;
+ u32 config;
+ u32 config_ecc;
+ u32 bch_config;
+ int cs[1];
+};
+
+static inline struct tegra_nand_controller *
+ to_tegra_ctrl(struct nand_controller *hw_ctrl)
+{
+ return container_of(hw_ctrl, struct tegra_nand_controller, controller);
+}
+
+static inline struct tegra_nand_chip *to_tegra_chip(struct nand_chip *chip)
+{
+ return container_of(chip, struct tegra_nand_chip, chip);
+}
+
+static int tegra_nand_ooblayout_rs_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ int bytes_per_step = DIV_ROUND_UP(BITS_PER_STEP_RS * chip->ecc.strength,
+ BITS_PER_BYTE);
+
+ if (section > 0)
+ return -ERANGE;
+
+ oobregion->offset = SKIP_SPARE_BYTES;
+ oobregion->length = round_up(bytes_per_step * chip->ecc.steps, 4);
+
+ return 0;
+}
+
+static int tegra_nand_ooblayout_no_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ return -ERANGE;
+}
+
+static const struct mtd_ooblayout_ops tegra_nand_oob_rs_ops = {
+ .ecc = tegra_nand_ooblayout_rs_ecc,
+ .free = tegra_nand_ooblayout_no_free,
+};
+
+static int tegra_nand_ooblayout_bch_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ int bytes_per_step = DIV_ROUND_UP(BITS_PER_STEP_BCH * chip->ecc.strength,
+ BITS_PER_BYTE);
+
+ if (section > 0)
+ return -ERANGE;
+
+ oobregion->offset = SKIP_SPARE_BYTES;
+ oobregion->length = round_up(bytes_per_step * chip->ecc.steps, 4);
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops tegra_nand_oob_bch_ops = {
+ .ecc = tegra_nand_ooblayout_bch_ecc,
+ .free = tegra_nand_ooblayout_no_free,
+};
+
+static irqreturn_t tegra_nand_irq(int irq, void *data)
+{
+ struct tegra_nand_controller *ctrl = data;
+ u32 isr, dma;
+
+ isr = readl_relaxed(ctrl->regs + ISR);
+ dma = readl_relaxed(ctrl->regs + DMA_MST_CTRL);
+ dev_dbg(ctrl->dev, "isr %08x\n", isr);
+
+ if (!isr && !(dma & DMA_MST_CTRL_IS_DONE))
+ return IRQ_NONE;
+
+ /*
+	 * The bit name is somewhat misleading: this is also set when
+ * HW ECC was successful. The data sheet states:
+ * Correctable OR Un-correctable errors occurred in the DMA transfer...
+ */
+ if (isr & ISR_CORRFAIL_ERR)
+ ctrl->last_read_error = true;
+
+ if (isr & ISR_CMD_DONE)
+ complete(&ctrl->command_complete);
+
+ if (isr & ISR_UND)
+ dev_err(ctrl->dev, "FIFO underrun\n");
+
+ if (isr & ISR_OVR)
+ dev_err(ctrl->dev, "FIFO overrun\n");
+
+ /* handle DMA interrupts */
+ if (dma & DMA_MST_CTRL_IS_DONE) {
+ writel_relaxed(dma, ctrl->regs + DMA_MST_CTRL);
+ complete(&ctrl->dma_complete);
+ }
+
+ /* clear interrupts */
+ writel_relaxed(isr, ctrl->regs + ISR);
+
+ return IRQ_HANDLED;
+}
+
+static const char * const tegra_nand_reg_names[] = {
+ "COMMAND",
+ "STATUS",
+ "ISR",
+ "IER",
+ "CONFIG",
+ "TIMING",
+ NULL,
+ "TIMING2",
+ "CMD_REG1",
+ "CMD_REG2",
+ "ADDR_REG1",
+ "ADDR_REG2",
+ "DMA_MST_CTRL",
+ "DMA_CFG_A",
+ "DMA_CFG_B",
+ "FIFO_CTRL",
+};
+
+static void tegra_nand_dump_reg(struct tegra_nand_controller *ctrl)
+{
+ u32 reg;
+ int i;
+
+ dev_err(ctrl->dev, "Tegra NAND controller register dump\n");
+ for (i = 0; i < ARRAY_SIZE(tegra_nand_reg_names); i++) {
+ const char *reg_name = tegra_nand_reg_names[i];
+
+ if (!reg_name)
+ continue;
+
+ reg = readl_relaxed(ctrl->regs + (i * 4));
+ dev_err(ctrl->dev, "%s: 0x%08x\n", reg_name, reg);
+ }
+}
+
+static void tegra_nand_controller_abort(struct tegra_nand_controller *ctrl)
+{
+ u32 isr, dma;
+
+ disable_irq(ctrl->irq);
+
+ /* Abort current command/DMA operation */
+ writel_relaxed(0, ctrl->regs + DMA_MST_CTRL);
+ writel_relaxed(0, ctrl->regs + COMMAND);
+
+ /* clear interrupts */
+ isr = readl_relaxed(ctrl->regs + ISR);
+ writel_relaxed(isr, ctrl->regs + ISR);
+ dma = readl_relaxed(ctrl->regs + DMA_MST_CTRL);
+ writel_relaxed(dma, ctrl->regs + DMA_MST_CTRL);
+
+ reinit_completion(&ctrl->command_complete);
+ reinit_completion(&ctrl->dma_complete);
+
+ enable_irq(ctrl->irq);
+}
+
+static int tegra_nand_cmd(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ const struct nand_op_instr *instr;
+ const struct nand_op_instr *instr_data_in = NULL;
+ struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
+ unsigned int op_id, size = 0, offset = 0;
+ bool first_cmd = true;
+ u32 reg, cmd = 0;
+ int ret;
+
+ for (op_id = 0; op_id < subop->ninstrs; op_id++) {
+ unsigned int naddrs, i;
+ const u8 *addrs;
+ u32 addr1 = 0, addr2 = 0;
+
+ instr = &subop->instrs[op_id];
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ if (first_cmd) {
+ cmd |= COMMAND_CLE;
+ writel_relaxed(instr->ctx.cmd.opcode,
+ ctrl->regs + CMD_REG1);
+ } else {
+ cmd |= COMMAND_SEC_CMD;
+ writel_relaxed(instr->ctx.cmd.opcode,
+ ctrl->regs + CMD_REG2);
+ }
+ first_cmd = false;
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ offset = nand_subop_get_addr_start_off(subop, op_id);
+ naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
+ addrs = &instr->ctx.addr.addrs[offset];
+
+ cmd |= COMMAND_ALE | COMMAND_ALE_SIZE(naddrs);
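+			/* The first four address cycles go to ADDR_REG1, any remaining to ADDR_REG2. */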
+ for (i = 0; i < min_t(unsigned int, 4, naddrs); i++)
+ addr1 |= *addrs++ << (BITS_PER_BYTE * i);
+ naddrs -= i;
+ for (i = 0; i < min_t(unsigned int, 4, naddrs); i++)
+ addr2 |= *addrs++ << (BITS_PER_BYTE * i);
+
+ writel_relaxed(addr1, ctrl->regs + ADDR_REG1);
+ writel_relaxed(addr2, ctrl->regs + ADDR_REG2);
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ size = nand_subop_get_data_len(subop, op_id);
+ offset = nand_subop_get_data_start_off(subop, op_id);
+
+ cmd |= COMMAND_TRANS_SIZE(size) | COMMAND_PIO |
+ COMMAND_RX | COMMAND_A_VALID;
+
+ instr_data_in = instr;
+ break;
+
+ case NAND_OP_DATA_OUT_INSTR:
+ size = nand_subop_get_data_len(subop, op_id);
+ offset = nand_subop_get_data_start_off(subop, op_id);
+
+ cmd |= COMMAND_TRANS_SIZE(size) | COMMAND_PIO |
+ COMMAND_TX | COMMAND_A_VALID;
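+			/* PIO data passes through the 32-bit RESP register; the op parser caps these transfers at 4 bytes. */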
+ memcpy(&reg, instr->ctx.data.buf.out + offset, size);
+
+ writel_relaxed(reg, ctrl->regs + RESP);
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ cmd |= COMMAND_RBSY_CHK;
+ break;
+ }
+ }
+
+ cmd |= COMMAND_GO | COMMAND_CE(ctrl->cur_cs);
+ writel_relaxed(cmd, ctrl->regs + COMMAND);
+ ret = wait_for_completion_timeout(&ctrl->command_complete,
+ msecs_to_jiffies(500));
+ if (!ret) {
+ dev_err(ctrl->dev, "COMMAND timeout\n");
+ tegra_nand_dump_reg(ctrl);
+ tegra_nand_controller_abort(ctrl);
+ return -ETIMEDOUT;
+ }
+
+ if (instr_data_in) {
+ reg = readl_relaxed(ctrl->regs + RESP);
+ memcpy(instr_data_in->ctx.data.buf.in + offset, &reg, size);
+ }
+
+ return 0;
+}
+
+static const struct nand_op_parser tegra_nand_op_parser = NAND_OP_PARSER(
+ NAND_OP_PARSER_PATTERN(tegra_nand_cmd,
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
+ NAND_OP_PARSER_PATTERN(tegra_nand_cmd,
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 4)),
+ NAND_OP_PARSER_PATTERN(tegra_nand_cmd,
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 4)),
+ );
+
+static void tegra_nand_select_target(struct nand_chip *chip,
+ unsigned int die_nr)
+{
+ struct tegra_nand_chip *nand = to_tegra_chip(chip);
+ struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
+
+ ctrl->cur_cs = nand->cs[die_nr];
+}
+
+static int tegra_nand_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ if (!check_only)
+ tegra_nand_select_target(chip, op->cs);
+
+ return nand_op_parser_exec_op(chip, &tegra_nand_op_parser, op,
+ check_only);
+}
+
+static void tegra_nand_hw_ecc(struct tegra_nand_controller *ctrl,
+ struct nand_chip *chip, bool enable)
+{
+ struct tegra_nand_chip *nand = to_tegra_chip(chip);
+
+ if (chip->ecc.algo == NAND_ECC_ALGO_BCH && enable)
+ writel_relaxed(nand->bch_config, ctrl->regs + BCH_CONFIG);
+ else
+ writel_relaxed(0, ctrl->regs + BCH_CONFIG);
+
+ if (enable)
+ writel_relaxed(nand->config_ecc, ctrl->regs + CONFIG);
+ else
+ writel_relaxed(nand->config, ctrl->regs + CONFIG);
+}
+
+static int tegra_nand_page_xfer(struct mtd_info *mtd, struct nand_chip *chip,
+ void *buf, void *oob_buf, int oob_len, int page,
+ bool read)
+{
+ struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
+ enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+ dma_addr_t dma_addr = 0, dma_addr_oob = 0;
+ u32 addr1, cmd, dma_ctrl;
+ int ret;
+
+ tegra_nand_select_target(chip, chip->cur_cs);
+
+ if (read) {
+ writel_relaxed(NAND_CMD_READ0, ctrl->regs + CMD_REG1);
+ writel_relaxed(NAND_CMD_READSTART, ctrl->regs + CMD_REG2);
+ } else {
+ writel_relaxed(NAND_CMD_SEQIN, ctrl->regs + CMD_REG1);
+ writel_relaxed(NAND_CMD_PAGEPROG, ctrl->regs + CMD_REG2);
+ }
+ cmd = COMMAND_CLE | COMMAND_SEC_CMD;
+
+	/* The lower 16 bits hold the column address, 0 by default */
+ addr1 = page << 16;
+
+ if (!buf)
+ addr1 |= mtd->writesize;
+ writel_relaxed(addr1, ctrl->regs + ADDR_REG1);
+
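+	/* Two column cycles plus two row cycles (three with NAND_ROW_ADDR_3). */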
+ if (chip->options & NAND_ROW_ADDR_3) {
+ writel_relaxed(page >> 16, ctrl->regs + ADDR_REG2);
+ cmd |= COMMAND_ALE | COMMAND_ALE_SIZE(5);
+ } else {
+ cmd |= COMMAND_ALE | COMMAND_ALE_SIZE(4);
+ }
+
+ if (buf) {
+ dma_addr = dma_map_single(ctrl->dev, buf, mtd->writesize, dir);
+ ret = dma_mapping_error(ctrl->dev, dma_addr);
+ if (ret) {
+ dev_err(ctrl->dev, "dma mapping error\n");
+ return -EINVAL;
+ }
+
+ writel_relaxed(mtd->writesize - 1, ctrl->regs + DMA_CFG_A);
+ writel_relaxed(dma_addr, ctrl->regs + DATA_PTR);
+ }
+
+ if (oob_buf) {
+ dma_addr_oob = dma_map_single(ctrl->dev, oob_buf, mtd->oobsize,
+ dir);
+ ret = dma_mapping_error(ctrl->dev, dma_addr_oob);
+ if (ret) {
+ dev_err(ctrl->dev, "dma mapping error\n");
+ ret = -EINVAL;
+ goto err_unmap_dma_page;
+ }
+
+ writel_relaxed(oob_len - 1, ctrl->regs + DMA_CFG_B);
+ writel_relaxed(dma_addr_oob, ctrl->regs + TAG_PTR);
+ }
+
+ dma_ctrl = DMA_MST_CTRL_GO | DMA_MST_CTRL_PERF_EN |
+ DMA_MST_CTRL_IE_DONE | DMA_MST_CTRL_IS_DONE |
+ DMA_MST_CTRL_BURST_16;
+
+ if (buf)
+ dma_ctrl |= DMA_MST_CTRL_EN_A;
+ if (oob_buf)
+ dma_ctrl |= DMA_MST_CTRL_EN_B;
+
+ if (read)
+ dma_ctrl |= DMA_MST_CTRL_IN | DMA_MST_CTRL_REUSE;
+ else
+ dma_ctrl |= DMA_MST_CTRL_OUT;
+
+ writel_relaxed(dma_ctrl, ctrl->regs + DMA_MST_CTRL);
+
+ cmd |= COMMAND_GO | COMMAND_RBSY_CHK | COMMAND_TRANS_SIZE(9) |
+ COMMAND_CE(ctrl->cur_cs);
+
+ if (buf)
+ cmd |= COMMAND_A_VALID;
+ if (oob_buf)
+ cmd |= COMMAND_B_VALID;
+
+ if (read)
+ cmd |= COMMAND_RX;
+ else
+ cmd |= COMMAND_TX | COMMAND_AFT_DAT;
+
+ writel_relaxed(cmd, ctrl->regs + COMMAND);
+
+ ret = wait_for_completion_timeout(&ctrl->command_complete,
+ msecs_to_jiffies(500));
+ if (!ret) {
+ dev_err(ctrl->dev, "COMMAND timeout\n");
+ tegra_nand_dump_reg(ctrl);
+ tegra_nand_controller_abort(ctrl);
+ ret = -ETIMEDOUT;
+ goto err_unmap_dma;
+ }
+
+ ret = wait_for_completion_timeout(&ctrl->dma_complete,
+ msecs_to_jiffies(500));
+ if (!ret) {
+ dev_err(ctrl->dev, "DMA timeout\n");
+ tegra_nand_dump_reg(ctrl);
+ tegra_nand_controller_abort(ctrl);
+ ret = -ETIMEDOUT;
+ goto err_unmap_dma;
+ }
+ ret = 0;
+
+err_unmap_dma:
+ if (oob_buf)
+ dma_unmap_single(ctrl->dev, dma_addr_oob, mtd->oobsize, dir);
+err_unmap_dma_page:
+ if (buf)
+ dma_unmap_single(ctrl->dev, dma_addr, mtd->writesize, dir);
+
+ return ret;
+}
+
+static int tegra_nand_read_page_raw(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ void *oob_buf = oob_required ? chip->oob_poi : NULL;
+
+ return tegra_nand_page_xfer(mtd, chip, buf, oob_buf,
+ mtd->oobsize, page, true);
+}
+
+static int tegra_nand_write_page_raw(struct nand_chip *chip, const u8 *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ void *oob_buf = oob_required ? chip->oob_poi : NULL;
+
+ return tegra_nand_page_xfer(mtd, chip, (void *)buf, oob_buf,
+ mtd->oobsize, page, false);
+}
+
+static int tegra_nand_read_oob(struct nand_chip *chip, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ return tegra_nand_page_xfer(mtd, chip, NULL, chip->oob_poi,
+ mtd->oobsize, page, true);
+}
+
+static int tegra_nand_write_oob(struct nand_chip *chip, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ return tegra_nand_page_xfer(mtd, chip, NULL, chip->oob_poi,
+ mtd->oobsize, page, false);
+}
+
+static int tegra_nand_read_page_hwecc(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
+ struct tegra_nand_chip *nand = to_tegra_chip(chip);
+ void *oob_buf = oob_required ? chip->oob_poi : NULL;
+ u32 dec_stat, max_corr_cnt;
+ unsigned long fail_sec_flag;
+ int ret;
+
+ tegra_nand_hw_ecc(ctrl, chip, true);
+ ret = tegra_nand_page_xfer(mtd, chip, buf, oob_buf, 0, page, true);
+ tegra_nand_hw_ecc(ctrl, chip, false);
+ if (ret)
+ return ret;
+
+ /* No correctable or un-correctable errors, page must have 0 bitflips */
+ if (!ctrl->last_read_error)
+ return 0;
+
+ /*
+ * Correctable or un-correctable errors occurred. Use DEC_STAT_BUF
+ * which contains information for all ECC selections.
+ *
+	 * Note that since we do not use Command Queues, DEC_RESULT does not
+ * state the number of pages we can read from the DEC_STAT_BUF. But
+ * since CORRFAIL_ERR did occur during page read we do have a valid
+ * result in DEC_STAT_BUF.
+ */
+ ctrl->last_read_error = false;
+ dec_stat = readl_relaxed(ctrl->regs + DEC_STAT_BUF);
+
+ fail_sec_flag = (dec_stat & DEC_STAT_BUF_FAIL_SEC_FLAG_MASK) >>
+ DEC_STAT_BUF_FAIL_SEC_FLAG_SHIFT;
+
+ max_corr_cnt = (dec_stat & DEC_STAT_BUF_MAX_CORR_CNT_MASK) >>
+ DEC_STAT_BUF_MAX_CORR_CNT_SHIFT;
+
+ if (fail_sec_flag) {
+ int bit, max_bitflips = 0;
+
+ /*
+ * Since we do not support subpage writes, a complete page
+ * is either written or not. We can take a shortcut here by
+		 * checking whether any of the sectors was read successfully.
+		 * If at least one sector was read successfully, the page
+		 * must have been written previously. It cannot
+		 * be an erased page.
+ *
+		 * E.g. the controller might return fail_sec_flag with 0x4, which
+ * would mean only the third sector failed to correct. The
+ * page must have been written and the third sector is really
+ * not correctable anymore.
+ */
+ if (fail_sec_flag ^ GENMASK(chip->ecc.steps - 1, 0)) {
+ mtd->ecc_stats.failed += hweight8(fail_sec_flag);
+ return max_corr_cnt;
+ }
+
+ /*
+ * All sectors failed to correct, but the ECC isn't smart
+ * enough to figure out if a page is really just erased.
+ * Read OOB data and check whether data/OOB is completely
+ * erased or if error correction just failed for all sub-
+ * pages.
+ */
+ ret = tegra_nand_read_oob(chip, page);
+ if (ret < 0)
+ return ret;
+
+ for_each_set_bit(bit, &fail_sec_flag, chip->ecc.steps) {
+ u8 *data = buf + (chip->ecc.size * bit);
+ u8 *oob = chip->oob_poi + nand->ecc.offset +
+ (chip->ecc.bytes * bit);
+
+ ret = nand_check_erased_ecc_chunk(data, chip->ecc.size,
+ oob, chip->ecc.bytes,
+ NULL, 0,
+ chip->ecc.strength);
+ if (ret < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += ret;
+ max_bitflips = max(ret, max_bitflips);
+ }
+ }
+
+ return max_t(unsigned int, max_corr_cnt, max_bitflips);
+ } else {
+ int corr_sec_flag;
+
+ corr_sec_flag = (dec_stat & DEC_STAT_BUF_CORR_SEC_FLAG_MASK) >>
+ DEC_STAT_BUF_CORR_SEC_FLAG_SHIFT;
+
+ /*
+ * The value returned in the register is the maximum of
+ * bitflips encountered in any of the ECC regions. As there is
+		 * no way to get the number of bitflips in a specific region,
+		 * we are not able to deliver exact stats, but instead
+ * overestimate the number of corrected bitflips by assuming
+ * that all regions where errors have been corrected
+ * encountered the maximum number of bitflips.
+ */
+ mtd->ecc_stats.corrected += max_corr_cnt * hweight8(corr_sec_flag);
+
+ return max_corr_cnt;
+ }
+}
+
+static int tegra_nand_write_page_hwecc(struct nand_chip *chip, const u8 *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
+ void *oob_buf = oob_required ? chip->oob_poi : NULL;
+ int ret;
+
+ tegra_nand_hw_ecc(ctrl, chip, true);
+ ret = tegra_nand_page_xfer(mtd, chip, (void *)buf, oob_buf,
+ 0, page, false);
+ tegra_nand_hw_ecc(ctrl, chip, false);
+
+ return ret;
+}
+
+static void tegra_nand_setup_timing(struct tegra_nand_controller *ctrl,
+ const struct nand_sdr_timings *timings)
+{
+ /*
+ * The period (and all other timings in this function) is in ps,
+	 * so we need to take care here to avoid integer overflows.
+ */
+ unsigned int rate = clk_get_rate(ctrl->clk) / 1000000;
+ unsigned int period = DIV_ROUND_UP(1000000, rate);
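+	/* rate is in MHz, so period = 1000000 / rate is in picoseconds */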
+ u32 val, reg = 0;
+
+ val = DIV_ROUND_UP(max3(timings->tAR_min, timings->tRR_min,
+ timings->tRC_min), period);
+ reg |= TIMING_TCR_TAR_TRR(OFFSET(val, 3));
+
+ val = DIV_ROUND_UP(max(max(timings->tCS_min, timings->tCH_min),
+ max(timings->tALS_min, timings->tALH_min)),
+ period);
+ reg |= TIMING_TCS(OFFSET(val, 2));
+
+ val = DIV_ROUND_UP(max(timings->tRP_min, timings->tREA_max) + 6000,
+ period);
+ reg |= TIMING_TRP(OFFSET(val, 1)) | TIMING_TRP_RESP(OFFSET(val, 1));
+
+ reg |= TIMING_TWB(OFFSET(DIV_ROUND_UP(timings->tWB_max, period), 1));
+ reg |= TIMING_TWHR(OFFSET(DIV_ROUND_UP(timings->tWHR_min, period), 1));
+ reg |= TIMING_TWH(OFFSET(DIV_ROUND_UP(timings->tWH_min, period), 1));
+ reg |= TIMING_TWP(OFFSET(DIV_ROUND_UP(timings->tWP_min, period), 1));
+ reg |= TIMING_TRH(OFFSET(DIV_ROUND_UP(timings->tREH_min, period), 1));
+
+ writel_relaxed(reg, ctrl->regs + TIMING_1);
+
+ val = DIV_ROUND_UP(timings->tADL_min, period);
+ reg = TIMING_TADL(OFFSET(val, 3));
+
+ writel_relaxed(reg, ctrl->regs + TIMING_2);
+}
+
+static int tegra_nand_setup_interface(struct nand_chip *chip, int csline,
+ const struct nand_interface_config *conf)
+{
+ struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
+ const struct nand_sdr_timings *timings;
+
+ timings = nand_get_sdr_timings(conf);
+ if (IS_ERR(timings))
+ return PTR_ERR(timings);
+
+ if (csline == NAND_DATA_IFACE_CHECK_ONLY)
+ return 0;
+
+ tegra_nand_setup_timing(ctrl, timings);
+
+ return 0;
+}
+
+static const int rs_strength_bootable[] = { 4 };
+static const int rs_strength[] = { 4, 6, 8 };
+static const int bch_strength_bootable[] = { 8, 16 };
+static const int bch_strength[] = { 4, 8, 14, 16 };
+
+static int tegra_nand_get_strength(struct nand_chip *chip, const int *strength,
+ int strength_len, int bits_per_step,
+ int oobsize)
+{
+ struct nand_device *base = mtd_to_nanddev(nand_to_mtd(chip));
+ const struct nand_ecc_props *requirements =
+ nanddev_get_ecc_requirements(base);
+ bool maximize = base->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH;
+ int i;
+
+ /*
+	 * Loop through the available strengths, backwards when trying to
+	 * maximize the BCH strength.
+ */
+ for (i = 0; i < strength_len; i++) {
+ int strength_sel, bytes_per_step, bytes_per_page;
+
+ if (maximize) {
+ strength_sel = strength[strength_len - i - 1];
+ } else {
+ strength_sel = strength[i];
+
+ if (strength_sel < requirements->strength)
+ continue;
+ }
+
+ bytes_per_step = DIV_ROUND_UP(bits_per_step * strength_sel,
+ BITS_PER_BYTE);
+ bytes_per_page = round_up(bytes_per_step * chip->ecc.steps, 4);
+
+ /* Check whether strength fits OOB */
+ if (bytes_per_page < (oobsize - SKIP_SPARE_BYTES))
+ return strength_sel;
+ }
+
+ return -EINVAL;
+}
+
+static int tegra_nand_select_strength(struct nand_chip *chip, int oobsize)
+{
+ const int *strength;
+ int strength_len, bits_per_step;
+
+ switch (chip->ecc.algo) {
+ case NAND_ECC_ALGO_RS:
+ bits_per_step = BITS_PER_STEP_RS;
+ if (chip->options & NAND_IS_BOOT_MEDIUM) {
+ strength = rs_strength_bootable;
+ strength_len = ARRAY_SIZE(rs_strength_bootable);
+ } else {
+ strength = rs_strength;
+ strength_len = ARRAY_SIZE(rs_strength);
+ }
+ break;
+ case NAND_ECC_ALGO_BCH:
+ bits_per_step = BITS_PER_STEP_BCH;
+ if (chip->options & NAND_IS_BOOT_MEDIUM) {
+ strength = bch_strength_bootable;
+ strength_len = ARRAY_SIZE(bch_strength_bootable);
+ } else {
+ strength = bch_strength;
+ strength_len = ARRAY_SIZE(bch_strength);
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return tegra_nand_get_strength(chip, strength, strength_len,
+ bits_per_step, oobsize);
+}
+
+static int tegra_nand_attach_chip(struct nand_chip *chip)
+{
+ struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
+ const struct nand_ecc_props *requirements =
+ nanddev_get_ecc_requirements(&chip->base);
+ struct tegra_nand_chip *nand = to_tegra_chip(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int bits_per_step;
+ int ret;
+
+ if (chip->bbt_options & NAND_BBT_USE_FLASH)
+ chip->bbt_options |= NAND_BBT_NO_OOB;
+
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+ chip->ecc.size = 512;
+ chip->ecc.steps = mtd->writesize / chip->ecc.size;
+ if (requirements->step_size != 512) {
+ dev_err(ctrl->dev, "Unsupported step size %d\n",
+ requirements->step_size);
+ return -EINVAL;
+ }
+
+ chip->ecc.read_page = tegra_nand_read_page_hwecc;
+ chip->ecc.write_page = tegra_nand_write_page_hwecc;
+ chip->ecc.read_page_raw = tegra_nand_read_page_raw;
+ chip->ecc.write_page_raw = tegra_nand_write_page_raw;
+ chip->ecc.read_oob = tegra_nand_read_oob;
+ chip->ecc.write_oob = tegra_nand_write_oob;
+
+ if (chip->options & NAND_BUSWIDTH_16)
+ nand->config |= CONFIG_BUS_WIDTH_16;
+
+ if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN) {
+ if (mtd->writesize < 2048)
+ chip->ecc.algo = NAND_ECC_ALGO_RS;
+ else
+ chip->ecc.algo = NAND_ECC_ALGO_BCH;
+ }
+
+ if (chip->ecc.algo == NAND_ECC_ALGO_BCH && mtd->writesize < 2048) {
+ dev_err(ctrl->dev, "BCH supports 2K or 4K page size only\n");
+ return -EINVAL;
+ }
+
+ if (!chip->ecc.strength) {
+ ret = tegra_nand_select_strength(chip, mtd->oobsize);
+ if (ret < 0) {
+ dev_err(ctrl->dev,
+ "No valid strength found, minimum %d\n",
+ requirements->strength);
+ return ret;
+ }
+
+ chip->ecc.strength = ret;
+ }
+
+ nand->config_ecc = CONFIG_PIPE_EN | CONFIG_SKIP_SPARE |
+ CONFIG_SKIP_SPARE_SIZE_4;
+
+ switch (chip->ecc.algo) {
+ case NAND_ECC_ALGO_RS:
+ bits_per_step = BITS_PER_STEP_RS * chip->ecc.strength;
+ mtd_set_ooblayout(mtd, &tegra_nand_oob_rs_ops);
+ nand->config_ecc |= CONFIG_HW_ECC | CONFIG_ECC_SEL |
+ CONFIG_ERR_COR;
+ switch (chip->ecc.strength) {
+ case 4:
+ nand->config_ecc |= CONFIG_TVAL_4;
+ break;
+ case 6:
+ nand->config_ecc |= CONFIG_TVAL_6;
+ break;
+ case 8:
+ nand->config_ecc |= CONFIG_TVAL_8;
+ break;
+ default:
+ dev_err(ctrl->dev, "ECC strength %d not supported\n",
+ chip->ecc.strength);
+ return -EINVAL;
+ }
+ break;
+ case NAND_ECC_ALGO_BCH:
+ bits_per_step = BITS_PER_STEP_BCH * chip->ecc.strength;
+ mtd_set_ooblayout(mtd, &tegra_nand_oob_bch_ops);
+ nand->bch_config = BCH_ENABLE;
+ switch (chip->ecc.strength) {
+ case 4:
+ nand->bch_config |= BCH_TVAL_4;
+ break;
+ case 8:
+ nand->bch_config |= BCH_TVAL_8;
+ break;
+ case 14:
+ nand->bch_config |= BCH_TVAL_14;
+ break;
+ case 16:
+ nand->bch_config |= BCH_TVAL_16;
+ break;
+ default:
+ dev_err(ctrl->dev, "ECC strength %d not supported\n",
+ chip->ecc.strength);
+ return -EINVAL;
+ }
+ break;
+ default:
+ dev_err(ctrl->dev, "ECC algorithm not supported\n");
+ return -EINVAL;
+ }
+
+ dev_info(ctrl->dev, "Using %s with strength %d per 512 byte step\n",
+ chip->ecc.algo == NAND_ECC_ALGO_BCH ? "BCH" : "RS",
+ chip->ecc.strength);
+
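+	/*
+	 * Illustration (assumed constants): with BCH strength 16 at 13 ECC
+	 * bits per correction step, bits_per_step = 16 * 13 = 208, so
+	 * DIV_ROUND_UP(208, 8) = 26 ECC bytes per 512-byte step.
+	 */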
+ chip->ecc.bytes = DIV_ROUND_UP(bits_per_step, BITS_PER_BYTE);
+
+ switch (mtd->writesize) {
+ case 256:
+ nand->config |= CONFIG_PS_256;
+ break;
+ case 512:
+ nand->config |= CONFIG_PS_512;
+ break;
+ case 1024:
+ nand->config |= CONFIG_PS_1024;
+ break;
+ case 2048:
+ nand->config |= CONFIG_PS_2048;
+ break;
+ case 4096:
+ nand->config |= CONFIG_PS_4096;
+ break;
+ default:
+ dev_err(ctrl->dev, "Unsupported writesize %d\n",
+ mtd->writesize);
+ return -ENODEV;
+ }
+
+ /* Store complete configuration for HW ECC in config_ecc */
+ nand->config_ecc |= nand->config;
+
+ /* Non-HW ECC read/writes complete OOB */
+ nand->config |= CONFIG_TAG_BYTE_SIZE(mtd->oobsize - 1);
+ writel_relaxed(nand->config, ctrl->regs + CONFIG);
+
+ return 0;
+}
+
+static const struct nand_controller_ops tegra_nand_controller_ops = {
+ .attach_chip = &tegra_nand_attach_chip,
+ .exec_op = tegra_nand_exec_op,
+ .setup_interface = tegra_nand_setup_interface,
+};
+
+static int tegra_nand_chips_init(struct device *dev,
+ struct tegra_nand_controller *ctrl)
+{
+ struct device_node *np = dev->of_node;
+ struct device_node *np_nand;
+ int nsels, nchips = of_get_child_count(np);
+ struct tegra_nand_chip *nand;
+ struct mtd_info *mtd;
+ struct nand_chip *chip;
+ int ret;
+ u32 cs;
+
+ if (nchips != 1) {
+ dev_err(dev, "Currently only one NAND chip supported\n");
+ return -EINVAL;
+ }
+
+ np_nand = of_get_next_child(np, NULL);
+
+ nsels = of_property_count_elems_of_size(np_nand, "reg", sizeof(u32));
+ if (nsels != 1) {
+ dev_err(dev, "Missing/invalid reg property\n");
+ return -EINVAL;
+ }
+
+ /* Retrieve CS id, currently only single die NAND supported */
+ ret = of_property_read_u32(np_nand, "reg", &cs);
+ if (ret) {
+ dev_err(dev, "could not retrieve reg property: %d\n", ret);
+ return ret;
+ }
+
+ nand = devm_kzalloc(dev, sizeof(*nand), GFP_KERNEL);
+ if (!nand)
+ return -ENOMEM;
+
+ nand->cs[0] = cs;
+
+ nand->wp_gpio = devm_gpiod_get_optional(dev, "wp", GPIOD_OUT_LOW);
+
+ if (IS_ERR(nand->wp_gpio)) {
+ ret = PTR_ERR(nand->wp_gpio);
+ dev_err(dev, "Failed to request WP GPIO: %d\n", ret);
+ return ret;
+ }
+
+ chip = &nand->chip;
+ chip->controller = &ctrl->controller;
+
+ mtd = nand_to_mtd(chip);
+
+ mtd->dev.parent = dev;
+ mtd->owner = THIS_MODULE;
+
+ nand_set_flash_node(chip, np_nand);
+
+ if (!mtd->name)
+ mtd->name = "tegra_nand";
+
+ chip->options = NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA;
+
+ ret = nand_scan(chip, 1);
+ if (ret)
+ return ret;
+
+ mtd_ooblayout_ecc(mtd, 0, &nand->ecc);
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret) {
+ dev_err(dev, "Failed to register mtd device: %d\n", ret);
+ nand_cleanup(chip);
+ return ret;
+ }
+
+ ctrl->chip = chip;
+
+ return 0;
+}
+
+static int tegra_nand_probe(struct platform_device *pdev)
+{
+ struct reset_control *rst;
+ struct tegra_nand_controller *ctrl;
+ struct resource *res;
+ int err = 0;
+
+ ctrl = devm_kzalloc(&pdev->dev, sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ return -ENOMEM;
+
+ ctrl->dev = &pdev->dev;
+ nand_controller_init(&ctrl->controller);
+ ctrl->controller.ops = &tegra_nand_controller_ops;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ctrl->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ctrl->regs))
+ return PTR_ERR(ctrl->regs);
+
+ rst = devm_reset_control_get(&pdev->dev, "nand");
+ if (IS_ERR(rst))
+ return PTR_ERR(rst);
+
+ ctrl->clk = devm_clk_get(&pdev->dev, "nand");
+ if (IS_ERR(ctrl->clk))
+ return PTR_ERR(ctrl->clk);
+
+ err = clk_prepare_enable(ctrl->clk);
+ if (err)
+ return err;
+
+ err = reset_control_reset(rst);
+ if (err) {
+ dev_err(ctrl->dev, "Failed to reset HW: %d\n", err);
+ goto err_disable_clk;
+ }
+
+ writel_relaxed(HWSTATUS_CMD_DEFAULT, ctrl->regs + HWSTATUS_CMD);
+ writel_relaxed(HWSTATUS_MASK_DEFAULT, ctrl->regs + HWSTATUS_MASK);
+ writel_relaxed(INT_MASK, ctrl->regs + IER);
+
+ init_completion(&ctrl->command_complete);
+ init_completion(&ctrl->dma_complete);
+
+ ctrl->irq = platform_get_irq(pdev, 0);
+ err = devm_request_irq(&pdev->dev, ctrl->irq, tegra_nand_irq, 0,
+ dev_name(&pdev->dev), ctrl);
+ if (err) {
+ dev_err(ctrl->dev, "Failed to get IRQ: %d\n", err);
+ goto err_disable_clk;
+ }
+
+ writel_relaxed(DMA_MST_CTRL_IS_DONE, ctrl->regs + DMA_MST_CTRL);
+
+ err = tegra_nand_chips_init(ctrl->dev, ctrl);
+ if (err)
+ goto err_disable_clk;
+
+ platform_set_drvdata(pdev, ctrl);
+
+ return 0;
+
+err_disable_clk:
+ clk_disable_unprepare(ctrl->clk);
+ return err;
+}
+
+static int tegra_nand_remove(struct platform_device *pdev)
+{
+ struct tegra_nand_controller *ctrl = platform_get_drvdata(pdev);
+ struct nand_chip *chip = ctrl->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ ret = mtd_device_unregister(mtd);
+ if (ret)
+ return ret;
+
+ nand_cleanup(chip);
+
+ clk_disable_unprepare(ctrl->clk);
+
+ return 0;
+}
+
+static const struct of_device_id tegra_nand_of_match[] = {
+ { .compatible = "nvidia,tegra20-nand" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, tegra_nand_of_match);
+
+static struct platform_driver tegra_nand_driver = {
+ .driver = {
+ .name = "tegra-nand",
+ .of_match_table = tegra_nand_of_match,
+ },
+ .probe = tegra_nand_probe,
+ .remove = tegra_nand_remove,
+};
+module_platform_driver(tegra_nand_driver);
+
+MODULE_DESCRIPTION("NVIDIA Tegra NAND driver");
+MODULE_AUTHOR("Thierry Reding <thierry.reding@nvidia.com>");
+MODULE_AUTHOR("Lucas Stach <dev@lynxeye.de>");
+MODULE_AUTHOR("Stefan Agner <stefan@agner.ch>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/raw/tmio_nand.c b/drivers/mtd/nand/raw/tmio_nand.c
new file mode 100644
index 000000000..aa6c7e7bb
--- /dev/null
+++ b/drivers/mtd/nand/raw/tmio_nand.c
@@ -0,0 +1,531 @@
+/*
+ * Toshiba TMIO NAND flash controller driver
+ *
+ * Slightly murky pre-git history of the driver:
+ *
+ * Copyright (c) Ian Molton 2004, 2005, 2008
+ *	Original work, independent of Sharp's code. Included hardware ECC support.
+ * Hard ECC did not work for writes in the early revisions.
+ * Copyright (c) Dirk Opfer 2005.
+ *	Modifications developed from Sharp's code but
+ *	NOT containing any of it, ported onto Ian's base.
+ * Copyright (c) Chris Humbert 2005
+ * Copyright (c) Dmitry Baryshkov 2008
+ * Minor fixes
+ *
+ * Parts copyright Sebastian Carlier
+ *
+ * This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ *
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/tmio.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/partitions.h>
+#include <linux/slab.h>
+
+/*--------------------------------------------------------------------------*/
+
+/*
+ * NAND Flash Host Controller Configuration Register
+ */
+#define CCR_COMMAND 0x04 /* w Command */
+#define CCR_BASE 0x10 /* l NAND Flash Control Reg Base Addr */
+#define CCR_INTP 0x3d /* b Interrupt Pin */
+#define CCR_INTE 0x48 /* b Interrupt Enable */
+#define CCR_EC 0x4a /* b Event Control */
+#define CCR_ICC 0x4c /* b Internal Clock Control */
+#define CCR_ECCC 0x5b /* b ECC Control */
+#define CCR_NFTC 0x60 /* b NAND Flash Transaction Control */
+#define CCR_NFM 0x61 /* b NAND Flash Monitor */
+#define CCR_NFPSC 0x62 /* b NAND Flash Power Supply Control */
+#define CCR_NFDC 0x63 /* b NAND Flash Detect Control */
+
+/*
+ * NAND Flash Control Register
+ */
+#define FCR_DATA 0x00 /* bwl Data Register */
+#define FCR_MODE 0x04 /* b Mode Register */
+#define FCR_STATUS 0x05 /* b Status Register */
+#define FCR_ISR 0x06 /* b Interrupt Status Register */
+#define FCR_IMR 0x07 /* b Interrupt Mask Register */
+
+/* FCR_MODE Register Command List */
+#define FCR_MODE_DATA 0x94 /* Data Data_Mode */
+#define FCR_MODE_COMMAND 0x95 /* Data Command_Mode */
+#define FCR_MODE_ADDRESS 0x96 /* Data Address_Mode */
+
+#define FCR_MODE_HWECC_CALC 0xB4 /* HW-ECC Data */
+#define FCR_MODE_HWECC_RESULT 0xD4 /* HW-ECC Calc result Read_Mode */
+#define FCR_MODE_HWECC_RESET 0xF4 /* HW-ECC Reset */
+
+#define FCR_MODE_POWER_ON 0x0C /* Power Supply ON to SSFDC card */
+#define FCR_MODE_POWER_OFF 0x08 /* Power Supply OFF to SSFDC card */
+
+#define FCR_MODE_LED_OFF 0x00 /* LED OFF */
+#define FCR_MODE_LED_ON 0x04 /* LED ON */
+
+#define FCR_MODE_EJECT_ON 0x68 /* Ejection events active */
+#define FCR_MODE_EJECT_OFF 0x08 /* Ejection events ignored */
+
+#define FCR_MODE_LOCK 0x6C /* Lock_Mode. Eject Switch Invalid */
+#define FCR_MODE_UNLOCK 0x0C /* UnLock_Mode. Eject Switch is valid */
+
+#define FCR_MODE_CONTROLLER_ID 0x40 /* Controller ID Read */
+#define FCR_MODE_STANDBY 0x00 /* SSFDC card Changes Standby State */
+
+#define FCR_MODE_WE 0x80
+#define FCR_MODE_ECC1 0x40
+#define FCR_MODE_ECC0 0x20
+#define FCR_MODE_CE 0x10
+#define FCR_MODE_PCNT1 0x08
+#define FCR_MODE_PCNT0 0x04
+#define FCR_MODE_ALE 0x02
+#define FCR_MODE_CLE 0x01
+
+#define FCR_STATUS_BUSY 0x80
+
+/*--------------------------------------------------------------------------*/
+
+struct tmio_nand {
+ struct nand_controller controller;
+ struct nand_chip chip;
+ struct completion comp;
+
+ struct platform_device *dev;
+
+ void __iomem *ccr;
+ void __iomem *fcr;
+ unsigned long fcr_base;
+
+ unsigned int irq;
+
+ /* for tmio_nand_read_byte */
+ u8 read;
+ unsigned read_good:1;
+};
+
+static inline struct tmio_nand *mtd_to_tmio(struct mtd_info *mtd)
+{
+ return container_of(mtd_to_nand(mtd), struct tmio_nand, chip);
+}
+
+
+/*--------------------------------------------------------------------------*/
+
+static void tmio_nand_hwcontrol(struct nand_chip *chip, int cmd,
+ unsigned int ctrl)
+{
+ struct tmio_nand *tmio = mtd_to_tmio(nand_to_mtd(chip));
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+ u8 mode;
+
+ if (ctrl & NAND_NCE) {
+ mode = FCR_MODE_DATA;
+
+ if (ctrl & NAND_CLE)
+ mode |= FCR_MODE_CLE;
+ else
+ mode &= ~FCR_MODE_CLE;
+
+ if (ctrl & NAND_ALE)
+ mode |= FCR_MODE_ALE;
+ else
+ mode &= ~FCR_MODE_ALE;
+ } else {
+ mode = FCR_MODE_STANDBY;
+ }
+
+ tmio_iowrite8(mode, tmio->fcr + FCR_MODE);
+ tmio->read_good = 0;
+ }
+
+ if (cmd != NAND_CMD_NONE)
+ tmio_iowrite8(cmd, chip->legacy.IO_ADDR_W);
+}
+
+static int tmio_nand_dev_ready(struct nand_chip *chip)
+{
+ struct tmio_nand *tmio = mtd_to_tmio(nand_to_mtd(chip));
+
+ return !(tmio_ioread8(tmio->fcr + FCR_STATUS) & FCR_STATUS_BUSY);
+}
+
+static irqreturn_t tmio_irq(int irq, void *__tmio)
+{
+ struct tmio_nand *tmio = __tmio;
+
+ /* disable RDYREQ interrupt */
+ tmio_iowrite8(0x00, tmio->fcr + FCR_IMR);
+ complete(&tmio->comp);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * The TMIO core has a RDYREQ interrupt on the posedge of #SMRB.
+ * This interrupt is normally disabled, but for long operations like
+ * erase and write, we enable it to wake us up. The irq handler
+ * disables the interrupt.
+ */
+static int tmio_nand_wait(struct nand_chip *nand_chip)
+{
+ struct tmio_nand *tmio = mtd_to_tmio(nand_to_mtd(nand_chip));
+ long timeout;
+ u8 status;
+
+ /* enable RDYREQ interrupt */
+
+ tmio_iowrite8(0x0f, tmio->fcr + FCR_ISR);
+ reinit_completion(&tmio->comp);
+ tmio_iowrite8(0x81, tmio->fcr + FCR_IMR);
+
+	timeout = wait_for_completion_timeout(&tmio->comp,
+					      msecs_to_jiffies(400));
+
+ if (unlikely(!tmio_nand_dev_ready(nand_chip))) {
+ tmio_iowrite8(0x00, tmio->fcr + FCR_IMR);
+ dev_warn(&tmio->dev->dev, "still busy after 400 ms\n");
+
+ } else if (unlikely(!timeout)) {
+ tmio_iowrite8(0x00, tmio->fcr + FCR_IMR);
+ dev_warn(&tmio->dev->dev, "timeout waiting for interrupt\n");
+ }
+
+ nand_status_op(nand_chip, &status);
+ return status;
+}
+
+/*
+ * The TMIO controller combines two 8-bit data bytes into one 16-bit
+ * word. This function separates them so nand_base.c works as expected,
+ * especially its NAND_CMD_READID routines.
+ *
+ * To prevent stale data from being read, tmio_nand_hwcontrol() clears
+ * tmio->read_good.
+ */
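+/*
+ * Illustration (hypothetical ID bytes): if READID clocks out 0x98 then
+ * 0xE6, the controller presents them as the 16-bit word 0xE698; the
+ * first call below returns the low byte (0x98) and caches the high
+ * byte (0xE6) in tmio->read for the next call.
+ */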
+static u_char tmio_nand_read_byte(struct nand_chip *chip)
+{
+ struct tmio_nand *tmio = mtd_to_tmio(nand_to_mtd(chip));
+ unsigned int data;
+
+ if (tmio->read_good--)
+ return tmio->read;
+
+ data = tmio_ioread16(tmio->fcr + FCR_DATA);
+ tmio->read = data >> 8;
+ return data;
+}
+
+/*
+ * The TMIO controller converts an 8-bit NAND interface to a 16-bit
+ * bus interface, so all data reads and writes must be 16-bit wide.
+ * Thus, we implement 16-bit versions of the read and write buffer
+ * functions.
+ */
+static void
+tmio_nand_write_buf(struct nand_chip *chip, const u_char *buf, int len)
+{
+ struct tmio_nand *tmio = mtd_to_tmio(nand_to_mtd(chip));
+
+ tmio_iowrite16_rep(tmio->fcr + FCR_DATA, buf, len >> 1);
+}
+
+static void tmio_nand_read_buf(struct nand_chip *chip, u_char *buf, int len)
+{
+ struct tmio_nand *tmio = mtd_to_tmio(nand_to_mtd(chip));
+
+ tmio_ioread16_rep(tmio->fcr + FCR_DATA, buf, len >> 1);
+}
+
+static void tmio_nand_enable_hwecc(struct nand_chip *chip, int mode)
+{
+ struct tmio_nand *tmio = mtd_to_tmio(nand_to_mtd(chip));
+
+ tmio_iowrite8(FCR_MODE_HWECC_RESET, tmio->fcr + FCR_MODE);
+ tmio_ioread8(tmio->fcr + FCR_DATA); /* dummy read */
+ tmio_iowrite8(FCR_MODE_HWECC_CALC, tmio->fcr + FCR_MODE);
+}
+
+static int tmio_nand_calculate_ecc(struct nand_chip *chip, const u_char *dat,
+ u_char *ecc_code)
+{
+ struct tmio_nand *tmio = mtd_to_tmio(nand_to_mtd(chip));
+ unsigned int ecc;
+
+ tmio_iowrite8(FCR_MODE_HWECC_RESULT, tmio->fcr + FCR_MODE);
+
+ ecc = tmio_ioread16(tmio->fcr + FCR_DATA);
+ ecc_code[1] = ecc; /* 000-255 LP7-0 */
+ ecc_code[0] = ecc >> 8; /* 000-255 LP15-8 */
+ ecc = tmio_ioread16(tmio->fcr + FCR_DATA);
+ ecc_code[2] = ecc; /* 000-255 CP5-0,11b */
+ ecc_code[4] = ecc >> 8; /* 256-511 LP7-0 */
+ ecc = tmio_ioread16(tmio->fcr + FCR_DATA);
+ ecc_code[3] = ecc; /* 256-511 LP15-8 */
+ ecc_code[5] = ecc >> 8; /* 256-511 CP5-0,11b */
+
+ tmio_iowrite8(FCR_MODE_DATA, tmio->fcr + FCR_MODE);
+ return 0;
+}
+
+static int tmio_nand_correct_data(struct nand_chip *chip, unsigned char *buf,
+ unsigned char *read_ecc,
+ unsigned char *calc_ecc)
+{
+ int r0, r1;
+
+ /* assume ecc.size = 512 and ecc.bytes = 6 */
+ r0 = __nand_correct_data(buf, read_ecc, calc_ecc, 256, false);
+ if (r0 < 0)
+ return r0;
+ r1 = __nand_correct_data(buf + 256, read_ecc + 3, calc_ecc + 3, 256,
+ false);
+ if (r1 < 0)
+ return r1;
+ return r0 + r1;
+}
+
+static int tmio_hw_init(struct platform_device *dev, struct tmio_nand *tmio)
+{
+ const struct mfd_cell *cell = mfd_get_cell(dev);
+ int ret;
+
+ if (cell->enable) {
+ ret = cell->enable(dev);
+ if (ret)
+ return ret;
+ }
+
+ /* (4Ch) CLKRUN Enable 1st spcrunc */
+ tmio_iowrite8(0x81, tmio->ccr + CCR_ICC);
+
+ /* (10h)BaseAddress 0x1000 spba.spba2 */
+ tmio_iowrite16(tmio->fcr_base, tmio->ccr + CCR_BASE);
+ tmio_iowrite16(tmio->fcr_base >> 16, tmio->ccr + CCR_BASE + 2);
+
+ /* (04h)Command Register I/O spcmd */
+ tmio_iowrite8(0x02, tmio->ccr + CCR_COMMAND);
+
+ /* (62h) Power Supply Control ssmpwc */
+ /* HardPowerOFF - SuspendOFF - PowerSupplyWait_4MS */
+ tmio_iowrite8(0x02, tmio->ccr + CCR_NFPSC);
+
+ /* (63h) Detect Control ssmdtc */
+ tmio_iowrite8(0x02, tmio->ccr + CCR_NFDC);
+
+ /* Interrupt status register clear sintst */
+ tmio_iowrite8(0x0f, tmio->fcr + FCR_ISR);
+
+ /* After power supply, Media are reset smode */
+ tmio_iowrite8(FCR_MODE_POWER_ON, tmio->fcr + FCR_MODE);
+ tmio_iowrite8(FCR_MODE_COMMAND, tmio->fcr + FCR_MODE);
+ tmio_iowrite8(NAND_CMD_RESET, tmio->fcr + FCR_DATA);
+
+ /* Standby Mode smode */
+ tmio_iowrite8(FCR_MODE_STANDBY, tmio->fcr + FCR_MODE);
+
+ mdelay(5);
+
+ return 0;
+}
+
+static void tmio_hw_stop(struct platform_device *dev, struct tmio_nand *tmio)
+{
+ const struct mfd_cell *cell = mfd_get_cell(dev);
+
+ tmio_iowrite8(FCR_MODE_POWER_OFF, tmio->fcr + FCR_MODE);
+ if (cell->disable)
+ cell->disable(dev);
+}
+
+static int tmio_attach_chip(struct nand_chip *chip)
+{
+ if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
+ return 0;
+
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 6;
+ chip->ecc.strength = 2;
+ chip->ecc.hwctl = tmio_nand_enable_hwecc;
+ chip->ecc.calculate = tmio_nand_calculate_ecc;
+ chip->ecc.correct = tmio_nand_correct_data;
+
+ return 0;
+}
+
+static const struct nand_controller_ops tmio_ops = {
+ .attach_chip = tmio_attach_chip,
+};
+
+static int tmio_probe(struct platform_device *dev)
+{
+ struct tmio_nand_data *data = dev_get_platdata(&dev->dev);
+ struct resource *fcr = platform_get_resource(dev,
+ IORESOURCE_MEM, 0);
+ struct resource *ccr = platform_get_resource(dev,
+ IORESOURCE_MEM, 1);
+ int irq = platform_get_irq(dev, 0);
+ struct tmio_nand *tmio;
+ struct mtd_info *mtd;
+ struct nand_chip *nand_chip;
+ int retval;
+
+ if (data == NULL)
+ dev_warn(&dev->dev, "NULL platform data!\n");
+
+ tmio = devm_kzalloc(&dev->dev, sizeof(*tmio), GFP_KERNEL);
+ if (!tmio)
+ return -ENOMEM;
+
+ init_completion(&tmio->comp);
+
+ tmio->dev = dev;
+
+ platform_set_drvdata(dev, tmio);
+ nand_chip = &tmio->chip;
+ mtd = nand_to_mtd(nand_chip);
+ mtd->name = "tmio-nand";
+ mtd->dev.parent = &dev->dev;
+
+ nand_controller_init(&tmio->controller);
+ tmio->controller.ops = &tmio_ops;
+ nand_chip->controller = &tmio->controller;
+
+ tmio->ccr = devm_ioremap(&dev->dev, ccr->start, resource_size(ccr));
+ if (!tmio->ccr)
+ return -EIO;
+
+ tmio->fcr_base = fcr->start & 0xfffff;
+ tmio->fcr = devm_ioremap(&dev->dev, fcr->start, resource_size(fcr));
+ if (!tmio->fcr)
+ return -EIO;
+
+ retval = tmio_hw_init(dev, tmio);
+ if (retval)
+ return retval;
+
+ /* Set address of NAND IO lines */
+ nand_chip->legacy.IO_ADDR_R = tmio->fcr;
+ nand_chip->legacy.IO_ADDR_W = tmio->fcr;
+
+ /* Set address of hardware control function */
+ nand_chip->legacy.cmd_ctrl = tmio_nand_hwcontrol;
+ nand_chip->legacy.dev_ready = tmio_nand_dev_ready;
+ nand_chip->legacy.read_byte = tmio_nand_read_byte;
+ nand_chip->legacy.write_buf = tmio_nand_write_buf;
+ nand_chip->legacy.read_buf = tmio_nand_read_buf;
+
+ if (data)
+ nand_chip->badblock_pattern = data->badblock_pattern;
+
+ /* 15 us command delay time */
+ nand_chip->legacy.chip_delay = 15;
+
+ retval = devm_request_irq(&dev->dev, irq, &tmio_irq, 0,
+ dev_name(&dev->dev), tmio);
+ if (retval) {
+ dev_err(&dev->dev, "request_irq error %d\n", retval);
+ goto err_irq;
+ }
+
+ tmio->irq = irq;
+ nand_chip->legacy.waitfunc = tmio_nand_wait;
+
+ /* Scan to find existence of the device */
+ retval = nand_scan(nand_chip, 1);
+ if (retval)
+ goto err_irq;
+
+ /* Register the partitions */
+ retval = mtd_device_parse_register(mtd,
+ data ? data->part_parsers : NULL,
+ NULL,
+ data ? data->partition : NULL,
+ data ? data->num_partitions : 0);
+ if (!retval)
+ return retval;
+
+ nand_cleanup(nand_chip);
+
+err_irq:
+ tmio_hw_stop(dev, tmio);
+ return retval;
+}
+
+static int tmio_remove(struct platform_device *dev)
+{
+ struct tmio_nand *tmio = platform_get_drvdata(dev);
+ struct nand_chip *chip = &tmio->chip;
+ int ret;
+
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ tmio_hw_stop(dev, tmio);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int tmio_suspend(struct platform_device *dev, pm_message_t state)
+{
+ const struct mfd_cell *cell = mfd_get_cell(dev);
+
+ if (cell->suspend)
+ cell->suspend(dev);
+
+ tmio_hw_stop(dev, platform_get_drvdata(dev));
+ return 0;
+}
+
+static int tmio_resume(struct platform_device *dev)
+{
+ const struct mfd_cell *cell = mfd_get_cell(dev);
+
+ /* FIXME - is this required or merely another attack of the broken
+ * SHARP platform? Looks suspicious.
+ */
+ tmio_hw_init(dev, platform_get_drvdata(dev));
+
+ if (cell->resume)
+ cell->resume(dev);
+
+ return 0;
+}
+#else
+#define tmio_suspend NULL
+#define tmio_resume NULL
+#endif
+
+static struct platform_driver tmio_driver = {
+ .driver.name = "tmio-nand",
+ .driver.owner = THIS_MODULE,
+ .probe = tmio_probe,
+ .remove = tmio_remove,
+ .suspend = tmio_suspend,
+ .resume = tmio_resume,
+};
+
+module_platform_driver(tmio_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Ian Molton, Dirk Opfer, Chris Humbert, Dmitry Baryshkov");
+MODULE_DESCRIPTION("NAND flash driver on Toshiba Mobile IO controller");
+MODULE_ALIAS("platform:tmio-nand");
diff --git a/drivers/mtd/nand/raw/txx9ndfmc.c b/drivers/mtd/nand/raw/txx9ndfmc.c
new file mode 100644
index 000000000..fe8ed2441
--- /dev/null
+++ b/drivers/mtd/nand/raw/txx9ndfmc.c
@@ -0,0 +1,424 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * TXx9 NAND flash memory controller driver
+ * Based on RBTX49xx patch from CELF patch archive.
+ *
+ * (C) Copyright TOSHIBA CORPORATION 2004-2007
+ * All Rights Reserved.
+ */
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/partitions.h>
+#include <linux/io.h>
+#include <linux/platform_data/txx9/ndfmc.h>
+
+/* TXX9 NDFMC Registers */
+#define TXX9_NDFDTR 0x00
+#define TXX9_NDFMCR 0x04
+#define TXX9_NDFSR 0x08
+#define TXX9_NDFISR 0x0c
+#define TXX9_NDFIMR 0x10
+#define TXX9_NDFSPR 0x14
+#define TXX9_NDFRSTR 0x18 /* not TX4939 */
+
+/* NDFMCR : NDFMC Mode Control */
+#define TXX9_NDFMCR_WE 0x80
+#define TXX9_NDFMCR_ECC_ALL 0x60
+#define TXX9_NDFMCR_ECC_RESET 0x60
+#define TXX9_NDFMCR_ECC_READ 0x40
+#define TXX9_NDFMCR_ECC_ON 0x20
+#define TXX9_NDFMCR_ECC_OFF 0x00
+#define TXX9_NDFMCR_CE 0x10
+#define TXX9_NDFMCR_BSPRT 0x04 /* TX4925/TX4926 only */
+#define TXX9_NDFMCR_ALE 0x02
+#define TXX9_NDFMCR_CLE 0x01
+/* TX4939 only */
+#define TXX9_NDFMCR_X16 0x0400
+#define TXX9_NDFMCR_DMAREQ_MASK 0x0300
+#define TXX9_NDFMCR_DMAREQ_NODMA 0x0000
+#define TXX9_NDFMCR_DMAREQ_128 0x0100
+#define TXX9_NDFMCR_DMAREQ_256 0x0200
+#define TXX9_NDFMCR_DMAREQ_512 0x0300
+#define TXX9_NDFMCR_CS_MASK 0x0c
+#define TXX9_NDFMCR_CS(ch) ((ch) << 2)
+
+/* NDFSR : NDFMC Status */
+#define TXX9_NDFSR_BUSY 0x80
+/* TX4939 only */
+#define TXX9_NDFSR_DMARUN 0x40
+
+/* NDFRSTR : NDFMC Reset */
+#define TXX9_NDFRSTR_RST 0x01
+
+struct txx9ndfmc_priv {
+ struct platform_device *dev;
+ struct nand_chip chip;
+ int cs;
+ const char *mtdname;
+};
+
+#define MAX_TXX9NDFMC_DEV 4
+struct txx9ndfmc_drvdata {
+ struct mtd_info *mtds[MAX_TXX9NDFMC_DEV];
+ void __iomem *base;
+ unsigned char hold; /* in gbusclock */
+ unsigned char spw; /* in gbusclock */
+ struct nand_controller controller;
+};
+
+static struct platform_device *mtd_to_platdev(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct txx9ndfmc_priv *txx9_priv = nand_get_controller_data(chip);
+ return txx9_priv->dev;
+}
+
+static void __iomem *ndregaddr(struct platform_device *dev, unsigned int reg)
+{
+ struct txx9ndfmc_drvdata *drvdata = platform_get_drvdata(dev);
+ struct txx9ndfmc_platform_data *plat = dev_get_platdata(&dev->dev);
+
+ return drvdata->base + (reg << plat->shift);
+}
+
+static u32 txx9ndfmc_read(struct platform_device *dev, unsigned int reg)
+{
+ return __raw_readl(ndregaddr(dev, reg));
+}
+
+static void txx9ndfmc_write(struct platform_device *dev,
+ u32 val, unsigned int reg)
+{
+ __raw_writel(val, ndregaddr(dev, reg));
+}
+
+static uint8_t txx9ndfmc_read_byte(struct nand_chip *chip)
+{
+ struct platform_device *dev = mtd_to_platdev(nand_to_mtd(chip));
+
+ return txx9ndfmc_read(dev, TXX9_NDFDTR);
+}
+
+static void txx9ndfmc_write_buf(struct nand_chip *chip, const uint8_t *buf,
+ int len)
+{
+ struct platform_device *dev = mtd_to_platdev(nand_to_mtd(chip));
+ void __iomem *ndfdtr = ndregaddr(dev, TXX9_NDFDTR);
+ u32 mcr = txx9ndfmc_read(dev, TXX9_NDFMCR);
+
+ txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_WE, TXX9_NDFMCR);
+ while (len--)
+ __raw_writel(*buf++, ndfdtr);
+ txx9ndfmc_write(dev, mcr, TXX9_NDFMCR);
+}
+
+static void txx9ndfmc_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
+{
+ struct platform_device *dev = mtd_to_platdev(nand_to_mtd(chip));
+ void __iomem *ndfdtr = ndregaddr(dev, TXX9_NDFDTR);
+
+ while (len--)
+ *buf++ = __raw_readl(ndfdtr);
+}
+
+static void txx9ndfmc_cmd_ctrl(struct nand_chip *chip, int cmd,
+ unsigned int ctrl)
+{
+ struct txx9ndfmc_priv *txx9_priv = nand_get_controller_data(chip);
+ struct platform_device *dev = txx9_priv->dev;
+ struct txx9ndfmc_platform_data *plat = dev_get_platdata(&dev->dev);
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+ u32 mcr = txx9ndfmc_read(dev, TXX9_NDFMCR);
+
+ mcr &= ~(TXX9_NDFMCR_CLE | TXX9_NDFMCR_ALE | TXX9_NDFMCR_CE);
+ mcr |= ctrl & NAND_CLE ? TXX9_NDFMCR_CLE : 0;
+ mcr |= ctrl & NAND_ALE ? TXX9_NDFMCR_ALE : 0;
+ /* TXX9_NDFMCR_CE bit is 0:high 1:low */
+ mcr |= ctrl & NAND_NCE ? TXX9_NDFMCR_CE : 0;
+ if (txx9_priv->cs >= 0 && (ctrl & NAND_NCE)) {
+ mcr &= ~TXX9_NDFMCR_CS_MASK;
+ mcr |= TXX9_NDFMCR_CS(txx9_priv->cs);
+ }
+ txx9ndfmc_write(dev, mcr, TXX9_NDFMCR);
+ }
+ if (cmd != NAND_CMD_NONE)
+ txx9ndfmc_write(dev, cmd & 0xff, TXX9_NDFDTR);
+ if (plat->flags & NDFMC_PLAT_FLAG_DUMMYWRITE) {
+ /* dummy write to update external latch */
+ if ((ctrl & NAND_CTRL_CHANGE) && cmd == NAND_CMD_NONE)
+ txx9ndfmc_write(dev, 0, TXX9_NDFDTR);
+ }
+}
+
+static int txx9ndfmc_dev_ready(struct nand_chip *chip)
+{
+ struct platform_device *dev = mtd_to_platdev(nand_to_mtd(chip));
+
+ return !(txx9ndfmc_read(dev, TXX9_NDFSR) & TXX9_NDFSR_BUSY);
+}
+
+static int txx9ndfmc_calculate_ecc(struct nand_chip *chip, const uint8_t *dat,
+ uint8_t *ecc_code)
+{
+ struct platform_device *dev = mtd_to_platdev(nand_to_mtd(chip));
+ int eccbytes;
+ u32 mcr = txx9ndfmc_read(dev, TXX9_NDFMCR);
+
+ mcr &= ~TXX9_NDFMCR_ECC_ALL;
+ txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_ECC_OFF, TXX9_NDFMCR);
+ txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_ECC_READ, TXX9_NDFMCR);
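+	/*
+	 * The controller streams the three ECC bytes per 256-byte chunk in
+	 * a different order than __nand_correct_data() expects, hence the
+	 * ecc_code[1]/[0]/[2] shuffle below.
+	 */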
+ for (eccbytes = chip->ecc.bytes; eccbytes > 0; eccbytes -= 3) {
+ ecc_code[1] = txx9ndfmc_read(dev, TXX9_NDFDTR);
+ ecc_code[0] = txx9ndfmc_read(dev, TXX9_NDFDTR);
+ ecc_code[2] = txx9ndfmc_read(dev, TXX9_NDFDTR);
+ ecc_code += 3;
+ }
+ txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_ECC_OFF, TXX9_NDFMCR);
+ return 0;
+}
+
+static int txx9ndfmc_correct_data(struct nand_chip *chip, unsigned char *buf,
+ unsigned char *read_ecc,
+ unsigned char *calc_ecc)
+{
+ int eccsize;
+ int corrected = 0;
+ int stat;
+
+ for (eccsize = chip->ecc.size; eccsize > 0; eccsize -= 256) {
+ stat = __nand_correct_data(buf, read_ecc, calc_ecc, 256,
+ false);
+ if (stat < 0)
+ return stat;
+ corrected += stat;
+ buf += 256;
+ read_ecc += 3;
+ calc_ecc += 3;
+ }
+ return corrected;
+}
+
+static void txx9ndfmc_enable_hwecc(struct nand_chip *chip, int mode)
+{
+ struct platform_device *dev = mtd_to_platdev(nand_to_mtd(chip));
+ u32 mcr = txx9ndfmc_read(dev, TXX9_NDFMCR);
+
+ mcr &= ~TXX9_NDFMCR_ECC_ALL;
+ txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_ECC_RESET, TXX9_NDFMCR);
+ txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_ECC_OFF, TXX9_NDFMCR);
+ txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_ECC_ON, TXX9_NDFMCR);
+}
+
+static void txx9ndfmc_initialize(struct platform_device *dev)
+{
+ struct txx9ndfmc_platform_data *plat = dev_get_platdata(&dev->dev);
+ struct txx9ndfmc_drvdata *drvdata = platform_get_drvdata(dev);
+ int tmout = 100;
+
+	/* Chips without NDFRSTR are reset by a write to NDFSPR instead. */
+	if (!(plat->flags & NDFMC_PLAT_FLAG_NO_RSTR)) {
+ /* reset NDFMC */
+ txx9ndfmc_write(dev,
+ txx9ndfmc_read(dev, TXX9_NDFRSTR) |
+ TXX9_NDFRSTR_RST,
+ TXX9_NDFRSTR);
+ while (txx9ndfmc_read(dev, TXX9_NDFRSTR) & TXX9_NDFRSTR_RST) {
+ if (--tmout == 0) {
+ dev_err(&dev->dev, "reset failed.\n");
+ break;
+ }
+ udelay(1);
+ }
+ }
+ /* setup Hold Time, Strobe Pulse Width */
+ txx9ndfmc_write(dev, (drvdata->hold << 4) | drvdata->spw, TXX9_NDFSPR);
+ txx9ndfmc_write(dev,
+ (plat->flags & NDFMC_PLAT_FLAG_USE_BSPRT) ?
+ TXX9_NDFMCR_BSPRT : 0, TXX9_NDFMCR);
+}
+
+#define TXX9NDFMC_NS_TO_CYC(gbusclk, ns) \
+ DIV_ROUND_UP((ns) * DIV_ROUND_UP(gbusclk, 1000), 1000000)
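+/*
+ * Worked example (illustrative numbers): gbusclk = 200 MHz, ns = 90:
+ * DIV_ROUND_UP(200000000, 1000) = 200000 cycles per ms, and
+ * DIV_ROUND_UP(90 * 200000, 1000000) = 18 gbus clock cycles.
+ */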
+
+static int txx9ndfmc_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
+ return 0;
+
+ chip->ecc.strength = 1;
+
+ if (mtd->writesize >= 512) {
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 6;
+ } else {
+ chip->ecc.size = 256;
+ chip->ecc.bytes = 3;
+ }
+
+ chip->ecc.calculate = txx9ndfmc_calculate_ecc;
+ chip->ecc.correct = txx9ndfmc_correct_data;
+ chip->ecc.hwctl = txx9ndfmc_enable_hwecc;
+
+ return 0;
+}
+
+static const struct nand_controller_ops txx9ndfmc_controller_ops = {
+ .attach_chip = txx9ndfmc_attach_chip,
+};
+
+static int __init txx9ndfmc_probe(struct platform_device *dev)
+{
+ struct txx9ndfmc_platform_data *plat = dev_get_platdata(&dev->dev);
+ int hold, spw;
+ int i;
+ struct txx9ndfmc_drvdata *drvdata;
+ unsigned long gbusclk = plat->gbus_clock;
+ struct resource *res;
+
+ drvdata = devm_kzalloc(&dev->dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ drvdata->base = devm_ioremap_resource(&dev->dev, res);
+ if (IS_ERR(drvdata->base))
+ return PTR_ERR(drvdata->base);
+
+ hold = plat->hold ?: 20; /* tDH */
+ spw = plat->spw ?: 90; /* max(tREADID, tWP, tRP) */
+
+ hold = TXX9NDFMC_NS_TO_CYC(gbusclk, hold);
+ spw = TXX9NDFMC_NS_TO_CYC(gbusclk, spw);
+ if (plat->flags & NDFMC_PLAT_FLAG_HOLDADD)
+ hold -= 2; /* actual hold time : (HOLD + 2) BUSCLK */
+ spw -= 1; /* actual wait time : (SPW + 1) BUSCLK */
+ hold = clamp(hold, 1, 15);
+ drvdata->hold = hold;
+ spw = clamp(spw, 1, 15);
+ drvdata->spw = spw;
+ dev_info(&dev->dev, "CLK:%ldMHz HOLD:%d SPW:%d\n",
+ (gbusclk + 500000) / 1000000, hold, spw);
+
+ nand_controller_init(&drvdata->controller);
+ drvdata->controller.ops = &txx9ndfmc_controller_ops;
+
+ platform_set_drvdata(dev, drvdata);
+ txx9ndfmc_initialize(dev);
+
+ for (i = 0; i < MAX_TXX9NDFMC_DEV; i++) {
+ struct txx9ndfmc_priv *txx9_priv;
+ struct nand_chip *chip;
+ struct mtd_info *mtd;
+
+ if (!(plat->ch_mask & (1 << i)))
+ continue;
+ txx9_priv = kzalloc(sizeof(struct txx9ndfmc_priv),
+ GFP_KERNEL);
+ if (!txx9_priv)
+ continue;
+ chip = &txx9_priv->chip;
+ mtd = nand_to_mtd(chip);
+ mtd->dev.parent = &dev->dev;
+
+ chip->legacy.read_byte = txx9ndfmc_read_byte;
+ chip->legacy.read_buf = txx9ndfmc_read_buf;
+ chip->legacy.write_buf = txx9ndfmc_write_buf;
+ chip->legacy.cmd_ctrl = txx9ndfmc_cmd_ctrl;
+ chip->legacy.dev_ready = txx9ndfmc_dev_ready;
+ chip->legacy.chip_delay = 100;
+ chip->controller = &drvdata->controller;
+
+ nand_set_controller_data(chip, txx9_priv);
+ txx9_priv->dev = dev;
+
+ if (plat->ch_mask != 1) {
+ txx9_priv->cs = i;
+ txx9_priv->mtdname = kasprintf(GFP_KERNEL, "%s.%u",
+ dev_name(&dev->dev), i);
+ } else {
+ txx9_priv->cs = -1;
+ txx9_priv->mtdname = kstrdup(dev_name(&dev->dev),
+ GFP_KERNEL);
+ }
+ if (!txx9_priv->mtdname) {
+ kfree(txx9_priv);
+ dev_err(&dev->dev, "Unable to allocate MTD name.\n");
+ continue;
+ }
+ if (plat->wide_mask & (1 << i))
+ chip->options |= NAND_BUSWIDTH_16;
+
+ if (nand_scan(chip, 1)) {
+ kfree(txx9_priv->mtdname);
+ kfree(txx9_priv);
+ continue;
+ }
+ mtd->name = txx9_priv->mtdname;
+
+ mtd_device_register(mtd, NULL, 0);
+ drvdata->mtds[i] = mtd;
+ }
+
+ return 0;
+}
+
+static int __exit txx9ndfmc_remove(struct platform_device *dev)
+{
+ struct txx9ndfmc_drvdata *drvdata = platform_get_drvdata(dev);
+ int ret, i;
+
+ if (!drvdata)
+ return 0;
+ for (i = 0; i < MAX_TXX9NDFMC_DEV; i++) {
+ struct mtd_info *mtd = drvdata->mtds[i];
+ struct nand_chip *chip;
+ struct txx9ndfmc_priv *txx9_priv;
+
+ if (!mtd)
+ continue;
+ chip = mtd_to_nand(mtd);
+ txx9_priv = nand_get_controller_data(chip);
+
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ kfree(txx9_priv->mtdname);
+ kfree(txx9_priv);
+ }
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int txx9ndfmc_resume(struct platform_device *dev)
+{
+ if (platform_get_drvdata(dev))
+ txx9ndfmc_initialize(dev);
+ return 0;
+}
+#else
+#define txx9ndfmc_resume NULL
+#endif
+
+static struct platform_driver txx9ndfmc_driver = {
+ .remove = __exit_p(txx9ndfmc_remove),
+ .resume = txx9ndfmc_resume,
+ .driver = {
+ .name = "txx9ndfmc",
+ },
+};
+
+module_platform_driver_probe(txx9ndfmc_driver, txx9ndfmc_probe);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("TXx9 SoC NAND flash controller driver");
+MODULE_ALIAS("platform:txx9ndfmc");
diff --git a/drivers/mtd/nand/raw/vf610_nfc.c b/drivers/mtd/nand/raw/vf610_nfc.c
new file mode 100644
index 000000000..40d70f991
--- /dev/null
+++ b/drivers/mtd/nand/raw/vf610_nfc.c
@@ -0,0 +1,967 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2009-2015 Freescale Semiconductor, Inc. and others
+ *
+ * Description: MPC5125, VF610, MCF54418 and Kinetis K70 NAND driver.
+ * Jason ported to M54418TWR and MVFA5 (VF610).
+ * Authors: Stefan Agner <stefan.agner@toradex.com>
+ * Bill Pringlemeir <bpringlemeir@nbsps.com>
+ * Shaohui Xie <b21989@freescale.com>
+ * Jason Jin <Jason.jin@freescale.com>
+ *
+ * Based on original driver mpc5121_nfc.c.
+ *
+ * Limitations:
+ * - Untested on MPC5125 and M54418.
+ * - DMA and pipelining not used.
+ * - 2K pages or less.
+ * - HW ECC: Only 2K page with 64+ OOB.
+ * - HW ECC: Only 24 and 32-bit error correction implemented.
+ */
+
+#include <linux/module.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/swab.h>
+
+#define DRV_NAME "vf610_nfc"
+
+/* Register Offsets */
+#define NFC_FLASH_CMD1 0x3F00
+#define NFC_FLASH_CMD2 0x3F04
+#define NFC_COL_ADDR 0x3F08
+#define NFC_ROW_ADDR 0x3F0c
+#define NFC_ROW_ADDR_INC 0x3F14
+#define NFC_FLASH_STATUS1 0x3F18
+#define NFC_FLASH_STATUS2 0x3F1c
+#define NFC_CACHE_SWAP 0x3F28
+#define NFC_SECTOR_SIZE 0x3F2c
+#define NFC_FLASH_CONFIG 0x3F30
+#define NFC_IRQ_STATUS 0x3F38
+
+/* Addresses for NFC MAIN RAM BUFFER areas */
+#define NFC_MAIN_AREA(n) ((n) * 0x1000)
+
+#define PAGE_2K 0x0800
+#define OOB_64 0x0040
+#define OOB_MAX 0x0100
+
+/* NFC_CMD2[CODE] controller cycle bit masks */
+#define COMMAND_CMD_BYTE1 BIT(14)
+#define COMMAND_CAR_BYTE1 BIT(13)
+#define COMMAND_CAR_BYTE2 BIT(12)
+#define COMMAND_RAR_BYTE1 BIT(11)
+#define COMMAND_RAR_BYTE2 BIT(10)
+#define COMMAND_RAR_BYTE3 BIT(9)
+#define COMMAND_NADDR_BYTES(x) GENMASK(13, 13 - (x) + 1)
+#define COMMAND_WRITE_DATA BIT(8)
+#define COMMAND_CMD_BYTE2 BIT(7)
+#define COMMAND_RB_HANDSHAKE BIT(6)
+#define COMMAND_READ_DATA BIT(5)
+#define COMMAND_CMD_BYTE3 BIT(4)
+#define COMMAND_READ_STATUS BIT(3)
+#define COMMAND_READ_ID BIT(2)
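+/*
+ * e.g. COMMAND_NADDR_BYTES(5) = GENMASK(13, 9): both column address
+ * cycles plus all three row address cycles.
+ */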
+
+/* NFC ECC mode define */
+#define ECC_BYPASS 0
+#define ECC_45_BYTE 6
+#define ECC_60_BYTE 7
+
+/*** Register Mask and bit definitions */
+
+/* NFC_FLASH_CMD1 Field */
+#define CMD_BYTE2_MASK 0xFF000000
+#define CMD_BYTE2_SHIFT 24
+
+/* NFC_FLASH_CMD2 Field */
+#define CMD_BYTE1_MASK 0xFF000000
+#define CMD_BYTE1_SHIFT 24
+#define CMD_CODE_MASK 0x00FFFF00
+#define CMD_CODE_SHIFT 8
+#define BUFNO_MASK 0x00000006
+#define BUFNO_SHIFT 1
+#define START_BIT BIT(0)
+
+/* NFC_COL_ADDR Field */
+#define COL_ADDR_MASK 0x0000FFFF
+#define COL_ADDR_SHIFT 0
+#define COL_ADDR(pos, val) (((val) & 0xFF) << (8 * (pos)))
+
+/* NFC_ROW_ADDR Field */
+#define ROW_ADDR_MASK 0x00FFFFFF
+#define ROW_ADDR_SHIFT 0
+#define ROW_ADDR(pos, val) (((val) & 0xFF) << (8 * (pos)))
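+/*
+ * e.g. page 0x012345 packs as ROW_ADDR(0, 0x45) | ROW_ADDR(1, 0x23) |
+ * ROW_ADDR(2, 0x01) = 0x00012345 (cf. vf610_nfc_fill_row()).
+ */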
+
+#define ROW_ADDR_CHIP_SEL_RB_MASK 0xF0000000
+#define ROW_ADDR_CHIP_SEL_RB_SHIFT 28
+#define ROW_ADDR_CHIP_SEL_MASK 0x0F000000
+#define ROW_ADDR_CHIP_SEL_SHIFT 24
+
+/* NFC_FLASH_STATUS2 Field */
+#define STATUS_BYTE1_MASK 0x000000FF
+
+/* NFC_FLASH_CONFIG Field */
+#define CONFIG_ECC_SRAM_ADDR_MASK 0x7FC00000
+#define CONFIG_ECC_SRAM_ADDR_SHIFT 22
+#define CONFIG_ECC_SRAM_REQ_BIT BIT(21)
+#define CONFIG_DMA_REQ_BIT BIT(20)
+#define CONFIG_ECC_MODE_MASK 0x000E0000
+#define CONFIG_ECC_MODE_SHIFT 17
+#define CONFIG_FAST_FLASH_BIT BIT(16)
+#define CONFIG_16BIT BIT(7)
+#define CONFIG_BOOT_MODE_BIT BIT(6)
+#define CONFIG_ADDR_AUTO_INCR_BIT BIT(5)
+#define CONFIG_BUFNO_AUTO_INCR_BIT BIT(4)
+#define CONFIG_PAGE_CNT_MASK 0xF
+#define CONFIG_PAGE_CNT_SHIFT 0
+
+/* NFC_IRQ_STATUS Field */
+#define IDLE_IRQ_BIT BIT(29)
+#define IDLE_EN_BIT BIT(20)
+#define CMD_DONE_CLEAR_BIT BIT(18)
+#define IDLE_CLEAR_BIT BIT(17)
+
+/*
+ * ECC status - seems to consume 8 bytes (double word). The documented
+ * status byte is located in the lowest byte of the second word (which is
+ * the 4th or 7th byte depending on endianness).
+ * Calculate an offset to store the ECC status at the end of the buffer.
+ */
+#define ECC_SRAM_ADDR (PAGE_2K + OOB_MAX - 8)
+
+#define ECC_STATUS 0x4
+#define ECC_STATUS_MASK 0x80
+#define ECC_STATUS_ERR_COUNT 0x3F
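+/*
+ * e.g. ECC_SRAM_ADDR = 0x800 + 0x100 - 8 = 0x8f8, so the documented
+ * status byte is read at NFC_MAIN_AREA(0) + 0x8f8 + ECC_STATUS = 0x8fc
+ * (cf. vf610_nfc_correct_data()).
+ */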
+
+enum vf610_nfc_variant {
+ NFC_VFC610 = 1,
+};
+
+struct vf610_nfc {
+ struct nand_controller base;
+ struct nand_chip chip;
+ struct device *dev;
+ void __iomem *regs;
+ struct completion cmd_done;
+ /* Status and ID are in alternate locations. */
+ enum vf610_nfc_variant variant;
+ struct clk *clk;
+ /*
+	 * Indicates that user data is being accessed (full page/OOB).
+	 * This tells the driver whether to swap byte endianness.
+ * See comments in vf610_nfc_rd_from_sram/vf610_nfc_wr_to_sram.
+ */
+ bool data_access;
+ u32 ecc_mode;
+};
+
+static inline struct vf610_nfc *chip_to_nfc(struct nand_chip *chip)
+{
+ return container_of(chip, struct vf610_nfc, chip);
+}
+
+static inline u32 vf610_nfc_read(struct vf610_nfc *nfc, uint reg)
+{
+ return readl(nfc->regs + reg);
+}
+
+static inline void vf610_nfc_write(struct vf610_nfc *nfc, uint reg, u32 val)
+{
+ writel(val, nfc->regs + reg);
+}
+
+static inline void vf610_nfc_set(struct vf610_nfc *nfc, uint reg, u32 bits)
+{
+ vf610_nfc_write(nfc, reg, vf610_nfc_read(nfc, reg) | bits);
+}
+
+static inline void vf610_nfc_clear(struct vf610_nfc *nfc, uint reg, u32 bits)
+{
+ vf610_nfc_write(nfc, reg, vf610_nfc_read(nfc, reg) & ~bits);
+}
+
+static inline void vf610_nfc_set_field(struct vf610_nfc *nfc, u32 reg,
+ u32 mask, u32 shift, u32 val)
+{
+ vf610_nfc_write(nfc, reg,
+ (vf610_nfc_read(nfc, reg) & (~mask)) | val << shift);
+}
+
+static inline bool vf610_nfc_kernel_is_little_endian(void)
+{
+#ifdef __LITTLE_ENDIAN
+ return true;
+#else
+ return false;
+#endif
+}
+
+/**
+ * Read accessor for internal SRAM buffer
+ * @dst: destination address in regular memory
+ * @src: source address in SRAM buffer
+ * @len: bytes to copy
+ * @fix_endian: Fix endianness if required
+ *
+ * Use this accessor for the internal SRAM buffers. On the ARM
+ * Freescale Vybrid SoC it's known that the driver can treat
+ * the SRAM buffer as if it were regular memory. Other platforms
+ * might need to treat the buffers differently.
+ *
+ * The controller stores bytes from the NAND chip internally in big
+ * endianness. On little endian platforms such as Vybrid this leads
+ * to reversed byte order.
+ * For performance reasons (and earlier probably due to unawareness)
+ * the driver avoids correcting endianness where it has control over
+ * both the write and the read side (e.g. page-wise data access).
+ */
+static inline void vf610_nfc_rd_from_sram(void *dst, const void __iomem *src,
+ size_t len, bool fix_endian)
+{
+ if (vf610_nfc_kernel_is_little_endian() && fix_endian) {
+ unsigned int i;
+
+ for (i = 0; i < len; i += 4) {
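+			/*
+			 * __raw_readl() + swab32() + memcpy() reverses each
+			 * 4-byte group: SRAM bytes s0 s1 s2 s3 land in dst
+			 * as s3 s2 s1 s0, undoing the controller's
+			 * big-endian storage.
+			 */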
+ u32 val = swab32(__raw_readl(src + i));
+
+ memcpy(dst + i, &val, min(sizeof(val), len - i));
+ }
+ } else {
+ memcpy_fromio(dst, src, len);
+ }
+}
+
+/**
+ * Write accessor for internal SRAM buffer
+ * @dst: destination address in SRAM buffer
+ * @src: source address in regular memory
+ * @len: bytes to copy
+ * @fix_endian: Fix endianness if required
+ *
+ * Use this accessor for the internal SRAM buffers. On the ARM
+ * Freescale Vybrid SoC it's known that the driver can treat
+ * the SRAM buffer as if it were regular memory. Other platforms
+ * might need to treat the buffers differently.
+ *
+ * The controller stores bytes from the NAND chip internally in big
+ * endianness. On little endian platforms such as Vybrid this leads
+ * to reversed byte order.
+ * For performance reasons (and earlier probably due to unawareness)
+ * the driver avoids correcting endianness where it has control over
+ * both the write and the read side (e.g. page-wise data access).
+ */
+static inline void vf610_nfc_wr_to_sram(void __iomem *dst, const void *src,
+ size_t len, bool fix_endian)
+{
+ if (vf610_nfc_kernel_is_little_endian() && fix_endian) {
+ unsigned int i;
+
+ for (i = 0; i < len; i += 4) {
+ u32 val;
+
+ memcpy(&val, src + i, min(sizeof(val), len - i));
+ __raw_writel(swab32(val), dst + i);
+ }
+ } else {
+ memcpy_toio(dst, src, len);
+ }
+}
+
+/* Clear flags for upcoming command */
+static inline void vf610_nfc_clear_status(struct vf610_nfc *nfc)
+{
+ u32 tmp = vf610_nfc_read(nfc, NFC_IRQ_STATUS);
+
+ tmp |= CMD_DONE_CLEAR_BIT | IDLE_CLEAR_BIT;
+ vf610_nfc_write(nfc, NFC_IRQ_STATUS, tmp);
+}
+
+static void vf610_nfc_done(struct vf610_nfc *nfc)
+{
+ unsigned long timeout = msecs_to_jiffies(100);
+
+ /*
+	 * A barrier is needed after this write: it must complete before
+	 * the next register is read for the first time.
+	 * vf610_nfc_set() implies such a barrier by using writel()
+	 * to write to the register.
+ */
+ vf610_nfc_set(nfc, NFC_IRQ_STATUS, IDLE_EN_BIT);
+ vf610_nfc_set(nfc, NFC_FLASH_CMD2, START_BIT);
+
+ if (!wait_for_completion_timeout(&nfc->cmd_done, timeout))
+ dev_warn(nfc->dev, "Timeout while waiting for BUSY.\n");
+
+ vf610_nfc_clear_status(nfc);
+}
+
+static irqreturn_t vf610_nfc_irq(int irq, void *data)
+{
+ struct vf610_nfc *nfc = data;
+
+ vf610_nfc_clear(nfc, NFC_IRQ_STATUS, IDLE_EN_BIT);
+ complete(&nfc->cmd_done);
+
+ return IRQ_HANDLED;
+}
+
+static inline void vf610_nfc_ecc_mode(struct vf610_nfc *nfc, int ecc_mode)
+{
+ vf610_nfc_set_field(nfc, NFC_FLASH_CONFIG,
+ CONFIG_ECC_MODE_MASK,
+ CONFIG_ECC_MODE_SHIFT, ecc_mode);
+}
+
+static inline void vf610_nfc_run(struct vf610_nfc *nfc, u32 col, u32 row,
+ u32 cmd1, u32 cmd2, u32 trfr_sz)
+{
+ vf610_nfc_set_field(nfc, NFC_COL_ADDR, COL_ADDR_MASK,
+ COL_ADDR_SHIFT, col);
+
+ vf610_nfc_set_field(nfc, NFC_ROW_ADDR, ROW_ADDR_MASK,
+ ROW_ADDR_SHIFT, row);
+
+ vf610_nfc_write(nfc, NFC_SECTOR_SIZE, trfr_sz);
+ vf610_nfc_write(nfc, NFC_FLASH_CMD1, cmd1);
+ vf610_nfc_write(nfc, NFC_FLASH_CMD2, cmd2);
+
+ dev_dbg(nfc->dev,
+ "col 0x%04x, row 0x%08x, cmd1 0x%08x, cmd2 0x%08x, len %d\n",
+ col, row, cmd1, cmd2, trfr_sz);
+
+ vf610_nfc_done(nfc);
+}
+
+static inline const struct nand_op_instr *
+vf610_get_next_instr(const struct nand_subop *subop, int *op_id)
+{
+ if (*op_id + 1 >= subop->ninstrs)
+ return NULL;
+
+ (*op_id)++;
+
+ return &subop->instrs[*op_id];
+}
+
+static int vf610_nfc_cmd(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ const struct nand_op_instr *instr;
+ struct vf610_nfc *nfc = chip_to_nfc(chip);
+ int op_id = -1, trfr_sz = 0, offset = 0;
+ u32 col = 0, row = 0, cmd1 = 0, cmd2 = 0, code = 0;
+ bool force8bit = false;
+
+ /*
+ * Some ops are optional, but the hardware requires the operations
+ * to be in this exact order.
+ * The op parser enforces the order and makes sure that there isn't
+ * a read and write element in a single operation.
+ */
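+	/*
+	 * The order handled below: [CMD] [ADDR] [DATA_OUT] [CMD] [WAITRDY]
+	 * [DATA_IN], every element optional (cf. vf610_nfc_op_parser).
+	 */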
+ instr = vf610_get_next_instr(subop, &op_id);
+ if (!instr)
+ return -EINVAL;
+
+ if (instr && instr->type == NAND_OP_CMD_INSTR) {
+ cmd2 |= instr->ctx.cmd.opcode << CMD_BYTE1_SHIFT;
+ code |= COMMAND_CMD_BYTE1;
+
+ instr = vf610_get_next_instr(subop, &op_id);
+ }
+
+ if (instr && instr->type == NAND_OP_ADDR_INSTR) {
+ int naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
+ int i = nand_subop_get_addr_start_off(subop, op_id);
+
+ for (; i < naddrs; i++) {
+ u8 val = instr->ctx.addr.addrs[i];
+
+ if (i < 2)
+ col |= COL_ADDR(i, val);
+ else
+ row |= ROW_ADDR(i - 2, val);
+ }
+ code |= COMMAND_NADDR_BYTES(naddrs);
+
+ instr = vf610_get_next_instr(subop, &op_id);
+ }
+
+ if (instr && instr->type == NAND_OP_DATA_OUT_INSTR) {
+ trfr_sz = nand_subop_get_data_len(subop, op_id);
+ offset = nand_subop_get_data_start_off(subop, op_id);
+ force8bit = instr->ctx.data.force_8bit;
+
+ /*
+ * Don't fix endianness on page access for historical reasons.
+ * See comment in vf610_nfc_wr_to_sram
+ */
+ vf610_nfc_wr_to_sram(nfc->regs + NFC_MAIN_AREA(0) + offset,
+ instr->ctx.data.buf.out + offset,
+ trfr_sz, !nfc->data_access);
+ code |= COMMAND_WRITE_DATA;
+
+ instr = vf610_get_next_instr(subop, &op_id);
+ }
+
+ if (instr && instr->type == NAND_OP_CMD_INSTR) {
+ cmd1 |= instr->ctx.cmd.opcode << CMD_BYTE2_SHIFT;
+ code |= COMMAND_CMD_BYTE2;
+
+ instr = vf610_get_next_instr(subop, &op_id);
+ }
+
+ if (instr && instr->type == NAND_OP_WAITRDY_INSTR) {
+ code |= COMMAND_RB_HANDSHAKE;
+
+ instr = vf610_get_next_instr(subop, &op_id);
+ }
+
+ if (instr && instr->type == NAND_OP_DATA_IN_INSTR) {
+ trfr_sz = nand_subop_get_data_len(subop, op_id);
+ offset = nand_subop_get_data_start_off(subop, op_id);
+ force8bit = instr->ctx.data.force_8bit;
+
+ code |= COMMAND_READ_DATA;
+ }
+
+ if (force8bit && (chip->options & NAND_BUSWIDTH_16))
+ vf610_nfc_clear(nfc, NFC_FLASH_CONFIG, CONFIG_16BIT);
+
+ cmd2 |= code << CMD_CODE_SHIFT;
+
+ vf610_nfc_run(nfc, col, row, cmd1, cmd2, trfr_sz);
+
+ if (instr && instr->type == NAND_OP_DATA_IN_INSTR) {
+ /*
+ * Don't fix endianness on page access for historical reasons.
+ * See comment in vf610_nfc_rd_from_sram
+ */
+ vf610_nfc_rd_from_sram(instr->ctx.data.buf.in + offset,
+ nfc->regs + NFC_MAIN_AREA(0) + offset,
+ trfr_sz, !nfc->data_access);
+ }
+
+ if (force8bit && (chip->options & NAND_BUSWIDTH_16))
+ vf610_nfc_set(nfc, NFC_FLASH_CONFIG, CONFIG_16BIT);
+
+ return 0;
+}
+
+static const struct nand_op_parser vf610_nfc_op_parser = NAND_OP_PARSER(
+ NAND_OP_PARSER_PATTERN(vf610_nfc_cmd,
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(true, 5),
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(true, PAGE_2K + OOB_MAX),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
+ NAND_OP_PARSER_PATTERN(vf610_nfc_cmd,
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(true, 5),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, PAGE_2K + OOB_MAX)),
+ );
+
+/*
+ * This function supports Vybrid only (MPC5125 would have full RB and four CS)
+ */
+static void vf610_nfc_select_target(struct nand_chip *chip, unsigned int cs)
+{
+ struct vf610_nfc *nfc = chip_to_nfc(chip);
+ u32 tmp;
+
+ /* Vybrid only (MPC5125 would have full RB and four CS) */
+ if (nfc->variant != NFC_VFC610)
+ return;
+
+ tmp = vf610_nfc_read(nfc, NFC_ROW_ADDR);
+ tmp &= ~(ROW_ADDR_CHIP_SEL_RB_MASK | ROW_ADDR_CHIP_SEL_MASK);
+ tmp |= 1 << ROW_ADDR_CHIP_SEL_RB_SHIFT;
+ tmp |= BIT(cs) << ROW_ADDR_CHIP_SEL_SHIFT;
+
+ vf610_nfc_write(nfc, NFC_ROW_ADDR, tmp);
+}
+
+static int vf610_nfc_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ if (!check_only)
+ vf610_nfc_select_target(chip, op->cs);
+
+ return nand_op_parser_exec_op(chip, &vf610_nfc_op_parser, op,
+ check_only);
+}
+
+static inline int vf610_nfc_correct_data(struct nand_chip *chip, uint8_t *dat,
+ uint8_t *oob, int page)
+{
+ struct vf610_nfc *nfc = chip_to_nfc(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u32 ecc_status_off = NFC_MAIN_AREA(0) + ECC_SRAM_ADDR + ECC_STATUS;
+ u8 ecc_status;
+ u8 ecc_count;
+ int flips_threshold = nfc->chip.ecc.strength / 2;
+
+ ecc_status = vf610_nfc_read(nfc, ecc_status_off) & 0xff;
+ ecc_count = ecc_status & ECC_STATUS_ERR_COUNT;
+
+ if (!(ecc_status & ECC_STATUS_MASK))
+ return ecc_count;
+
+ nfc->data_access = true;
+ nand_read_oob_op(&nfc->chip, page, 0, oob, mtd->oobsize);
+ nfc->data_access = false;
+
+ /*
+	 * On an erased page, the bitflip count (including OOB) should be
+	 * zero, or at least less than half of the ECC strength.
+ */
+ return nand_check_erased_ecc_chunk(dat, nfc->chip.ecc.size, oob,
+ mtd->oobsize, NULL, 0,
+ flips_threshold);
+}
+
+static void vf610_nfc_fill_row(struct nand_chip *chip, int page, u32 *code,
+ u32 *row)
+{
+ *row = ROW_ADDR(0, page & 0xff) | ROW_ADDR(1, page >> 8);
+ *code |= COMMAND_RAR_BYTE1 | COMMAND_RAR_BYTE2;
+
+ if (chip->options & NAND_ROW_ADDR_3) {
+ *row |= ROW_ADDR(2, page >> 16);
+ *code |= COMMAND_RAR_BYTE3;
+ }
+}
+
+static int vf610_nfc_read_page(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct vf610_nfc *nfc = chip_to_nfc(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int trfr_sz = mtd->writesize + mtd->oobsize;
+ u32 row = 0, cmd1 = 0, cmd2 = 0, code = 0;
+ int stat;
+
+ vf610_nfc_select_target(chip, chip->cur_cs);
+
+ cmd2 |= NAND_CMD_READ0 << CMD_BYTE1_SHIFT;
+ code |= COMMAND_CMD_BYTE1 | COMMAND_CAR_BYTE1 | COMMAND_CAR_BYTE2;
+
+ vf610_nfc_fill_row(chip, page, &code, &row);
+
+ cmd1 |= NAND_CMD_READSTART << CMD_BYTE2_SHIFT;
+ code |= COMMAND_CMD_BYTE2 | COMMAND_RB_HANDSHAKE | COMMAND_READ_DATA;
+
+ cmd2 |= code << CMD_CODE_SHIFT;
+
+ vf610_nfc_ecc_mode(nfc, nfc->ecc_mode);
+ vf610_nfc_run(nfc, 0, row, cmd1, cmd2, trfr_sz);
+ vf610_nfc_ecc_mode(nfc, ECC_BYPASS);
+
+ /*
+ * Don't fix endianness on page access for historical reasons.
+ * See comment in vf610_nfc_rd_from_sram
+ */
+ vf610_nfc_rd_from_sram(buf, nfc->regs + NFC_MAIN_AREA(0),
+ mtd->writesize, false);
+ if (oob_required)
+ vf610_nfc_rd_from_sram(chip->oob_poi,
+ nfc->regs + NFC_MAIN_AREA(0) +
+ mtd->writesize,
+ mtd->oobsize, false);
+
+ stat = vf610_nfc_correct_data(chip, buf, chip->oob_poi, page);
+
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ return 0;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ return stat;
+ }
+}
+
+static int vf610_nfc_write_page(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
+{
+ struct vf610_nfc *nfc = chip_to_nfc(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int trfr_sz = mtd->writesize + mtd->oobsize;
+ u32 row = 0, cmd1 = 0, cmd2 = 0, code = 0;
+ u8 status;
+ int ret;
+
+ vf610_nfc_select_target(chip, chip->cur_cs);
+
+ cmd2 |= NAND_CMD_SEQIN << CMD_BYTE1_SHIFT;
+ code |= COMMAND_CMD_BYTE1 | COMMAND_CAR_BYTE1 | COMMAND_CAR_BYTE2;
+
+ vf610_nfc_fill_row(chip, page, &code, &row);
+
+ cmd1 |= NAND_CMD_PAGEPROG << CMD_BYTE2_SHIFT;
+ code |= COMMAND_CMD_BYTE2 | COMMAND_WRITE_DATA;
+
+ /*
+ * Don't fix endianness on page access for historical reasons.
+ * See comment in vf610_nfc_wr_to_sram
+ */
+ vf610_nfc_wr_to_sram(nfc->regs + NFC_MAIN_AREA(0), buf,
+ mtd->writesize, false);
+
+ code |= COMMAND_RB_HANDSHAKE;
+ cmd2 |= code << CMD_CODE_SHIFT;
+
+ vf610_nfc_ecc_mode(nfc, nfc->ecc_mode);
+ vf610_nfc_run(nfc, 0, row, cmd1, cmd2, trfr_sz);
+ vf610_nfc_ecc_mode(nfc, ECC_BYPASS);
+
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
+ return 0;
+}
+
+static int vf610_nfc_read_page_raw(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ struct vf610_nfc *nfc = chip_to_nfc(chip);
+ int ret;
+
+ nfc->data_access = true;
+ ret = nand_read_page_raw(chip, buf, oob_required, page);
+ nfc->data_access = false;
+
+ return ret;
+}
+
+static int vf610_nfc_write_page_raw(struct nand_chip *chip, const u8 *buf,
+ int oob_required, int page)
+{
+ struct vf610_nfc *nfc = chip_to_nfc(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ nfc->data_access = true;
+ ret = nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
+ if (!ret && oob_required)
+ ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize,
+ false);
+ nfc->data_access = false;
+
+ if (ret)
+ return ret;
+
+ return nand_prog_page_end_op(chip);
+}
+
+static int vf610_nfc_read_oob(struct nand_chip *chip, int page)
+{
+ struct vf610_nfc *nfc = chip_to_nfc(chip);
+ int ret;
+
+ nfc->data_access = true;
+ ret = nand_read_oob_std(chip, page);
+ nfc->data_access = false;
+
+ return ret;
+}
+
+static int vf610_nfc_write_oob(struct nand_chip *chip, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct vf610_nfc *nfc = chip_to_nfc(chip);
+ int ret;
+
+ nfc->data_access = true;
+ ret = nand_prog_page_begin_op(chip, page, mtd->writesize,
+ chip->oob_poi, mtd->oobsize);
+ nfc->data_access = false;
+
+ if (ret)
+ return ret;
+
+ return nand_prog_page_end_op(chip);
+}
+
+static const struct of_device_id vf610_nfc_dt_ids[] = {
+ { .compatible = "fsl,vf610-nfc", .data = (void *)NFC_VFC610 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, vf610_nfc_dt_ids);
+
+static void vf610_nfc_preinit_controller(struct vf610_nfc *nfc)
+{
+ vf610_nfc_clear(nfc, NFC_FLASH_CONFIG, CONFIG_16BIT);
+ vf610_nfc_clear(nfc, NFC_FLASH_CONFIG, CONFIG_ADDR_AUTO_INCR_BIT);
+ vf610_nfc_clear(nfc, NFC_FLASH_CONFIG, CONFIG_BUFNO_AUTO_INCR_BIT);
+ vf610_nfc_clear(nfc, NFC_FLASH_CONFIG, CONFIG_BOOT_MODE_BIT);
+ vf610_nfc_clear(nfc, NFC_FLASH_CONFIG, CONFIG_DMA_REQ_BIT);
+ vf610_nfc_set(nfc, NFC_FLASH_CONFIG, CONFIG_FAST_FLASH_BIT);
+ vf610_nfc_ecc_mode(nfc, ECC_BYPASS);
+
+ /* Disable virtual pages, only one elementary transfer unit */
+ vf610_nfc_set_field(nfc, NFC_FLASH_CONFIG, CONFIG_PAGE_CNT_MASK,
+ CONFIG_PAGE_CNT_SHIFT, 1);
+}
+
+static void vf610_nfc_init_controller(struct vf610_nfc *nfc)
+{
+ if (nfc->chip.options & NAND_BUSWIDTH_16)
+ vf610_nfc_set(nfc, NFC_FLASH_CONFIG, CONFIG_16BIT);
+ else
+ vf610_nfc_clear(nfc, NFC_FLASH_CONFIG, CONFIG_16BIT);
+
+ if (nfc->chip.ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST) {
+ /* Set ECC status offset in SRAM */
+ vf610_nfc_set_field(nfc, NFC_FLASH_CONFIG,
+ CONFIG_ECC_SRAM_ADDR_MASK,
+ CONFIG_ECC_SRAM_ADDR_SHIFT,
+ ECC_SRAM_ADDR >> 3);
+
+ /* Enable ECC status in SRAM */
+ vf610_nfc_set(nfc, NFC_FLASH_CONFIG, CONFIG_ECC_SRAM_REQ_BIT);
+ }
+}
+
+static int vf610_nfc_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct vf610_nfc *nfc = chip_to_nfc(chip);
+
+ vf610_nfc_init_controller(nfc);
+
+ /* Bad block options. */
+ if (chip->bbt_options & NAND_BBT_USE_FLASH)
+ chip->bbt_options |= NAND_BBT_NO_OOB;
+
+ /* Single buffer only, max 256 OOB minus ECC status */
+ if (mtd->writesize + mtd->oobsize > PAGE_2K + OOB_MAX - 8) {
+ dev_err(nfc->dev, "Unsupported flash page size\n");
+ return -ENXIO;
+ }
+
+ if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
+ return 0;
+
+ if (mtd->writesize != PAGE_2K && mtd->oobsize < 64) {
+ dev_err(nfc->dev, "Unsupported flash with hwecc\n");
+ return -ENXIO;
+ }
+
+ if (chip->ecc.size != mtd->writesize) {
+ dev_err(nfc->dev, "Step size needs to be page size\n");
+ return -ENXIO;
+ }
+
+ /* Only 64 byte ECC layouts known */
+ if (mtd->oobsize > 64)
+ mtd->oobsize = 64;
+
+ /* Use default large page ECC layout defined in NAND core */
+ mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout());
+ if (chip->ecc.strength == 32) {
+ nfc->ecc_mode = ECC_60_BYTE;
+ chip->ecc.bytes = 60;
+ } else if (chip->ecc.strength == 24) {
+ nfc->ecc_mode = ECC_45_BYTE;
+ chip->ecc.bytes = 45;
+ } else {
+ dev_err(nfc->dev, "Unsupported ECC strength\n");
+ return -ENXIO;
+ }
+
+ chip->ecc.read_page = vf610_nfc_read_page;
+ chip->ecc.write_page = vf610_nfc_write_page;
+ chip->ecc.read_page_raw = vf610_nfc_read_page_raw;
+ chip->ecc.write_page_raw = vf610_nfc_write_page_raw;
+ chip->ecc.read_oob = vf610_nfc_read_oob;
+ chip->ecc.write_oob = vf610_nfc_write_oob;
+
+ chip->ecc.size = PAGE_2K;
+
+ return 0;
+}
+
+static const struct nand_controller_ops vf610_nfc_controller_ops = {
+ .attach_chip = vf610_nfc_attach_chip,
+ .exec_op = vf610_nfc_exec_op,
+};
+
+static int vf610_nfc_probe(struct platform_device *pdev)
+{
+ struct vf610_nfc *nfc;
+ struct resource *res;
+ struct mtd_info *mtd;
+ struct nand_chip *chip;
+ struct device_node *child;
+ const struct of_device_id *of_id;
+ int err;
+ int irq;
+
+ nfc = devm_kzalloc(&pdev->dev, sizeof(*nfc), GFP_KERNEL);
+ if (!nfc)
+ return -ENOMEM;
+
+ nfc->dev = &pdev->dev;
+ chip = &nfc->chip;
+ mtd = nand_to_mtd(chip);
+
+ mtd->owner = THIS_MODULE;
+ mtd->dev.parent = nfc->dev;
+ mtd->name = DRV_NAME;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0)
+ return -EINVAL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ nfc->regs = devm_ioremap_resource(nfc->dev, res);
+ if (IS_ERR(nfc->regs))
+ return PTR_ERR(nfc->regs);
+
+ nfc->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(nfc->clk))
+ return PTR_ERR(nfc->clk);
+
+ err = clk_prepare_enable(nfc->clk);
+ if (err) {
+ dev_err(nfc->dev, "Unable to enable clock!\n");
+ return err;
+ }
+
+ of_id = of_match_device(vf610_nfc_dt_ids, &pdev->dev);
+ if (!of_id) {
+ err = -ENODEV;
+ goto err_disable_clk;
+ }
+
+ nfc->variant = (enum vf610_nfc_variant)of_id->data;
+
+ for_each_available_child_of_node(nfc->dev->of_node, child) {
+ if (of_device_is_compatible(child, "fsl,vf610-nfc-nandcs")) {
+ if (nand_get_flash_node(chip)) {
+ dev_err(nfc->dev,
+ "Only one NAND chip supported!\n");
+ err = -EINVAL;
+ of_node_put(child);
+ goto err_disable_clk;
+ }
+
+ nand_set_flash_node(chip, child);
+ }
+ }
+
+ if (!nand_get_flash_node(chip)) {
+ dev_err(nfc->dev, "NAND chip sub-node missing!\n");
+ err = -ENODEV;
+ goto err_disable_clk;
+ }
+
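+	/*
+	 * Pages move through the controller's single SRAM buffer as a whole,
+	 * so subpage writes are presumably not worth supporting.
+	 */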
+ chip->options |= NAND_NO_SUBPAGE_WRITE;
+
+ init_completion(&nfc->cmd_done);
+
+ err = devm_request_irq(nfc->dev, irq, vf610_nfc_irq, 0, DRV_NAME, nfc);
+ if (err) {
+ dev_err(nfc->dev, "Error requesting IRQ!\n");
+ goto err_disable_clk;
+ }
+
+ vf610_nfc_preinit_controller(nfc);
+
+ nand_controller_init(&nfc->base);
+ nfc->base.ops = &vf610_nfc_controller_ops;
+ chip->controller = &nfc->base;
+
+	/* Scan for the NAND chip; this triggers the ->attach_chip() callback */
+ err = nand_scan(chip, 1);
+ if (err)
+ goto err_disable_clk;
+
+ platform_set_drvdata(pdev, nfc);
+
+ /* Register device in MTD */
+ err = mtd_device_register(mtd, NULL, 0);
+ if (err)
+ goto err_cleanup_nand;
+ return 0;
+
+err_cleanup_nand:
+ nand_cleanup(chip);
+err_disable_clk:
+ clk_disable_unprepare(nfc->clk);
+ return err;
+}
+
+static int vf610_nfc_remove(struct platform_device *pdev)
+{
+ struct vf610_nfc *nfc = platform_get_drvdata(pdev);
+ struct nand_chip *chip = &nfc->chip;
+ int ret;
+
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ clk_disable_unprepare(nfc->clk);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int vf610_nfc_suspend(struct device *dev)
+{
+ struct vf610_nfc *nfc = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(nfc->clk);
+ return 0;
+}
+
+static int vf610_nfc_resume(struct device *dev)
+{
+ struct vf610_nfc *nfc = dev_get_drvdata(dev);
+ int err;
+
+ err = clk_prepare_enable(nfc->clk);
+ if (err)
+ return err;
+
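+	/*
+	 * Assume the controller lost its register state while the clock was
+	 * off; reprogram it from scratch.
+	 */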
+ vf610_nfc_preinit_controller(nfc);
+ vf610_nfc_init_controller(nfc);
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(vf610_nfc_pm_ops, vf610_nfc_suspend, vf610_nfc_resume);
+
+static struct platform_driver vf610_nfc_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = vf610_nfc_dt_ids,
+ .pm = &vf610_nfc_pm_ops,
+ },
+ .probe = vf610_nfc_probe,
+ .remove = vf610_nfc_remove,
+};
+
+module_platform_driver(vf610_nfc_driver);
+
+MODULE_AUTHOR("Stefan Agner <stefan.agner@toradex.com>");
+MODULE_DESCRIPTION("Freescale VF610/MPC5125 NFC MTD NAND driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/raw/xway_nand.c b/drivers/mtd/nand/raw/xway_nand.c
new file mode 100644
index 000000000..236fd8c5a
--- /dev/null
+++ b/drivers/mtd/nand/raw/xway_nand.c
@@ -0,0 +1,270 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright © 2012 John Crispin <john@phrozen.org>
+ * Copyright © 2016 Hauke Mehrtens <hauke@hauke-m.de>
+ */
+
+#include <linux/mtd/rawnand.h>
+#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
+
+#include <lantiq_soc.h>
+
+/* nand registers */
+#define EBU_ADDSEL1 0x24
+#define EBU_NAND_CON 0xB0
+#define EBU_NAND_WAIT 0xB4
+#define NAND_WAIT_RD BIT(0) /* NAND flash status output */
+#define NAND_WAIT_WR_C BIT(3) /* NAND Write/Read complete */
+#define EBU_NAND_ECC0 0xB8
+#define EBU_NAND_ECC_AC 0xBC
+
+/*
+ * NAND commands
+ * The control pins of the NAND chip are driven by the address bits of a
+ * "register" read or write: there are no dedicated registers, just an
+ * address range whose low address bits activate the corresponding line.
+ * For example, when bit (1 << 2) is set in the address, the ALE pin is
+ * asserted.
+ */
+#define NAND_CMD_ALE BIT(2) /* address latch enable */
+#define NAND_CMD_CLE BIT(3) /* command latch enable */
+#define NAND_CMD_CS BIT(4) /* chip select */
+#define NAND_CMD_SE BIT(5) /* spare area access latch */
+#define NAND_CMD_WP BIT(6) /* write protect */
+#define NAND_WRITE_CMD (NAND_CMD_CS | NAND_CMD_CLE)
+#define NAND_WRITE_ADDR (NAND_CMD_CS | NAND_CMD_ALE)
+#define NAND_WRITE_DATA (NAND_CMD_CS)
+#define NAND_READ_DATA (NAND_CMD_CS)
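+
+/*
+ * Example: a writeb() to nandaddr + NAND_WRITE_CMD drives the opcode onto
+ * the data bus with CS and CLE asserted, while nandaddr + NAND_WRITE_ADDR
+ * does the same with CS and ALE for the address cycles.
+ */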
+
+/* we need to tell the EBU which address we mapped the NAND to */
+#define ADDSEL1_MASK(x) (x << 4)
+#define ADDSEL1_REGEN 1
+
+/* we need to tell the EBU that we have NAND attached and set it up properly */
+#define BUSCON1_SETUP (1 << 22)
+#define BUSCON1_BCGEN_RES (0x3 << 12)
+#define BUSCON1_WAITWRC2 (2 << 8)
+#define BUSCON1_WAITRDC2 (2 << 6)
+#define BUSCON1_HOLDC1 (1 << 4)
+#define BUSCON1_RECOVC1 (1 << 2)
+#define BUSCON1_CMULT4 1
+
+#define NAND_CON_CE (1 << 20)
+#define NAND_CON_OUT_CS1 (1 << 10)
+#define NAND_CON_IN_CS1 (1 << 8)
+#define NAND_CON_PRE_P (1 << 7)
+#define NAND_CON_WP_P (1 << 6)
+#define NAND_CON_SE_P (1 << 5)
+#define NAND_CON_CS_P (1 << 4)
+#define NAND_CON_CSMUX (1 << 1)
+#define NAND_CON_NANDM 1
+
+struct xway_nand_data {
+ struct nand_controller controller;
+ struct nand_chip chip;
+ unsigned long csflags;
+ void __iomem *nandaddr;
+};
+
+static u8 xway_readb(struct mtd_info *mtd, int op)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct xway_nand_data *data = nand_get_controller_data(chip);
+
+ return readb(data->nandaddr + op);
+}
+
+static void xway_writeb(struct mtd_info *mtd, int op, u8 value)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct xway_nand_data *data = nand_get_controller_data(chip);
+
+ writeb(value, data->nandaddr + op);
+}
+
+static void xway_select_chip(struct nand_chip *chip, int select)
+{
+ struct xway_nand_data *data = nand_get_controller_data(chip);
+
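+	/*
+	 * ebu_lock is taken on select and only dropped again on deselect, so
+	 * the whole NAND transaction is serialized against other EBU users.
+	 */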
+ switch (select) {
+ case -1:
+ ltq_ebu_w32_mask(NAND_CON_CE, 0, EBU_NAND_CON);
+ ltq_ebu_w32_mask(NAND_CON_NANDM, 0, EBU_NAND_CON);
+ spin_unlock_irqrestore(&ebu_lock, data->csflags);
+ break;
+ case 0:
+ spin_lock_irqsave(&ebu_lock, data->csflags);
+ ltq_ebu_w32_mask(0, NAND_CON_NANDM, EBU_NAND_CON);
+ ltq_ebu_w32_mask(0, NAND_CON_CE, EBU_NAND_CON);
+ break;
+ default:
+ BUG();
+ }
+}
+
+static void xway_cmd_ctrl(struct nand_chip *chip, int cmd, unsigned int ctrl)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (cmd == NAND_CMD_NONE)
+ return;
+
+ if (ctrl & NAND_CLE)
+ xway_writeb(mtd, NAND_WRITE_CMD, cmd);
+ else if (ctrl & NAND_ALE)
+ xway_writeb(mtd, NAND_WRITE_ADDR, cmd);
+
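+	/* Busy-wait until the EBU reports write/read completion. */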
+ while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0)
+ ;
+}
+
+static int xway_dev_ready(struct nand_chip *chip)
+{
+ return ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_RD;
+}
+
+static unsigned char xway_read_byte(struct nand_chip *chip)
+{
+ return xway_readb(nand_to_mtd(chip), NAND_READ_DATA);
+}
+
+static void xway_read_buf(struct nand_chip *chip, u_char *buf, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ buf[i] = xway_readb(nand_to_mtd(chip), NAND_WRITE_DATA);
+}
+
+static void xway_write_buf(struct nand_chip *chip, const u_char *buf, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ xway_writeb(nand_to_mtd(chip), NAND_WRITE_DATA, buf[i]);
+}
+
+static int xway_attach_chip(struct nand_chip *chip)
+{
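+	/* Fall back to software Hamming when no ECC algorithm was chosen. */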
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
+ chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN)
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+
+ return 0;
+}
+
+static const struct nand_controller_ops xway_nand_ops = {
+ .attach_chip = xway_attach_chip,
+};
+
+/*
+ * Probe for the NAND device.
+ */
+static int xway_nand_probe(struct platform_device *pdev)
+{
+ struct xway_nand_data *data;
+ struct mtd_info *mtd;
+ struct resource *res;
+ int err;
+ u32 cs;
+ u32 cs_flag = 0;
+
+ /* Allocate memory for the device structure (and zero it) */
+ data = devm_kzalloc(&pdev->dev, sizeof(struct xway_nand_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ data->nandaddr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(data->nandaddr))
+ return PTR_ERR(data->nandaddr);
+
+ nand_set_flash_node(&data->chip, pdev->dev.of_node);
+ mtd = nand_to_mtd(&data->chip);
+ mtd->dev.parent = &pdev->dev;
+
+ data->chip.legacy.cmd_ctrl = xway_cmd_ctrl;
+ data->chip.legacy.dev_ready = xway_dev_ready;
+ data->chip.legacy.select_chip = xway_select_chip;
+ data->chip.legacy.write_buf = xway_write_buf;
+ data->chip.legacy.read_buf = xway_read_buf;
+ data->chip.legacy.read_byte = xway_read_byte;
+ data->chip.legacy.chip_delay = 30;
+
+ nand_controller_init(&data->controller);
+ data->controller.ops = &xway_nand_ops;
+ data->chip.controller = &data->controller;
+
+ platform_set_drvdata(pdev, data);
+ nand_set_controller_data(&data->chip, data);
+
+	/* Load our CS from the DT: a value of 1 selects CS1, else default to CS0 */
+ err = of_property_read_u32(pdev->dev.of_node, "lantiq,cs", &cs);
+ if (!err && cs == 1)
+ cs_flag = NAND_CON_IN_CS1 | NAND_CON_OUT_CS1;
+
+	/* set up the EBU to run in NAND mode at our base address */
+ ltq_ebu_w32(CPHYSADDR(data->nandaddr)
+ | ADDSEL1_MASK(3) | ADDSEL1_REGEN, EBU_ADDSEL1);
+
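+	/* Program the bus timing (wait, hold and recovery cycles) for NAND. */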
+ ltq_ebu_w32(BUSCON1_SETUP | BUSCON1_BCGEN_RES | BUSCON1_WAITWRC2
+ | BUSCON1_WAITRDC2 | BUSCON1_HOLDC1 | BUSCON1_RECOVC1
+ | BUSCON1_CMULT4, LTQ_EBU_BUSCON1);
+
+ ltq_ebu_w32(NAND_CON_NANDM | NAND_CON_CSMUX | NAND_CON_CS_P
+ | NAND_CON_SE_P | NAND_CON_WP_P | NAND_CON_PRE_P
+ | cs_flag, EBU_NAND_CON);
+
+ /*
+ * This driver assumes that the default ECC engine should be TYPE_SOFT.
+ * Set ->engine_type before registering the NAND devices in order to
+ * provide a driver specific default value.
+ */
+ data->chip.ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+
+ /* Scan to find existence of the device */
+ err = nand_scan(&data->chip, 1);
+ if (err)
+ return err;
+
+ err = mtd_device_register(mtd, NULL, 0);
+ if (err)
+ nand_cleanup(&data->chip);
+
+ return err;
+}
+
+/*
+ * Remove a NAND device.
+ */
+static int xway_nand_remove(struct platform_device *pdev)
+{
+ struct xway_nand_data *data = platform_get_drvdata(pdev);
+ struct nand_chip *chip = &data->chip;
+ int ret;
+
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+
+ return 0;
+}
+
+static const struct of_device_id xway_nand_match[] = {
+ { .compatible = "lantiq,nand-xway" },
+ {},
+};
+
+static struct platform_driver xway_nand_driver = {
+ .probe = xway_nand_probe,
+ .remove = xway_nand_remove,
+ .driver = {
+ .name = "lantiq,nand-xway",
+ .of_match_table = xway_nand_match,
+ },
+};
+
+builtin_platform_driver(xway_nand_driver);